Mirror of https://github.com/DeBrosOfficial/network.git (synced 2026-01-30 09:53:03 +00:00)
Fixing bugs in tests and in the codebase
This commit is contained in:
parent: 1a717537e5
commit: 380b10add3
.gitignore (vendored): 4 changes

@@ -88,4 +88,6 @@ configs/
 # Remote node credentials
 scripts/remote-nodes.conf
 
 orama-cli-linux
+
+rnd/
@@ -213,6 +213,11 @@ func createNextJSDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath
 	body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
 	body.WriteString(name + "\r\n")
 
+	// Write ssr field (enable SSR mode)
+	body.WriteString("--" + boundary + "\r\n")
+	body.WriteString("Content-Disposition: form-data; name=\"ssr\"\r\n\r\n")
+	body.WriteString("true\r\n")
+
 	// Write tarball file
 	body.WriteString("--" + boundary + "\r\n")
 	body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
@@ -230,7 +235,9 @@ func createNextJSDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath
 	req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
 	req.Header.Set("Authorization", "Bearer "+env.APIKey)
 
-	resp, err := env.HTTPClient.Do(req)
+	// Use a longer timeout for large Next.js uploads (can be 50MB+)
+	uploadClient := e2e.NewHTTPClient(5 * time.Minute)
+	resp, err := uploadClient.Do(req)
 	if err != nil {
 		t.Fatalf("failed to execute request: %v", err)
 	}
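Reviewer note: the test assembles the multipart body by hand with a fixed boundary. The standard library's mime/multipart writer produces an equivalent request; a minimal sketch, assuming e2e.NewHTTPClient simply wraps http.Client with the given timeout (the endpoint path below is a placeholder, not taken from this diff):

package main

import (
    "bytes"
    "mime/multipart"
    "net/http"
    "os"
    "time"
)

// buildNextJSUpload assembles a multipart request equivalent to the hand-written
// body in the test: a "name" field, an "ssr" flag, and the tarball itself.
func buildNextJSUpload(gatewayURL, apiKey, name, tarballPath string) (*http.Request, error) {
    var buf bytes.Buffer
    w := multipart.NewWriter(&buf)
    _ = w.WriteField("name", name)
    _ = w.WriteField("ssr", "true") // enable SSR mode

    part, err := w.CreateFormFile("tarball", "app.tar.gz")
    if err != nil {
        return nil, err
    }
    data, err := os.ReadFile(tarballPath)
    if err != nil {
        return nil, err
    }
    if _, err := part.Write(data); err != nil {
        return nil, err
    }
    if err := w.Close(); err != nil {
        return nil, err
    }

    // Hypothetical endpoint path; the real path is defined elsewhere in the test suite.
    req, err := http.NewRequest(http.MethodPost, gatewayURL+"/v1/deployments/nextjs", &buf)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", w.FormDataContentType())
    req.Header.Set("Authorization", "Bearer "+apiKey)
    return req, nil
}

// Large uploads need a generous client timeout, mirroring the 5-minute client in the diff.
var uploadClient = &http.Client{Timeout: 5 * time.Minute}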
e2e/env.go: 73 changes

@@ -89,7 +89,13 @@ func GetGatewayURL() string {
 	}
 	cacheMutex.RUnlock()
 
-	// Check environment variable first
+	// Check environment variables first (ORAMA_GATEWAY_URL takes precedence)
+	if envURL := os.Getenv("ORAMA_GATEWAY_URL"); envURL != "" {
+		cacheMutex.Lock()
+		gatewayURLCache = envURL
+		cacheMutex.Unlock()
+		return envURL
+	}
 	if envURL := os.Getenv("GATEWAY_URL"); envURL != "" {
 		cacheMutex.Lock()
 		gatewayURLCache = envURL
@@ -153,7 +159,16 @@ func queryAPIKeyFromRQLite() (string, error) {
 		return envKey, nil
 	}
 
-	// 2. Build database path from bootstrap/node config
+	// 2. If ORAMA_GATEWAY_URL is set (production mode), query the remote RQLite HTTP API
+	if gatewayURL := os.Getenv("ORAMA_GATEWAY_URL"); gatewayURL != "" {
+		apiKey, err := queryAPIKeyFromRemoteRQLite(gatewayURL)
+		if err == nil && apiKey != "" {
+			return apiKey, nil
+		}
+		// Fall through to local database check if remote fails
+	}
+
+	// 3. Build database path from bootstrap/node config (for local development)
 	homeDir, err := os.UserHomeDir()
 	if err != nil {
 		return "", fmt.Errorf("failed to get home directory: %w", err)
@@ -210,6 +225,60 @@ func queryAPIKeyFromRQLite() (string, error) {
 	return "", fmt.Errorf("failed to retrieve API key from any SQLite database")
 }
 
+// queryAPIKeyFromRemoteRQLite queries the remote RQLite HTTP API for an API key
+func queryAPIKeyFromRemoteRQLite(gatewayURL string) (string, error) {
+	// Parse the gateway URL to extract the host
+	parsed, err := url.Parse(gatewayURL)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse gateway URL: %w", err)
+	}
+
+	// RQLite HTTP API runs on port 5001 (not the gateway port 6001)
+	rqliteURL := fmt.Sprintf("http://%s:5001/db/query", parsed.Hostname())
+
+	// Create request body
+	reqBody := `["SELECT key FROM api_keys LIMIT 1"]`
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, rqliteURL, strings.NewReader(reqBody))
+	if err != nil {
+		return "", fmt.Errorf("failed to create request: %w", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return "", fmt.Errorf("failed to query rqlite: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("rqlite returned status %d", resp.StatusCode)
+	}
+
+	// Parse response
+	var result struct {
+		Results []struct {
+			Columns []string        `json:"columns"`
+			Values  [][]interface{} `json:"values"`
+		} `json:"results"`
+	}
+
+	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+		return "", fmt.Errorf("failed to decode response: %w", err)
+	}
+
+	if len(result.Results) > 0 && len(result.Results[0].Values) > 0 && len(result.Results[0].Values[0]) > 0 {
+		if apiKey, ok := result.Results[0].Values[0][0].(string); ok && apiKey != "" {
+			return apiKey, nil
+		}
+	}
+
+	return "", fmt.Errorf("no API key found in rqlite")
+}
+
 // GetAPIKey returns the gateway API key from rqlite or cache
 func GetAPIKey() string {
 	cacheMutex.RLock()
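Reviewer note: RQLite wraps query results in a "results" array, where each entry carries column names and a values matrix; that is why the new helper indexes result.Results[0].Values[0][0]. A small self-contained sketch of that decoding (the sample payload is illustrative, not a real key):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    sample := []byte(`{"results":[{"columns":["key"],"values":[["example-api-key"]]}]}`)

    var result struct {
        Results []struct {
            Columns []string        `json:"columns"`
            Values  [][]interface{} `json:"values"`
        } `json:"results"`
    }
    if err := json.Unmarshal(sample, &result); err != nil {
        panic(err)
    }
    // First row, first column of the first statement's result set.
    fmt.Println(result.Results[0].Values[0][0]) // example-api-key
}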
@@ -13,7 +13,12 @@ import (
 	"github.com/DeBrosOfficial/network/pkg/ipfs"
 )
 
+// Note: These tests connect directly to IPFS Cluster API (localhost:9094)
+// and IPFS API (localhost:4501). They are for local development only.
+// For production testing, use storage_http_test.go which uses gateway endpoints.
+
 func TestIPFSCluster_Health(t *testing.T) {
+	SkipIfProduction(t) // Direct IPFS connection not available in production
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
@@ -35,6 +40,7 @@ func TestIPFSCluster_Health(t *testing.T) {
 }
 
 func TestIPFSCluster_GetPeerCount(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
@@ -62,6 +68,7 @@ func TestIPFSCluster_GetPeerCount(t *testing.T) {
 }
 
 func TestIPFSCluster_AddFile(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
@@ -94,6 +101,7 @@ func TestIPFSCluster_AddFile(t *testing.T) {
 }
 
 func TestIPFSCluster_PinFile(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
@@ -131,6 +139,7 @@ func TestIPFSCluster_PinFile(t *testing.T) {
 }
 
 func TestIPFSCluster_PinStatus(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
@@ -188,6 +197,7 @@ func TestIPFSCluster_PinStatus(t *testing.T) {
 }
 
 func TestIPFSCluster_UnpinFile(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
@@ -226,6 +236,7 @@ func TestIPFSCluster_UnpinFile(t *testing.T) {
 }
 
 func TestIPFSCluster_GetFile(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
@@ -272,6 +283,7 @@ func TestIPFSCluster_GetFile(t *testing.T) {
 }
 
 func TestIPFSCluster_LargeFile(t *testing.T) {
+	SkipIfProduction(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
 	defer cancel()
 
@@ -305,6 +317,7 @@ func TestIPFSCluster_LargeFile(t *testing.T) {
 }
 
 func TestIPFSCluster_ReplicationFactor(t *testing.T) {
+	SkipIfProduction(t) // Direct IPFS connection not available in production
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 
@@ -352,6 +365,7 @@ func TestIPFSCluster_ReplicationFactor(t *testing.T) {
 }
 
 func TestIPFSCluster_MultipleFiles(t *testing.T) {
+	SkipIfProduction(t) // Direct IPFS connection not available in production
 	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
 	defer cancel()
 
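Reviewer note: SkipIfProduction itself is not part of this diff. A plausible minimal sketch, assuming the suite treats a set ORAMA_GATEWAY_URL as "production mode" (the same signal used by the e2e/env.go changes above):

package e2e

import (
    "os"
    "testing"
)

// SkipIfProduction skips tests that need direct access to local IPFS / IPFS Cluster
// ports. Assumption: production mode is detected via ORAMA_GATEWAY_URL, mirroring
// the env-var precedence added in e2e/env.go.
func SkipIfProduction(t *testing.T) {
    t.Helper()
    if os.Getenv("ORAMA_GATEWAY_URL") != "" {
        t.Skip("skipping: direct IPFS/Cluster access is not available against a remote gateway")
    }
}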
@@ -107,7 +107,10 @@ func (o *Orchestrator) Execute() error {
 
 	// Phase 4: Generate configs (BEFORE service initialization)
 	fmt.Printf("\n⚙️ Phase 4: Generating configurations...\n")
-	enableHTTPS := o.flags.Domain != ""
+	// Internal gateway always runs HTTP on port 6001
+	// When using Caddy (nameserver mode), Caddy handles external HTTPS and proxies to internal gateway
+	// When not using Caddy, the gateway runs HTTP-only (use a reverse proxy for HTTPS)
+	enableHTTPS := false
 	if err := o.setup.Phase4GenerateConfigs(o.peers, o.flags.VpsIP, enableHTTPS, o.flags.Domain, o.flags.BaseDomain, o.flags.JoinAddress); err != nil {
 		return fmt.Errorf("configuration generation failed: %w", err)
 	}
@@ -106,18 +106,11 @@ func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP stri
 	}
 
 	// Determine advertise addresses - use vpsIP if provided
-	// When HTTPS is enabled, RQLite uses native TLS on port 7002 (not SNI gateway)
-	// This avoids conflicts between SNI gateway TLS termination and RQLite's native TLS
+	// Always use port 7001 for RQLite Raft (no TLS)
 	var httpAdvAddr, raftAdvAddr string
 	if vpsIP != "" {
 		httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
-		if enableHTTPS {
-			// Use direct IP:7002 for Raft - RQLite handles TLS natively via -node-cert
-			// This bypasses the SNI gateway which would cause TLS termination conflicts
-			raftAdvAddr = net.JoinHostPort(vpsIP, "7002")
-		} else {
-			raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
-		}
+		raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
 	} else {
 		// Fallback to localhost if no vpsIP
 		httpAdvAddr = "localhost:5001"
@@ -125,21 +118,14 @@ func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP stri
 	}
 
 	// Determine RQLite join address
-	// When HTTPS is enabled, use port 7002 (direct RQLite TLS) instead of 7001 (SNI gateway)
+	// Always use port 7001 for RQLite Raft communication (no TLS)
 	joinPort := "7001"
-	if enableHTTPS {
-		joinPort = "7002"
-	}
 
 	var rqliteJoinAddr string
 	if joinAddress != "" {
 		// Use explicitly provided join address
-		// Adjust port based on HTTPS mode:
-		// - HTTPS enabled: use port 7002 (direct RQLite TLS, bypassing SNI gateway)
-		// - HTTPS disabled: use port 7001 (standard RQLite Raft port)
-		if enableHTTPS && strings.Contains(joinAddress, ":7001") {
-			rqliteJoinAddr = strings.Replace(joinAddress, ":7001", ":7002", 1)
-		} else if !enableHTTPS && strings.Contains(joinAddress, ":7002") {
+		// Normalize to port 7001 (non-TLS) regardless of what was provided
+		if strings.Contains(joinAddress, ":7002") {
 			rqliteJoinAddr = strings.Replace(joinAddress, ":7002", ":7001", 1)
 		} else {
 			rqliteJoinAddr = joinAddress
@@ -166,11 +152,9 @@ func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP stri
 	}
 
 	// Unified data directory (all nodes equal)
-	// When HTTPS/SNI is enabled, use internal port 7002 for RQLite Raft (SNI gateway listens on 7001)
+	// Always use port 7001 for RQLite Raft - TLS is optional and managed separately
+	// The SNI gateway approach was removed to simplify certificate management
 	raftInternalPort := 7001
-	if enableHTTPS {
-		raftInternalPort = 7002 // Internal port when SNI is enabled
-	}
 
 	data := templates.NodeConfigData{
 		NodeID: nodeID,
@@ -194,15 +178,10 @@ func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP stri
 		HTTPSPort: httpsPort,
 	}
 
-	// When HTTPS is enabled, configure RQLite node-to-node TLS encryption
-	// RQLite handles TLS natively on port 7002, bypassing the SNI gateway
-	// This avoids TLS termination conflicts between SNI gateway and RQLite
-	if enableHTTPS && domain != "" {
-		data.NodeCert = filepath.Join(tlsCacheDir, domain+".crt")
-		data.NodeKey = filepath.Join(tlsCacheDir, domain+".key")
-		// Skip verification since nodes may have different domain certificates
-		data.NodeNoVerify = true
-	}
+	// RQLite node-to-node TLS encryption is disabled by default
+	// This simplifies certificate management - RQLite uses plain TCP for internal Raft
+	// HTTPS is still used for client-facing gateway traffic via autocert
+	// TLS can be enabled manually later if needed for inter-node encryption
 
 	return templates.RenderNodeConfig(data)
 }
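Reviewer note: the join-address handling above now rewrites any legacy ":7002" address back to the plain Raft port. A small self-contained illustration of that normalization:

package main

import (
    "fmt"
    "strings"
)

// normalizeJoinAddress mirrors the logic in GenerateNodeConfig: any join address
// that still points at the old TLS port 7002 is rewritten to the plain Raft port 7001.
func normalizeJoinAddress(joinAddress string) string {
    if strings.Contains(joinAddress, ":7002") {
        return strings.Replace(joinAddress, ":7002", ":7001", 1)
    }
    return joinAddress
}

func main() {
    fmt.Println(normalizeJoinAddress("10.0.0.5:7002")) // 10.0.0.5:7001
    fmt.Println(normalizeJoinAddress("10.0.0.5:7001")) // unchanged
}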
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"net/url"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -314,12 +315,42 @@ func (ci *CoreDNSInstaller) generateCorefile(domain, rqliteDSN string) string {
 
 // seedStaticRecords inserts static zone records into RQLite
 func (ci *CoreDNSInstaller) seedStaticRecords(domain, rqliteDSN, ns1IP, ns2IP, ns3IP string) error {
+	// First, check if nameserver A records already exist with different IPs
+	// If so, we should preserve them instead of overwriting with potentially wrong IPs
+	existingNSIPs, err := ci.getExistingNameserverIPs(domain, rqliteDSN)
+	if err == nil && len(existingNSIPs) == 3 {
+		// Check if they have at least 2 different IPs (properly configured cluster)
+		uniqueIPs := make(map[string]bool)
+		for _, ip := range existingNSIPs {
+			uniqueIPs[ip] = true
+		}
+		if len(uniqueIPs) >= 2 {
+			// Nameserver records are already properly configured, use existing IPs
+			fmt.Fprintf(ci.logWriter, "   Using existing nameserver IPs from database\n")
+			ns1IP = existingNSIPs[0]
+			ns2IP = existingNSIPs[1]
+			ns3IP = existingNSIPs[2]
+		}
+	}
+
 	// Generate serial based on current date
 	serial := fmt.Sprintf("%d", time.Now().Unix())
 
 	// SOA record format: "mname rname serial refresh retry expire minimum"
 	soaValue := fmt.Sprintf("ns1.%s. admin.%s. %s 3600 1800 604800 300", domain, domain, serial)
 
+	// First, delete existing system records to avoid duplicates
+	// We only delete system records, not deployment-created records
+	deleteStatements := []string{
+		fmt.Sprintf(`DELETE FROM dns_records WHERE namespace = 'system' AND fqdn = '%s.' AND record_type IN ('SOA', 'NS', 'A')`, domain),
+		fmt.Sprintf(`DELETE FROM dns_records WHERE namespace = 'system' AND fqdn = '*.%s.' AND record_type = 'A'`, domain),
+		fmt.Sprintf(`DELETE FROM dns_records WHERE namespace = 'system' AND fqdn LIKE 'ns%%.%s.' AND record_type = 'A'`, domain),
+	}
+
+	if err := ci.executeRQLiteStatements(rqliteDSN, deleteStatements); err != nil {
+		return fmt.Errorf("failed to clean up old records: %w", err)
+	}
+
 	// Define all static records
 	records := []struct {
 		fqdn string
@@ -354,10 +385,9 @@ func (ci *CoreDNSInstaller) seedStaticRecords(domain, rqliteDSN, ns1IP, ns2IP, n
 	// Build SQL statements
 	var statements []string
 	for _, r := range records {
-		// Use INSERT OR REPLACE to handle updates
 		// IMPORTANT: Must set is_active = TRUE for CoreDNS to find the records
 		stmt := fmt.Sprintf(
-			`INSERT OR REPLACE INTO dns_records (fqdn, record_type, value, ttl, namespace, created_by, is_active, created_at, updated_at) VALUES ('%s', '%s', '%s', %d, 'system', 'system', TRUE, datetime('now'), datetime('now'))`,
+			`INSERT INTO dns_records (fqdn, record_type, value, ttl, namespace, created_by, is_active, created_at, updated_at) VALUES ('%s', '%s', '%s', %d, 'system', 'system', TRUE, datetime('now'), datetime('now'))`,
 			r.fqdn, r.recordType, r.value, r.ttl,
 		)
 		statements = append(statements, stmt)
@@ -367,6 +397,63 @@ func (ci *CoreDNSInstaller) seedStaticRecords(domain, rqliteDSN, ns1IP, ns2IP, n
 	return ci.executeRQLiteStatements(rqliteDSN, statements)
 }
 
+// getExistingNameserverIPs queries RQLite for existing ns1, ns2, ns3 A record IPs
+func (ci *CoreDNSInstaller) getExistingNameserverIPs(domain, rqliteDSN string) ([]string, error) {
+	// Build query - use url.QueryEscape to properly encode the SQL
+	query := fmt.Sprintf("SELECT fqdn, value FROM dns_records WHERE fqdn LIKE 'ns_.%s.' AND record_type = 'A' AND is_active = TRUE ORDER BY fqdn", domain)
+	queryURL := fmt.Sprintf("%s/db/query?q=%s", rqliteDSN, url.QueryEscape(query))
+
+	client := &http.Client{Timeout: 5 * time.Second}
+	resp, err := client.Get(queryURL)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("query failed with status %d", resp.StatusCode)
+	}
+
+	var result struct {
+		Results []struct {
+			Values [][]interface{} `json:"values"`
+		} `json:"results"`
+	}
+
+	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+		return nil, err
+	}
+
+	if len(result.Results) == 0 || result.Results[0].Values == nil || len(result.Results[0].Values) < 3 {
+		return nil, fmt.Errorf("not enough nameserver records found")
+	}
+
+	// Extract IPs for ns1, ns2, ns3 (ordered by fqdn)
+	ips := make([]string, 0, 3)
+	for _, row := range result.Results[0].Values {
+		if len(row) >= 2 {
+			if ip, ok := row[1].(string); ok {
+				ips = append(ips, ip)
+			}
+		}
+	}
+
+	if len(ips) != 3 {
+		return nil, fmt.Errorf("expected 3 nameserver IPs, got %d", len(ips))
+	}
+
+	return ips, nil
+}
+
+// rqliteResult represents the response from RQLite execute endpoint
+type rqliteResult struct {
+	Results []struct {
+		Error        string `json:"error,omitempty"`
+		RowsAffected int    `json:"rows_affected,omitempty"`
+		LastInsertID int    `json:"last_insert_id,omitempty"`
+	} `json:"results"`
+}
+
 // executeRQLiteStatements executes SQL statements via RQLite HTTP API
 func (ci *CoreDNSInstaller) executeRQLiteStatements(rqliteDSN string, statements []string) error {
 	// RQLite execute endpoint
@@ -378,6 +465,9 @@ func (ci *CoreDNSInstaller) executeRQLiteStatements(rqliteDSN string, statements
 		return fmt.Errorf("failed to marshal statements: %w", err)
 	}
 
+	// Log what we're sending for debugging
+	fmt.Fprintf(ci.logWriter, "   Executing %d SQL statements...\n", len(statements))
+
 	// Create request
 	req, err := http.NewRequest("POST", executeURL, bytes.NewReader(body))
 	if err != nil {
@@ -393,11 +483,39 @@ func (ci *CoreDNSInstaller) executeRQLiteStatements(rqliteDSN string, statements
 	}
 	defer resp.Body.Close()
 
+	// Read response body
+	respBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("failed to read response: %w", err)
+	}
+
 	if resp.StatusCode != http.StatusOK {
-		respBody, _ := io.ReadAll(resp.Body)
 		return fmt.Errorf("RQLite returned status %d: %s", resp.StatusCode, string(respBody))
 	}
 
+	// Parse response to check for SQL errors
+	var result rqliteResult
+	if err := json.Unmarshal(respBody, &result); err != nil {
+		return fmt.Errorf("failed to parse RQLite response: %w (body: %s)", err, string(respBody))
+	}
+
+	// Check each result for errors
+	var errors []string
+	successCount := 0
+	for i, r := range result.Results {
+		if r.Error != "" {
+			errors = append(errors, fmt.Sprintf("statement %d: %s", i+1, r.Error))
+		} else {
+			successCount++
+		}
+	}
+
+	if len(errors) > 0 {
+		fmt.Fprintf(ci.logWriter, "   ⚠️ %d/%d statements succeeded, %d failed\n", successCount, len(statements), len(errors))
+		return fmt.Errorf("SQL errors: %v", errors)
+	}
+
+	fmt.Fprintf(ci.logWriter, "   ✓ All %d statements executed successfully\n", successCount)
 	return nil
 }
 
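Reviewer note: RQLite's /db/execute endpoint takes a JSON array of SQL statements and returns a per-statement result array, which is what the stricter error checking above parses. A minimal sketch of that round trip (the URL, table, and statements here are illustrative only):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    statements := []string{
        `DELETE FROM dns_records WHERE namespace = 'system' AND fqdn = 'example.com.'`,
        `INSERT INTO dns_records (fqdn, record_type, value) VALUES ('example.com.', 'A', '203.0.113.10')`,
    }
    body, _ := json.Marshal(statements) // RQLite expects a JSON array of statements

    resp, err := http.Post("http://localhost:5001/db/execute", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Each statement gets its own result entry; an "error" field marks a failed statement.
    var result struct {
        Results []struct {
            Error string `json:"error,omitempty"`
        } `json:"results"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        panic(err)
    }
    for i, r := range result.Results {
        if r.Error != "" {
            fmt.Printf("statement %d failed: %s\n", i+1, r.Error)
        }
    }
}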
@@ -668,18 +668,32 @@ func (ps *ProductionSetup) SeedDNSRecords(baseDomain, vpsIP string, peerAddresse
 
 	ps.logf("Seeding DNS records...")
 
-	// Get node IPs from peer addresses or use the VPS IP for all
+	// Get node IPs from peer addresses (multiaddrs) or use the VPS IP for all
+	// Peer addresses are multiaddrs like /ip4/1.2.3.4/tcp/4001/p2p/12D3KooW...
+	// We need to extract just the IP from them
 	ns1IP := vpsIP
 	ns2IP := vpsIP
 	ns3IP := vpsIP
-	if len(peerAddresses) >= 1 && peerAddresses[0] != "" {
-		ns1IP = peerAddresses[0]
+	// Extract IPs from multiaddrs
+	var extractedIPs []string
+	for _, peer := range peerAddresses {
+		if peer != "" {
+			if ip := extractIPFromMultiaddr(peer); ip != "" {
+				extractedIPs = append(extractedIPs, ip)
+			}
+		}
 	}
-	if len(peerAddresses) >= 2 && peerAddresses[1] != "" {
-		ns2IP = peerAddresses[1]
+	// Assign extracted IPs to nameservers
+	if len(extractedIPs) >= 1 {
+		ns1IP = extractedIPs[0]
 	}
-	if len(peerAddresses) >= 3 && peerAddresses[2] != "" {
-		ns3IP = peerAddresses[2]
+	if len(extractedIPs) >= 2 {
+		ns2IP = extractedIPs[1]
+	}
+	if len(extractedIPs) >= 3 {
+		ns3IP = extractedIPs[2]
 	}
 
 	rqliteDSN := "http://localhost:5001"
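Reviewer note: the string-based extractIPFromMultiaddr used by SeedDNSRecords is not shown in this hunk. A minimal sketch of what such a helper could look like for addresses of the form /ip4/1.2.3.4/tcp/4001/p2p/... (the repository's real helper may differ):

package main

import (
    "fmt"
    "strings"
)

// extractIPFromMultiaddr pulls the IP component out of a textual multiaddr.
// Illustrative sketch only; it simply scans for the ip4/ip6 protocol segment.
func extractIPFromMultiaddr(addr string) string {
    parts := strings.Split(addr, "/")
    for i, part := range parts {
        if (part == "ip4" || part == "ip6") && i+1 < len(parts) {
            return parts[i+1]
        }
    }
    return ""
}

func main() {
    fmt.Println(extractIPFromMultiaddr("/ip4/203.0.113.10/tcp/4001/p2p/12D3KooWexample")) // 203.0.113.10
}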
@@ -49,7 +49,7 @@ logging:
 
 http_gateway:
   enabled: true
-  listen_addr: "{{if .EnableHTTPS}}:{{.HTTPSPort}}{{else}}:{{.UnifiedGatewayPort}}{{end}}"
+  listen_addr: ":{{.UnifiedGatewayPort}}"
   node_name: "{{.NodeID}}"
   base_domain: "{{.BaseDomain}}"
 
@@ -63,17 +63,8 @@ http_gateway:
     email: "admin@{{.Domain}}"
   {{end}}
 
-{{if .EnableHTTPS}}sni:
-  enabled: true
-  listen_addr: ":{{.RQLiteRaftPort}}"
-  cert_file: "{{.TLSCacheDir}}/{{.Domain}}.crt"
-  key_file: "{{.TLSCacheDir}}/{{.Domain}}.key"
-  routes:
-    # Note: Raft traffic bypasses SNI gateway - RQLite uses native TLS on port 7002
-    ipfs.{{.Domain}}: "localhost:4101"
-    ipfs-cluster.{{.Domain}}: "localhost:9098"
-    olric.{{.Domain}}: "localhost:3322"
-{{end}}
+  # SNI gateway disabled - Caddy handles TLS termination for external traffic
+  # Internal service-to-service communication uses plain TCP
 
 # Full gateway configuration (for API, auth, pubsub, and internal service routing)
 client_namespace: "default"
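Reviewer note: the simplified listen_addr line no longer branches on EnableHTTPS. Assuming the node config template is rendered with Go's text/template (the struct below is an illustrative subset, not the real NodeConfigData), the line resolves like this:

package main

import (
    "os"
    "text/template"
)

// Illustrative subset of the node-config template data; only the field used by
// the listen_addr line is shown.
type nodeConfigData struct {
    UnifiedGatewayPort int
}

func main() {
    tmpl := template.Must(template.New("gateway").Parse(`listen_addr: ":{{.UnifiedGatewayPort}}"` + "\n"))
    _ = tmpl.Execute(os.Stdout, nodeConfigData{UnifiedGatewayPort: 6001})
    // Output: listen_addr: ":6001"
}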
@@ -258,6 +258,10 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
 	if gw.cfg.BaseDomain != "" {
 		gw.deploymentService.SetBaseDomain(gw.cfg.BaseDomain)
 	}
+	// Set node peer ID so deployments run on the node that receives the request
+	if gw.cfg.NodePeerID != "" {
+		gw.deploymentService.SetNodePeerID(gw.cfg.NodePeerID)
+	}
 
 	// Create deployment handlers
 	gw.staticHandler = deploymentshandlers.NewStaticDeploymentHandler(
@@ -73,6 +73,15 @@ func (h *GoHandler) HandleUpload(w http.ResponseWriter, r *http.Request) {
 		healthCheckPath = "/health"
 	}
 
+	// Parse environment variables (form fields starting with "env_")
+	envVars := make(map[string]string)
+	for key, values := range r.MultipartForm.Value {
+		if strings.HasPrefix(key, "env_") && len(values) > 0 {
+			envName := strings.TrimPrefix(key, "env_")
+			envVars[envName] = values[0]
+		}
+	}
+
 	// Get tarball file
 	file, header, err := r.FormFile("tarball")
 	if err != nil {
@@ -99,7 +108,7 @@ func (h *GoHandler) HandleUpload(w http.ResponseWriter, r *http.Request) {
 	cid := addResp.Cid
 
 	// Deploy the Go backend
-	deployment, err := h.deploy(ctx, namespace, name, subdomain, cid, healthCheckPath)
+	deployment, err := h.deploy(ctx, namespace, name, subdomain, cid, healthCheckPath, envVars)
 	if err != nil {
 		h.logger.Error("Failed to deploy Go backend", zap.Error(err))
 		http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -131,7 +140,7 @@ func (h *GoHandler) HandleUpload(w http.ResponseWriter, r *http.Request) {
 }
 
 // deploy deploys a Go backend
-func (h *GoHandler) deploy(ctx context.Context, namespace, name, subdomain, cid, healthCheckPath string) (*deployments.Deployment, error) {
+func (h *GoHandler) deploy(ctx context.Context, namespace, name, subdomain, cid, healthCheckPath string, envVars map[string]string) (*deployments.Deployment, error) {
 	// Create deployment directory
 	deployPath := filepath.Join(h.baseDeployPath, namespace, name)
 	if err := os.MkdirAll(deployPath, 0755); err != nil {
@@ -169,7 +178,7 @@ func (h *GoHandler) deploy(ctx context.Context, namespace, name, subdomain, cid,
 		Status: deployments.DeploymentStatusDeploying,
 		ContentCID: cid,
 		Subdomain: subdomain,
-		Environment: make(map[string]string),
+		Environment: envVars,
 		MemoryLimitMB: 256,
 		CPULimitPercent: 100,
 		HealthCheckPath: healthCheckPath,
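Reviewer note: with this change, clients pass deployment environment variables as multipart form fields prefixed with "env_", which HandleUpload strips back to plain names. A small client-side sketch of producing those fields:

package main

import (
    "bytes"
    "mime/multipart"
)

// buildEnvFields shows how environment variables end up as "env_"-prefixed form
// fields (env_PORT, env_LOG_LEVEL, ...) alongside the rest of the upload form.
func buildEnvFields(env map[string]string) (*bytes.Buffer, string, error) {
    var buf bytes.Buffer
    w := multipart.NewWriter(&buf)
    for name, value := range env {
        if err := w.WriteField("env_"+name, value); err != nil {
            return nil, "", err
        }
    }
    if err := w.Close(); err != nil {
        return nil, "", err
    }
    return &buf, w.FormDataContentType(), nil
}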
@@ -19,6 +19,7 @@ type DeploymentService struct {
 	portAllocator *deployments.PortAllocator
 	logger        *zap.Logger
 	baseDomain    string // Base domain for deployments (e.g., "dbrs.space")
+	nodePeerID    string // Current node's peer ID (deployments run on this node)
 }
 
 // NewDeploymentService creates a new deployment service
@@ -44,6 +45,12 @@ func (s *DeploymentService) SetBaseDomain(domain string) {
 	}
 }
 
+// SetNodePeerID sets the current node's peer ID
+// Deployments will always run on this node (no cross-node routing for deployment creation)
+func (s *DeploymentService) SetNodePeerID(peerID string) {
+	s.nodePeerID = peerID
+}
+
 // BaseDomain returns the configured base domain
 func (s *DeploymentService) BaseDomain() string {
 	if s.baseDomain == "" {
@@ -69,8 +76,13 @@ func GetShortNodeID(peerID string) string {
 
 // CreateDeployment creates a new deployment
 func (s *DeploymentService) CreateDeployment(ctx context.Context, deployment *deployments.Deployment) error {
-	// Assign home node if not already assigned
-	if deployment.HomeNodeID == "" {
+	// Always use current node's peer ID for home node
+	// Deployments run on the node that receives the creation request
+	// This ensures port allocation matches where the service actually runs
+	if s.nodePeerID != "" {
+		deployment.HomeNodeID = s.nodePeerID
+	} else if deployment.HomeNodeID == "" {
+		// Fallback to home node manager if no node peer ID configured
 		homeNodeID, err := s.homeNodeManager.AssignHomeNode(ctx, deployment.Namespace)
 		if err != nil {
 			return fmt.Errorf("failed to assign home node: %w", err)
@@ -54,12 +54,19 @@ func NewSQLiteHandler(db rqlite.Client, homeNodeManager *deployments.HomeNodeMan
 	}
 }
 
+// writeCreateError writes an error response as JSON for consistency
+func writeCreateError(w http.ResponseWriter, status int, message string) {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+	json.NewEncoder(w).Encode(map[string]string{"error": message})
+}
+
 // CreateDatabase creates a new SQLite database for a namespace
 func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 	namespace, ok := ctx.Value(ctxkeys.NamespaceOverride).(string)
 	if !ok || namespace == "" {
-		http.Error(w, "Namespace not found in context", http.StatusUnauthorized)
+		writeCreateError(w, http.StatusUnauthorized, "Namespace not found in context")
 		return
 	}
 
@@ -68,18 +75,18 @@ func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		http.Error(w, "Invalid request body", http.StatusBadRequest)
+		writeCreateError(w, http.StatusBadRequest, "Invalid request body")
 		return
 	}
 
 	if req.DatabaseName == "" {
-		http.Error(w, "database_name is required", http.StatusBadRequest)
+		writeCreateError(w, http.StatusBadRequest, "database_name is required")
 		return
 	}
 
 	// Validate database name (alphanumeric, underscore, hyphen only)
 	if !isValidDatabaseName(req.DatabaseName) {
-		http.Error(w, "Invalid database name. Use only alphanumeric characters, underscores, and hyphens", http.StatusBadRequest)
+		writeCreateError(w, http.StatusBadRequest, "Invalid database name. Use only alphanumeric characters, underscores, and hyphens")
		return
 	}
 
@@ -88,18 +95,26 @@ func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
 		zap.String("database", req.DatabaseName),
 	)
 
-	// Assign home node for namespace
-	homeNodeID, err := h.homeNodeManager.AssignHomeNode(ctx, namespace)
-	if err != nil {
-		h.logger.Error("Failed to assign home node", zap.Error(err))
-		http.Error(w, "Failed to assign home node", http.StatusInternalServerError)
-		return
+	// For SQLite databases, the home node is ALWAYS the current node
+	// because the database file is stored locally on this node's filesystem.
+	// This is different from deployments which can be load-balanced across nodes.
+	homeNodeID := h.currentNodeID
+	if homeNodeID == "" {
+		// Fallback: if node ID not configured, try to get from HomeNodeManager
+		// This provides backward compatibility for single-node setups
+		var err error
+		homeNodeID, err = h.homeNodeManager.AssignHomeNode(ctx, namespace)
+		if err != nil {
+			h.logger.Error("Failed to assign home node", zap.Error(err))
+			writeCreateError(w, http.StatusInternalServerError, "Failed to assign home node")
+			return
+		}
 	}
 
 	// Check if database already exists
 	existing, err := h.getDatabaseRecord(ctx, namespace, req.DatabaseName)
 	if err == nil && existing != nil {
-		http.Error(w, "Database already exists", http.StatusConflict)
+		writeCreateError(w, http.StatusConflict, "Database already exists")
 		return
 	}
 
@@ -110,7 +125,7 @@ func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
 	// Create directory if needed
 	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
 		h.logger.Error("Failed to create directory", zap.Error(err))
-		http.Error(w, "Failed to create database directory", http.StatusInternalServerError)
+		writeCreateError(w, http.StatusInternalServerError, "Failed to create database directory")
 		return
 	}
 
@@ -118,7 +133,7 @@ func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
 	sqliteDB, err := sql.Open("sqlite3", dbPath)
 	if err != nil {
 		h.logger.Error("Failed to create SQLite database", zap.Error(err))
-		http.Error(w, "Failed to create database", http.StatusInternalServerError)
+		writeCreateError(w, http.StatusInternalServerError, "Failed to create database")
 		return
 	}
 
@@ -141,7 +156,7 @@ func (h *SQLiteHandler) CreateDatabase(w http.ResponseWriter, r *http.Request) {
 	if err != nil {
 		h.logger.Error("Failed to record database", zap.Error(err))
 		os.Remove(dbPath) // Cleanup
-		http.Error(w, "Failed to record database", http.StatusInternalServerError)
+		writeCreateError(w, http.StatusInternalServerError, "Failed to record database")
 		return
 	}
 
@@ -28,35 +28,42 @@ type QueryResponse struct {
 	Error string `json:"error,omitempty"`
 }
 
+// writeJSONError writes an error response as JSON for consistency
+func writeJSONError(w http.ResponseWriter, status int, message string) {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+	json.NewEncoder(w).Encode(QueryResponse{Error: message})
+}
+
 // QueryDatabase executes a SQL query on a namespace database
 func (h *SQLiteHandler) QueryDatabase(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 	namespace, ok := ctx.Value(ctxkeys.NamespaceOverride).(string)
 	if !ok || namespace == "" {
-		http.Error(w, "Namespace not found in context", http.StatusUnauthorized)
+		writeJSONError(w, http.StatusUnauthorized, "Namespace not found in context")
 		return
 	}
 
 	var req QueryRequest
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		http.Error(w, "Invalid request body", http.StatusBadRequest)
+		writeJSONError(w, http.StatusBadRequest, "Invalid request body")
 		return
 	}
 
 	if req.DatabaseName == "" {
-		http.Error(w, "database_name is required", http.StatusBadRequest)
+		writeJSONError(w, http.StatusBadRequest, "database_name is required")
 		return
 	}
 
 	if req.Query == "" {
-		http.Error(w, "query is required", http.StatusBadRequest)
+		writeJSONError(w, http.StatusBadRequest, "query is required")
 		return
 	}
 
 	// Get database metadata
 	dbMeta, err := h.getDatabaseRecord(ctx, namespace, req.DatabaseName)
 	if err != nil {
-		http.Error(w, "Database not found", http.StatusNotFound)
+		writeJSONError(w, http.StatusNotFound, "Database not found")
 		return
 	}
 
@@ -70,7 +77,7 @@ func (h *SQLiteHandler) QueryDatabase(w http.ResponseWriter, r *http.Request) {
 			zap.String("home_node", homeNodeID),
 			zap.String("current_node", h.currentNodeID),
 		)
-		http.Error(w, "Database is on a different node. Use node-specific URL or wait for routing implementation.", http.StatusMisdirectedRequest)
+		writeJSONError(w, http.StatusMisdirectedRequest, "Database is on a different node. Use node-specific URL or wait for routing implementation.")
 		return
 	}
 
@@ -83,7 +90,7 @@ func (h *SQLiteHandler) QueryDatabase(w http.ResponseWriter, r *http.Request) {
 			zap.String("namespace", namespace),
 			zap.String("database", req.DatabaseName),
 		)
-		http.Error(w, "Database file not found on this node", http.StatusNotFound)
+		writeJSONError(w, http.StatusNotFound, "Database file not found on this node")
 		return
 	}
 
@@ -91,7 +98,7 @@ func (h *SQLiteHandler) QueryDatabase(w http.ResponseWriter, r *http.Request) {
 	db, err := sql.Open("sqlite3", filePath)
 	if err != nil {
 		h.logger.Error("Failed to open database", zap.Error(err))
-		http.Error(w, "Failed to open database", http.StatusInternalServerError)
+		writeJSONError(w, http.StatusInternalServerError, "Failed to open database")
 		return
 	}
 	defer db.Close()
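Reviewer note: with these changes the SQLite endpoints return a JSON error envelope instead of plain text. A minimal client-side sketch of handling it (the endpoint path and request fields are assumptions, not taken from this diff):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    reqBody, _ := json.Marshal(map[string]string{
        "database_name": "app",
        "query":         "SELECT 1",
    })
    resp, err := http.Post("http://localhost:6001/v1/sqlite/query", "application/json", bytes.NewReader(reqBody))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // Errors now arrive as {"error": "..."} with an appropriate status code,
    // e.g. 421 Misdirected Request when the database lives on another node.
    if resp.StatusCode != http.StatusOK {
        var e struct {
            Error string `json:"error"`
        }
        _ = json.NewDecoder(resp.Body).Decode(&e)
        fmt.Printf("query failed (%d): %s\n", resp.StatusCode, e.Error)
        return
    }
    fmt.Println("query succeeded")
}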
@@ -2,7 +2,6 @@ package gateway
 
 import (
 	"context"
-	"crypto/tls"
 	"encoding/json"
 	"io"
 	"net"
@@ -713,9 +712,9 @@ func (g *Gateway) proxyCrossNode(w http.ResponseWriter, r *http.Request, deploym
 		zap.String("current_node", g.nodePeerID),
 	)
 
-	// Proxy to home node via HTTPS
-	// Use the original Host header so the home node's TLS works correctly
-	targetURL := "https://" + homeIP + r.URL.Path
+	// Proxy to home node via internal HTTP port (6001)
+	// This is node-to-node internal communication - no TLS needed
+	targetURL := "http://" + homeIP + ":6001" + r.URL.Path
 	if r.URL.RawQuery != "" {
 		targetURL += "?" + r.URL.RawQuery
 	}
@@ -726,26 +725,19 @@ func (g *Gateway) proxyCrossNode(w http.ResponseWriter, r *http.Request, deploym
 		return false
 	}
 
-	// Copy headers and set Host header to original host
+	// Copy headers and set Host header to original domain for routing
 	for key, values := range r.Header {
 		for _, value := range values {
 			proxyReq.Header.Add(key, value)
 		}
 	}
-	proxyReq.Host = r.Host // Keep original host for TLS SNI
+	proxyReq.Host = r.Host // Keep original host for domain routing on target node
 	proxyReq.Header.Set("X-Forwarded-For", getClientIP(r))
 	proxyReq.Header.Set("X-Orama-Proxy-Node", g.nodePeerID) // Prevent loops
 
-	// Skip TLS verification since we're connecting by IP with a Host header
-	// The home node has the correct certificate for the domain
+	// Simple HTTP client for internal node-to-node communication
 	httpClient := &http.Client{
 		Timeout: 30 * time.Second,
-		Transport: &http.Transport{
-			TLSClientConfig: &tls.Config{
-				InsecureSkipVerify: true,
-				ServerName: r.Host, // Use original host for SNI
-			},
-		},
 	}
 
 	resp, err := httpClient.Do(proxyReq)
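Reviewer note: the X-Orama-Proxy-Node header exists to prevent proxy loops; the receiving side is not shown in this diff. A hypothetical sketch of the check a receiving gateway could perform (handler and helper names are illustrative, not from the codebase):

package main

import "net/http"

// alreadyProxied reports whether a request has already been forwarded by another
// node, so the receiving gateway serves it locally instead of proxying again.
// Header name taken from the diff; everything else here is a sketch.
func alreadyProxied(r *http.Request) bool {
    return r.Header.Get("X-Orama-Proxy-Node") != ""
}

func handleDeploymentRequest(w http.ResponseWriter, r *http.Request) {
    if alreadyProxied(r) {
        // Serve locally; never proxy a request that another node already forwarded.
        w.WriteHeader(http.StatusOK)
        _, _ = w.Write([]byte("served locally"))
        return
    }
    // ... normal routing, which may call proxyCrossNode ...
    w.WriteHeader(http.StatusOK)
}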
@ -1,7 +1,9 @@
|
|||||||
package ipfs
|
package ipfs
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@ -10,6 +12,7 @@ import (
|
|||||||
|
|
||||||
"github.com/libp2p/go-libp2p/core/host"
|
"github.com/libp2p/go-libp2p/core/host"
|
||||||
"github.com/multiformats/go-multiaddr"
|
"github.com/multiformats/go-multiaddr"
|
||||||
|
manet "github.com/multiformats/go-multiaddr/net"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -81,13 +84,19 @@ func (cm *ClusterConfigManager) DiscoverClusterPeersFromGateway() ([]ClusterPeer
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DiscoverClusterPeersFromLibP2P uses libp2p host to find other cluster peers
|
// DiscoverClusterPeersFromLibP2P discovers IPFS and IPFS Cluster peers by querying
|
||||||
|
// the /v1/network/status endpoint of connected libp2p peers.
|
||||||
|
// This is the correct approach since IPFS/Cluster peer IDs are different from libp2p peer IDs.
|
||||||
func (cm *ClusterConfigManager) DiscoverClusterPeersFromLibP2P(h host.Host) error {
|
func (cm *ClusterConfigManager) DiscoverClusterPeersFromLibP2P(h host.Host) error {
|
||||||
if h == nil {
|
if h == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var clusterPeers []string
|
var clusterPeers []string
|
||||||
|
var ipfsPeers []IPFSPeerEntry
|
||||||
|
|
||||||
|
// Get unique IPs from connected libp2p peers
|
||||||
|
peerIPs := make(map[string]bool)
|
||||||
for _, p := range h.Peerstore().Peers() {
|
for _, p := range h.Peerstore().Peers() {
|
||||||
if p == h.ID() {
|
if p == h.ID() {
|
||||||
continue
|
continue
|
||||||
@ -95,20 +104,248 @@ func (cm *ClusterConfigManager) DiscoverClusterPeersFromLibP2P(h host.Host) erro
|
|||||||
|
|
||||||
info := h.Peerstore().PeerInfo(p)
|
info := h.Peerstore().PeerInfo(p)
|
||||||
for _, addr := range info.Addrs {
|
for _, addr := range info.Addrs {
|
||||||
if strings.Contains(addr.String(), "/tcp/9096") || strings.Contains(addr.String(), "/tcp/9094") {
|
// Extract IP from multiaddr
|
||||||
ma := addr.Encapsulate(multiaddr.StringCast(fmt.Sprintf("/p2p/%s", p.String())))
|
ip := extractIPFromMultiaddr(addr)
|
||||||
clusterPeers = append(clusterPeers, ma.String())
|
if ip != "" && !strings.HasPrefix(ip, "127.") && !strings.HasPrefix(ip, "::1") {
|
||||||
|
peerIPs[ip] = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(peerIPs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query each peer's /v1/network/status endpoint to get IPFS and Cluster info
|
||||||
|
client := &http.Client{Timeout: 5 * time.Second}
|
||||||
|
for ip := range peerIPs {
|
||||||
|
statusURL := fmt.Sprintf("http://%s:6001/v1/network/status", ip)
|
||||||
|
resp, err := client.Get(statusURL)
|
||||||
|
if err != nil {
|
||||||
|
cm.logger.Debug("Failed to query peer status", zap.String("ip", ip), zap.Error(err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var status NetworkStatusResponse
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
cm.logger.Debug("Failed to decode peer status", zap.String("ip", ip), zap.Error(err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
// Add IPFS Cluster peer if available
|
||||||
|
if status.IPFSCluster != nil && status.IPFSCluster.PeerID != "" {
|
||||||
|
for _, addr := range status.IPFSCluster.Addresses {
|
||||||
|
if strings.Contains(addr, "/tcp/9100") {
|
||||||
|
clusterPeers = append(clusterPeers, addr)
|
||||||
|
cm.logger.Info("Discovered IPFS Cluster peer", zap.String("peer", addr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add IPFS peer if available
|
||||||
|
if status.IPFS != nil && status.IPFS.PeerID != "" {
|
||||||
|
for _, addr := range status.IPFS.SwarmAddresses {
|
||||||
|
if strings.Contains(addr, "/tcp/4101") && !strings.Contains(addr, "127.0.0.1") {
|
||||||
|
ipfsPeers = append(ipfsPeers, IPFSPeerEntry{
|
||||||
|
ID: status.IPFS.PeerID,
|
||||||
|
Addrs: []string{addr},
|
||||||
|
})
|
||||||
|
cm.logger.Info("Discovered IPFS peer", zap.String("peer_id", status.IPFS.PeerID))
|
||||||
|
break // One address per peer is enough
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update IPFS Cluster peer addresses
|
||||||
if len(clusterPeers) > 0 {
|
if len(clusterPeers) > 0 {
|
||||||
return cm.UpdatePeerAddresses(clusterPeers)
|
if err := cm.UpdatePeerAddresses(clusterPeers); err != nil {
|
||||||
|
cm.logger.Warn("Failed to update cluster peer addresses", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
cm.logger.Info("Updated IPFS Cluster peer addresses", zap.Int("count", len(clusterPeers)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update IPFS Peering.Peers
|
||||||
|
if len(ipfsPeers) > 0 {
|
||||||
|
if err := cm.UpdateIPFSPeeringConfig(ipfsPeers); err != nil {
|
||||||
|
cm.logger.Warn("Failed to update IPFS peering config", zap.Error(err))
|
||||||
|
} else {
|
||||||
|
cm.logger.Info("Updated IPFS Peering.Peers", zap.Int("count", len(ipfsPeers)))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NetworkStatusResponse represents the response from /v1/network/status
|
||||||
|
type NetworkStatusResponse struct {
|
||||||
|
PeerID string `json:"peer_id"`
|
||||||
|
PeerCount int `json:"peer_count"`
|
||||||
|
IPFS *NetworkStatusIPFS `json:"ipfs,omitempty"`
|
||||||
|
IPFSCluster *NetworkStatusIPFSCluster `json:"ipfs_cluster,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetworkStatusIPFS struct {
|
||||||
|
PeerID string `json:"peer_id"`
|
||||||
|
SwarmAddresses []string `json:"swarm_addresses"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetworkStatusIPFSCluster struct {
|
||||||
|
PeerID string `json:"peer_id"`
|
||||||
|
Addresses []string `json:"addresses"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPFSPeerEntry represents an IPFS peer for Peering.Peers config
|
||||||
|
type IPFSPeerEntry struct {
|
||||||
|
ID string `json:"ID"`
|
||||||
|
Addrs []string `json:"Addrs"`
|
||||||
|
}
|
||||||
|
|
||||||
+// extractIPFromMultiaddr extracts the IP address from a multiaddr
+func extractIPFromMultiaddr(ma multiaddr.Multiaddr) string {
+    if ma == nil {
+        return ""
+    }
+
+    // Try to convert to net.Addr and extract IP
+    if addr, err := manet.ToNetAddr(ma); err == nil {
+        addrStr := addr.String()
+        // Handle "ip:port" format
+        if idx := strings.LastIndex(addrStr, ":"); idx > 0 {
+            return addrStr[:idx]
+        }
+        return addrStr
+    }
+
+    // Fallback: parse manually
+    parts := strings.Split(ma.String(), "/")
+    for i, part := range parts {
+        if (part == "ip4" || part == "ip6") && i+1 < len(parts) {
+            return parts[i+1]
+        }
+    }
+
+    return ""
+}

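A quick illustration of how the helper above behaves. This is a sketch only: the sample address is arbitrary, and it assumes the same package and its multiaddr and fmt imports.

// Illustrative only: /ip4/203.0.113.10/tcp/9100 -> "203.0.113.10"; a nil multiaddr -> "".
func exampleExtractIP() {
    ma, err := multiaddr.NewMultiaddr("/ip4/203.0.113.10/tcp/9100")
    if err != nil {
        panic(err)
    }
    fmt.Println(extractIPFromMultiaddr(ma)) // prints "203.0.113.10"
}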
+// UpdateIPFSPeeringConfig updates the Peering.Peers section in IPFS config
+func (cm *ClusterConfigManager) UpdateIPFSPeeringConfig(peers []IPFSPeerEntry) error {
+    // Find IPFS config path
+    ipfsRepoPath := cm.findIPFSRepoPath()
+    if ipfsRepoPath == "" {
+        return fmt.Errorf("could not find IPFS repo path")
+    }
+
+    configPath := filepath.Join(ipfsRepoPath, "config")
+
+    // Read existing config
+    data, err := os.ReadFile(configPath)
+    if err != nil {
+        return fmt.Errorf("failed to read IPFS config: %w", err)
+    }
+
+    var config map[string]interface{}
+    if err := json.Unmarshal(data, &config); err != nil {
+        return fmt.Errorf("failed to parse IPFS config: %w", err)
+    }
+
+    // Get or create Peering section
+    peering, ok := config["Peering"].(map[string]interface{})
+    if !ok {
+        peering = make(map[string]interface{})
+    }
+
+    // Get existing peers
+    existingPeers := []IPFSPeerEntry{}
+    if existingPeersList, ok := peering["Peers"].([]interface{}); ok {
+        for _, p := range existingPeersList {
+            if peerMap, ok := p.(map[string]interface{}); ok {
+                entry := IPFSPeerEntry{}
+                if id, ok := peerMap["ID"].(string); ok {
+                    entry.ID = id
+                }
+                if addrs, ok := peerMap["Addrs"].([]interface{}); ok {
+                    for _, a := range addrs {
+                        if addr, ok := a.(string); ok {
+                            entry.Addrs = append(entry.Addrs, addr)
+                        }
+                    }
+                }
+                if entry.ID != "" {
+                    existingPeers = append(existingPeers, entry)
+                }
+            }
+        }
+    }
+
+    // Merge new peers with existing (avoid duplicates by ID)
+    seenIDs := make(map[string]bool)
+    mergedPeers := []interface{}{}
+
+    // Add existing peers first
+    for _, p := range existingPeers {
+        seenIDs[p.ID] = true
+        mergedPeers = append(mergedPeers, map[string]interface{}{
+            "ID":    p.ID,
+            "Addrs": p.Addrs,
+        })
+    }
+
+    // Add new peers
+    for _, p := range peers {
+        if !seenIDs[p.ID] {
+            seenIDs[p.ID] = true
+            mergedPeers = append(mergedPeers, map[string]interface{}{
+                "ID":    p.ID,
+                "Addrs": p.Addrs,
+            })
+        }
+    }
+
+    // Update config
+    peering["Peers"] = mergedPeers
+    config["Peering"] = peering
+
+    // Write back
+    updatedData, err := json.MarshalIndent(config, "", "  ")
+    if err != nil {
+        return fmt.Errorf("failed to marshal IPFS config: %w", err)
+    }
+
+    if err := os.WriteFile(configPath, updatedData, 0600); err != nil {
+        return fmt.Errorf("failed to write IPFS config: %w", err)
+    }
+
+    return nil
+}

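To make the merge behaviour above concrete, the sketch below shows the JSON shape that ends up under Peering.Peers in the repo config. The peer ID and address are placeholders, and the snippet assumes the same package with encoding/json and fmt imported.

// Illustrative only: demonstrates the Peering.Peers shape written by UpdateIPFSPeeringConfig.
func examplePeeringShape() {
    merged := map[string]interface{}{
        "Peering": map[string]interface{}{
            "Peers": []IPFSPeerEntry{
                {ID: "12D3KooWExampleIPFS", Addrs: []string{"/ip4/10.0.0.5/tcp/4101"}},
            },
        },
    }
    out, _ := json.MarshalIndent(merged, "", "  ")
    fmt.Println(string(out)) // each peer serializes as {"ID": "...", "Addrs": ["..."]}
}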
+// findIPFSRepoPath finds the IPFS repository path
+func (cm *ClusterConfigManager) findIPFSRepoPath() string {
+    dataDir := cm.cfg.Node.DataDir
+    if strings.HasPrefix(dataDir, "~") {
+        home, _ := os.UserHomeDir()
+        dataDir = filepath.Join(home, dataDir[1:])
+    }
+
+    possiblePaths := []string{
+        filepath.Join(dataDir, "ipfs", "repo"),
+        filepath.Join(dataDir, "node-1", "ipfs", "repo"),
+        filepath.Join(dataDir, "node-2", "ipfs", "repo"),
+        filepath.Join(filepath.Dir(dataDir), "ipfs", "repo"),
+    }
+
+    for _, path := range possiblePaths {
+        if _, err := os.Stat(filepath.Join(path, "config")); err == nil {
+            return path
+        }
+    }
+
+    return ""
+}
+
 func (cm *ClusterConfigManager) getPeerID() (string, error) {
     dataDir := cm.cfg.Node.DataDir
     if strings.HasPrefix(dataDir, "~") {

@ -2,8 +2,6 @@ package node

 import (
     "context"
-    "crypto/tls"
-    "fmt"
     "net"
     "net/http"
     "os"
@ -12,11 +10,13 @@ import (
     "github.com/DeBrosOfficial/network/pkg/gateway"
     "github.com/DeBrosOfficial/network/pkg/ipfs"
     "github.com/DeBrosOfficial/network/pkg/logging"
-    "golang.org/x/crypto/acme"
+    "go.uber.org/zap"
-    "golang.org/x/crypto/acme/autocert"
 )

 // startHTTPGateway initializes and starts the full API gateway
+// The gateway always runs HTTP on the configured port (default :6001).
+// When running with Caddy (nameserver mode), Caddy handles external HTTPS
+// and proxies requests to this internal HTTP gateway.
 func (n *Node) startHTTPGateway(ctx context.Context) error {
     if !n.config.HTTPGateway.Enabled {
         n.logger.ComponentInfo(logging.ComponentNode, "HTTP Gateway disabled in config")
@ -43,9 +43,6 @@ func (n *Node) startHTTPGateway(ctx context.Context) error {
         IPFSClusterAPIURL: n.config.HTTPGateway.IPFSClusterAPIURL,
         IPFSAPIURL:        n.config.HTTPGateway.IPFSAPIURL,
         IPFSTimeout:       n.config.HTTPGateway.IPFSTimeout,
-        EnableHTTPS:       n.config.HTTPGateway.HTTPS.Enabled,
-        DomainName:        n.config.HTTPGateway.HTTPS.Domain,
-        TLSCacheDir:       n.config.HTTPGateway.HTTPS.CacheDir,
         BaseDomain:        n.config.HTTPGateway.BaseDomain,
     }

@ -55,135 +52,28 @@ func (n *Node) startHTTPGateway(ctx context.Context) error {
     }
     n.apiGateway = apiGateway

-    var certManager *autocert.Manager
-    if gwCfg.EnableHTTPS && gwCfg.DomainName != "" {
-        tlsCacheDir := gwCfg.TLSCacheDir
-        if tlsCacheDir == "" {
-            tlsCacheDir = "/home/debros/.orama/tls-cache"
-        }
-        _ = os.MkdirAll(tlsCacheDir, 0700)
-
-        certManager = &autocert.Manager{
-            Prompt:     autocert.AcceptTOS,
-            HostPolicy: autocert.HostWhitelist(gwCfg.DomainName),
-            Cache:      autocert.DirCache(tlsCacheDir),
-            Email:      fmt.Sprintf("admin@%s", gwCfg.DomainName),
-            Client: &acme.Client{
-                DirectoryURL: "https://acme-staging-v02.api.letsencrypt.org/directory",
-            },
-        }
-        n.certManager = certManager
-        n.certReady = make(chan struct{})
-    }
-
-    httpReady := make(chan struct{})
-
     go func() {
-        if gwCfg.EnableHTTPS && gwCfg.DomainName != "" && certManager != nil {
-            httpsPort := 443
-            httpPort := 80
+        server := &http.Server{
+            Addr:    gwCfg.ListenAddr,
+            Handler: apiGateway.Routes(),

-            httpServer := &http.Server{
-                Addr: fmt.Sprintf(":%d", httpPort),
-                Handler: certManager.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-                    target := fmt.Sprintf("https://%s%s", r.Host, r.URL.RequestURI())
-                    http.Redirect(w, r, target, http.StatusMovedPermanently)
-                })),
-            }
-
-            httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", httpPort))
-            if err != nil {
-                close(httpReady)
-                return
-            }
-
-            go httpServer.Serve(httpListener)
-
-            // Pre-provision cert
-            certReq := &tls.ClientHelloInfo{ServerName: gwCfg.DomainName}
-            _, certErr := certManager.GetCertificate(certReq)
-
-            if certErr != nil {
-                close(httpReady)
-                httpServer.Handler = apiGateway.Routes()
-                return
-            }
-
-            close(httpReady)
-
-            tlsConfig := &tls.Config{
-                MinVersion:     tls.VersionTLS12,
-                GetCertificate: certManager.GetCertificate,
-            }
-
-            httpsServer := &http.Server{
-                Addr:      fmt.Sprintf(":%d", httpsPort),
-                TLSConfig: tlsConfig,
-                Handler:   apiGateway.Routes(),
-            }
-            n.apiGatewayServer = httpsServer
-
-            ln, err := tls.Listen("tcp", fmt.Sprintf(":%d", httpsPort), tlsConfig)
-            if err == nil {
-                httpsServer.Serve(ln)
-            }
-        } else {
-            close(httpReady)
-            server := &http.Server{
-                Addr:    gwCfg.ListenAddr,
-                Handler: apiGateway.Routes(),
-            }
-            n.apiGatewayServer = server
-            ln, err := net.Listen("tcp", gwCfg.ListenAddr)
-            if err == nil {
-                server.Serve(ln)
-            }
         }
+        n.apiGatewayServer = server
+
+        ln, err := net.Listen("tcp", gwCfg.ListenAddr)
+        if err != nil {
+            n.logger.ComponentError(logging.ComponentNode, "Failed to bind HTTP gateway",
+                zap.String("addr", gwCfg.ListenAddr), zap.Error(err))
+            return
+        }

+        n.logger.ComponentInfo(logging.ComponentNode, "HTTP gateway started",
+            zap.String("addr", gwCfg.ListenAddr))
+        server.Serve(ln)
     }()

-    // SNI Gateway
-    if n.config.HTTPGateway.SNI.Enabled && n.certManager != nil {
-        go n.startSNIGateway(ctx, httpReady)
-    }
-
     return nil
 }

-func (n *Node) startSNIGateway(ctx context.Context, httpReady <-chan struct{}) {
-    <-httpReady
-    domain := n.config.HTTPGateway.HTTPS.Domain
-    if domain == "" {
-        return
-    }
-
-    certReq := &tls.ClientHelloInfo{ServerName: domain}
-    tlsCert, err := n.certManager.GetCertificate(certReq)
-    if err != nil {
-        return
-    }
-
-    tlsCacheDir := n.config.HTTPGateway.HTTPS.CacheDir
-    if tlsCacheDir == "" {
-        tlsCacheDir = "/home/debros/.orama/tls-cache"
-    }
-
-    certPath := filepath.Join(tlsCacheDir, domain+".crt")
-    keyPath := filepath.Join(tlsCacheDir, domain+".key")
-
-    if err := extractPEMFromTLSCert(tlsCert, certPath, keyPath); err == nil {
-        if n.certReady != nil {
-            close(n.certReady)
-        }
-    }
-
-    sniCfg := n.config.HTTPGateway.SNI
-    sniGateway, err := gateway.NewTCPSNIGateway(n.logger, &sniCfg)
-    if err == nil {
-        n.sniGateway = sniGateway
-        sniGateway.Start(ctx)
-    }
-}
-
 // startIPFSClusterConfig initializes and ensures IPFS Cluster configuration
 func (n *Node) startIPFSClusterConfig() error {
     n.logger.ComponentInfo(logging.ComponentNode, "Initializing IPFS Cluster configuration")
@ -202,4 +92,3 @@ func (n *Node) startIPFSClusterConfig() error {
     _ = cm.RepairPeerConfiguration()
     return nil
 }

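Per the new startHTTPGateway comments above, external HTTPS is now delegated to Caddy, which proxies to the plain-HTTP gateway on its internal port. As an illustration only (not part of this commit and not the project's actual proxy), a minimal TLS-terminating reverse proxy in front of the documented :6001 default could look like this:

// Illustration only: a stand-in for the external TLS-terminating proxy layer (e.g. Caddy).
package main

import (
    "log"
    "net/http"
    "net/http/httputil"
    "net/url"
)

func main() {
    // The internal HTTP gateway address; ":6001" is the default mentioned in the comments above.
    internal, err := url.Parse("http://127.0.0.1:6001")
    if err != nil {
        log.Fatal(err)
    }
    proxy := httputil.NewSingleHostReverseProxy(internal)
    // cert.pem/key.pem are placeholders for whatever certificates the proxy manages.
    log.Fatal(http.ListenAndServeTLS(":443", "cert.pem", "key.pem", proxy))
}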
@ -184,16 +184,18 @@ func (n *Node) GetDiscoveryStatus() map[string]interface{} {
 // Unlike nodes which need extensive monitoring, clients only need basic health checks.
 func (n *Node) startConnectionMonitoring() {
     go func() {
-        ticker := time.NewTicker(30 * time.Second) // Less frequent than nodes (60s vs 30s)
+        ticker := time.NewTicker(30 * time.Second) // Ticks every 30 seconds
         defer ticker.Stop()

         var lastPeerCount int
         firstCheck := true
+        tickCount := 0

         for range ticker.C {
             if n.host == nil {
                 return
             }
+            tickCount++

             // Get current peer count
             peers := n.host.Network().Peers()
@ -217,9 +219,9 @@ func (n *Node) startConnectionMonitoring() {
             // This discovers all cluster peers and updates peer_addresses in service.json
             // so IPFS Cluster can automatically connect to all discovered peers
             if n.clusterConfigManager != nil {
-                // First try to discover from LibP2P connections (works even if cluster peers aren't connected yet)
-                // This runs every minute to discover peers automatically via LibP2P discovery
-                if time.Now().Unix()%60 == 0 {
+                // Discover from LibP2P connections every 2 ticks (once per minute)
+                // Works even if cluster peers aren't connected yet
+                if tickCount%2 == 0 {
                     if err := n.clusterConfigManager.DiscoverClusterPeersFromLibP2P(n.host); err != nil {
                         n.logger.ComponentWarn(logging.ComponentNode, "Failed to discover cluster peers from LibP2P", zap.Error(err))
                     } else {
@ -227,9 +229,9 @@ func (n *Node) startConnectionMonitoring() {
                     }
                 }

-                // Also try to update from cluster API (works once peers are connected)
-                // Update all cluster peers every 2 minutes to discover new peers
-                if time.Now().Unix()%120 == 0 {
+                // Update from cluster API every 4 ticks (once per 2 minutes)
+                // Works once peers are already connected
+                if tickCount%4 == 0 {
                     if err := n.clusterConfigManager.UpdateAllClusterPeers(); err != nil {
                         n.logger.ComponentWarn(logging.ComponentNode, "Failed to update cluster peers during monitoring", zap.Error(err))
                     } else {

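The tick-counting change above is worth spelling out: with a 30-second ticker, the old time.Now().Unix()%60 == 0 check only fired when a tick happened to land exactly on a minute boundary, so periodic discovery could effectively never run; counting ticks makes the schedule deterministic. A minimal sketch of the pattern (standard library only, assuming the time package is imported; names are illustrative):

// Illustrative sketch of the tickCount scheduling pattern used above.
func exampleTickSchedule(stop <-chan struct{}) {
    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()

    tickCount := 0
    for {
        select {
        case <-stop:
            return
        case <-ticker.C:
            tickCount++
            if tickCount%2 == 0 {
                // runs roughly once per minute (e.g. LibP2P-based discovery)
            }
            if tickCount%4 == 0 {
                // runs roughly once every two minutes (e.g. cluster API refresh)
            }
        }
    }
}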
@ -18,7 +18,6 @@ import (
     database "github.com/DeBrosOfficial/network/pkg/rqlite"
     "github.com/libp2p/go-libp2p/core/host"
     "go.uber.org/zap"
-    "golang.org/x/crypto/acme/autocert"
 )

 // Node represents a network node with RQLite database
@ -44,17 +43,8 @@ type Node struct {
     clusterConfigManager *ipfs.ClusterConfigManager

     // Full gateway (for API, auth, pubsub, and internal service routing)
     apiGateway       *gateway.Gateway
     apiGatewayServer *http.Server
-
-    // SNI gateway (for TCP routing of raft, ipfs, olric, etc.)
-    sniGateway *gateway.TCPSNIGateway
-
-    // Shared certificate manager for HTTPS and SNI
-    certManager *autocert.Manager
-
-    // Certificate ready signal - closed when TLS certificates are extracted and ready for use
-    certReady chan struct{}
 }

 // NewNode creates a new network node
@ -156,13 +146,6 @@ func (n *Node) Stop() error {
         n.apiGateway.Close()
     }

-    // Stop SNI Gateway
-    if n.sniGateway != nil {
-        if err := n.sniGateway.Stop(); err != nil {
-            n.logger.ComponentWarn(logging.ComponentNode, "SNI Gateway stop error", zap.Error(err))
-        }
-    }
-
     // Stop cluster discovery
     if n.clusterDiscovery != nil {
         n.clusterDiscovery.Stop()

@ -5,8 +5,6 @@ import (
     "fmt"

     database "github.com/DeBrosOfficial/network/pkg/rqlite"
-    "go.uber.org/zap"
-    "time"
 )

 // startRQLite initializes and starts the RQLite database
@ -55,25 +53,6 @@ func (n *Node) startRQLite(ctx context.Context) error {
         n.logger.Info("Cluster discovery service started (waiting for RQLite)")
     }

-    // If node-to-node TLS is configured, wait for certificates to be provisioned
-    // This ensures RQLite can start with TLS when joining through the SNI gateway
-    if n.config.Database.NodeCert != "" && n.config.Database.NodeKey != "" && n.certReady != nil {
-        n.logger.Info("RQLite node TLS configured, waiting for certificates to be provisioned...",
-            zap.String("node_cert", n.config.Database.NodeCert),
-            zap.String("node_key", n.config.Database.NodeKey))
-
-        // Wait for certificate ready signal with timeout
-        certTimeout := 5 * time.Minute
-        select {
-        case <-n.certReady:
-            n.logger.Info("Certificates ready, proceeding with RQLite startup")
-        case <-time.After(certTimeout):
-            return fmt.Errorf("timeout waiting for TLS certificates after %v - ensure HTTPS is configured and ports 80/443 are accessible for ACME challenges", certTimeout)
-        case <-ctx.Done():
-            return fmt.Errorf("context cancelled while waiting for certificates: %w", ctx.Err())
-        }
-    }
-
     // Start RQLite FIRST before updating metadata
     if err := n.rqliteManager.Start(ctx); err != nil {
         return err
BIN testdata/apps/go-backend/app vendored
Binary file not shown.

34 testdata/apps/go-backend/main.go vendored
@ -59,13 +59,16 @@ func executeSQL(query string, args ...interface{}) ([]map[string]interface{}, error) {
     }

     // Build the query with parameters
+    // Gateway expects: database_name, query (not sql), params
     reqBody := map[string]interface{}{
-        "sql":    query,
-        "params": args,
+        "database_name": databaseName,
+        "query":         query,
+        "params":        args,
     }
     bodyBytes, _ := json.Marshal(reqBody)

-    url := fmt.Sprintf("%s/v1/db/%s/query", gatewayURL, databaseName)
+    // Gateway endpoint is /v1/db/sqlite/query (not /v1/db/{name}/query)
+    url := fmt.Sprintf("%s/v1/db/sqlite/query", gatewayURL)
     req, err := http.NewRequest("POST", url, bytes.NewBuffer(bodyBytes))
     if err != nil {
         return nil, err
@ -87,15 +90,34 @@ func executeSQL(query string, args ...interface{}) ([]map[string]interface{}, error) {
         return nil, fmt.Errorf("database error: %s", string(body))
     }

+    // Gateway returns: columns []string, rows [][]interface{}
+    // We need to convert rows to []map[string]interface{}
     var result struct {
-        Rows    []map[string]interface{} `json:"rows"`
+        Rows    [][]interface{} `json:"rows"`
         Columns []string        `json:"columns"`
+        Error   string          `json:"error,omitempty"`
     }
     if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
         return nil, err
     }

-    return result.Rows, nil
+    if result.Error != "" {
+        return nil, fmt.Errorf("query error: %s", result.Error)
+    }
+
+    // Convert [][]interface{} to []map[string]interface{}
+    rows := make([]map[string]interface{}, 0, len(result.Rows))
+    for _, row := range result.Rows {
+        rowMap := make(map[string]interface{})
+        for i, col := range result.Columns {
+            if i < len(row) {
+                rowMap[col] = row[i]
+            }
+        }
+        rows = append(rows, rowMap)
+    }
+
+    return rows, nil
 }

 // initDatabase creates the users table if it doesn't exist

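Based on the comments in the diff above, the gateway exchange the test backend now assumes looks roughly like this (values are placeholders): the request body {"database_name": ..., "query": ..., "params": [...]} is posted to /v1/db/sqlite/query, and the response carries parallel "columns" and "rows" arrays. The sketch below shows just the rows-to-maps conversion on such a response; it assumes fmt is imported.

// Illustrative only: converting a columns/rows response into row maps, as executeSQL does above.
func exampleRowsToMaps() {
    columns := []string{"id", "name"}
    rows := [][]interface{}{{1, "alice"}, {2, "bob"}}

    out := make([]map[string]interface{}, 0, len(rows))
    for _, row := range rows {
        m := make(map[string]interface{})
        for i, col := range columns {
            if i < len(row) {
                m[col] = row[i]
            }
        }
        out = append(out, m)
    }
    fmt.Println(out) // [map[id:1 name:alice] map[id:2 name:bob]]
}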