Mirror of https://github.com/DeBrosOfficial/network.git (synced 2025-12-11 08:18:49 +00:00)
feat: update node and gateway commands to use Orama naming convention
- Renamed the node executable from `node` to `orama-node` in the Makefile and various scripts to reflect the new naming convention.
- Updated the gateway command to `orama-gateway` for consistency.
- Modified service configurations and systemd templates to ensure proper execution of the renamed binaries.
- Enhanced the interactive installer to prompt for the gateway URL, allowing users to select between local and remote nodes.
- Added functionality to extract domain information for TLS configuration, improving security for remote connections.
This commit is contained in:
parent 3505a6a0eb
commit 9193f088a3
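For a quick smoke test of the rename (hypothetical invocations based on the Makefile targets below): `make build` now emits `bin/orama-node` instead of `bin/node`, so scripts calling `./bin/node` must switch to `./bin/orama-node --config node.yaml`. Note that the diff keeps the gateway build output at `bin/gateway` while the `run-gateway` target moves to `go run ./cmd/orama-gateway`.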
CHANGELOG.md (22 lines changed)
@@ -13,6 +13,28 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
 ### Deprecated
 
 ### Fixed
 
+## [0.72.0] - 2025-11-28
+
+### Added
+
+- Interactive prompt for selecting local or remote gateway URL during CLI login.
+- Support for discovering and configuring IPFS Cluster peers during installation and runtime via the gateway status endpoint.
+- New CLI flags (`--ipfs-cluster-peer`, `--ipfs-cluster-addrs`) added to the `prod install` command for cluster discovery.
+
+### Changed
+
+- Renamed the main network node executable from `node` to `orama-node` and the gateway executable to `orama-gateway`.
+- Improved the `auth login` flow to use a TLS-aware HTTP client, supporting Let's Encrypt staging certificates for remote gateways.
+- Updated the production installer to set `CAP_NET_BIND_SERVICE` on `orama-node` to allow binding to privileged ports (80/443) without root.
+- Updated the production installer to configure IPFS Cluster to listen on port 9098 for consistent multi-node communication.
+- Refactored the `prod install` process to generate configurations before initializing services, ensuring configuration files are present.
+
+### Deprecated
+
+### Removed
+
+### Fixed
+
+- Corrected the IPFS Cluster API port used in `node.yaml` template from 9096 to 9098 to match the cluster's LibP2P port.
+- Fixed the `anyone-client` systemd service configuration to use the correct binary name and allow writing to the home directory.
+
 ## [0.71.0] - 2025-11-27
 
 ### Added

Makefile (12 lines changed)
@@ -19,7 +19,7 @@ test-e2e:
 
 .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
 
-VERSION := 0.71.0
+VERSION := 0.72.0
 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
 DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
 LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@@ -29,7 +29,7 @@ build: deps
     @echo "Building network executables (version=$(VERSION))..."
     @mkdir -p bin
     go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
-    go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
+    go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
     go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
     # Inject gateway build metadata via pkg path variables
     go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
@@ -51,25 +51,25 @@ clean:
 run-node:
     @echo "Starting node..."
    @echo "Config: ~/.orama/node.yaml"
-    go run ./cmd/node --config node.yaml
+    go run ./cmd/orama-node --config node.yaml
 
 # Run second node - requires join address
 run-node2:
     @echo "Starting second node..."
     @echo "Config: ~/.orama/node2.yaml"
-    go run ./cmd/node --config node2.yaml
+    go run ./cmd/orama-node --config node2.yaml
 
 # Run third node - requires join address
 run-node3:
     @echo "Starting third node..."
     @echo "Config: ~/.orama/node3.yaml"
-    go run ./cmd/node --config node3.yaml
+    go run ./cmd/orama-node --config node3.yaml
 
 # Run gateway HTTP server
 run-gateway:
     @echo "Starting gateway HTTP server..."
     @echo "Note: Config must be in ~/.orama/data/gateway.yaml"
-    go run ./cmd/gateway
+    go run ./cmd/orama-gateway
 
 # Setup local domain names for development
 setup-domains:

@@ -10,6 +10,8 @@ import (
     "os"
     "strings"
     "time"
+
+    "github.com/DeBrosOfficial/network/pkg/tlsutil"
 )
 
 // PerformSimpleAuthentication performs a simple authentication flow where the user
@@ -91,7 +93,13 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
     }
 
     endpoint := gatewayURL + "/v1/auth/simple-key"
-    resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload))
+    // Extract domain from URL for TLS configuration
+    // This uses tlsutil which handles Let's Encrypt staging certificates for *.debros.network
+    domain := extractDomainFromURL(gatewayURL)
+    client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
+
+    resp, err := client.Post(endpoint, "application/json", bytes.NewReader(payload))
     if err != nil {
         return "", fmt.Errorf("failed to call gateway: %w", err)
     }
@@ -114,3 +122,23 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
 
     return apiKey, nil
 }
+
+// extractDomainFromURL extracts the domain from a URL
+// Removes protocol (https://, http://), path, and port components
+func extractDomainFromURL(url string) string {
+    // Remove protocol prefixes
+    url = strings.TrimPrefix(url, "https://")
+    url = strings.TrimPrefix(url, "http://")
+
+    // Remove path component
+    if idx := strings.Index(url, "/"); idx != -1 {
+        url = url[:idx]
+    }
+
+    // Remove port component
+    if idx := strings.Index(url, ":"); idx != -1 {
+        url = url[:idx]
+    }
+
+    return url
+}

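As a reading aid (not part of the commit), here is a self-contained sketch of how the new `extractDomainFromURL` helper behaves on a few made-up URLs. One limitation visible from the logic: a bracketed IPv6 host such as `http://[::1]:6001` would not survive the simple colon trimming.

```go
package main

import (
	"fmt"
	"strings"
)

// Same trimming logic as the extractDomainFromURL helper added above:
// drop the protocol, then everything from the first "/", then the ":port".
func extractDomainFromURL(url string) string {
	url = strings.TrimPrefix(url, "https://")
	url = strings.TrimPrefix(url, "http://")
	if idx := strings.Index(url, "/"); idx != -1 {
		url = url[:idx]
	}
	if idx := strings.Index(url, ":"); idx != -1 {
		url = url[:idx]
	}
	return url
}

func main() {
	fmt.Println(extractDomainFromURL("https://node-hk19de.debros.network"))          // node-hk19de.debros.network
	fmt.Println(extractDomainFromURL("http://localhost:6001"))                       // localhost
	fmt.Println(extractDomainFromURL("https://node-hk19de.debros.network:443/v1/x")) // node-hk19de.debros.network
}
```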
@@ -1,8 +1,10 @@
 package cli
 
 import (
+    "bufio"
     "fmt"
     "os"
+    "strings"
 
     "github.com/DeBrosOfficial/network/pkg/auth"
 )
@@ -56,7 +58,8 @@ func showAuthHelp() {
 }
 
 func handleAuthLogin() {
-    gatewayURL := getGatewayURL()
+    // Prompt for node selection
+    gatewayURL := promptForGatewayURL()
     fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL)
 
     // Use the simple authentication flow
@@ -161,7 +164,55 @@ func handleAuthStatus() {
     }
 }
 
+// promptForGatewayURL interactively prompts for the gateway URL
+// Allows user to choose between local node or remote node by domain
+func promptForGatewayURL() string {
+    // Check environment variable first (allows override without prompting)
+    if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
+        return url
+    }
+
+    reader := bufio.NewReader(os.Stdin)
+
+    fmt.Println("\n🌐 Node Connection")
+    fmt.Println("==================")
+    fmt.Println("1. Local node (localhost:6001)")
+    fmt.Println("2. Remote node (enter domain)")
+    fmt.Print("\nSelect option [1/2]: ")
+
+    choice, _ := reader.ReadString('\n')
+    choice = strings.TrimSpace(choice)
+
+    if choice == "1" || choice == "" {
+        return "http://localhost:6001"
+    }
+
+    if choice != "2" {
+        fmt.Println("⚠️ Invalid option, using localhost")
+        return "http://localhost:6001"
+    }
+
+    fmt.Print("Enter node domain (e.g., node-hk19de.debros.network): ")
+    domain, _ := reader.ReadString('\n')
+    domain = strings.TrimSpace(domain)
+
+    if domain == "" {
+        fmt.Println("⚠️ No domain entered, using localhost")
+        return "http://localhost:6001"
+    }
+
+    // Remove any protocol prefix if user included it
+    domain = strings.TrimPrefix(domain, "https://")
+    domain = strings.TrimPrefix(domain, "http://")
+    // Remove trailing slash
+    domain = strings.TrimSuffix(domain, "/")
+
+    // Use HTTPS for remote domains
+    return fmt.Sprintf("https://%s", domain)
+}
+
 // getGatewayURL returns the gateway URL based on environment or env var
+// Used by other commands that don't need interactive node selection
 func getGatewayURL() string {
     // Check environment variable first (for backwards compatibility)
     if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {

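One consequence of the prompt change worth noting: both `promptForGatewayURL` and the retained `getGatewayURL` check the `DEBROS_GATEWAY_URL` environment variable first, so scripted or non-interactive logins can still bypass the new prompt entirely by exporting that variable.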
@@ -26,6 +26,12 @@ type IPFSPeerInfo struct {
     Addrs  []string
 }
 
+// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster discovery
+type IPFSClusterPeerInfo struct {
+    PeerID string
+    Addrs  []string
+}
+
 // validateSwarmKey validates that a swarm key is 64 hex characters
 func validateSwarmKey(key string) error {
     key = strings.TrimSpace(key)
@@ -76,6 +82,13 @@ func runInteractiveInstaller() {
     if len(config.IPFSSwarmAddrs) > 0 {
         args = append(args, "--ipfs-addrs", strings.Join(config.IPFSSwarmAddrs, ","))
     }
+    // Pass IPFS Cluster peer info for cluster peer_addresses configuration
+    if config.IPFSClusterPeerID != "" {
+        args = append(args, "--ipfs-cluster-peer", config.IPFSClusterPeerID)
+    }
+    if len(config.IPFSClusterAddrs) > 0 {
+        args = append(args, "--ipfs-cluster-addrs", strings.Join(config.IPFSClusterAddrs, ","))
+    }
 }
 
 // Re-run with collected args
@@ -315,6 +328,8 @@ func showProdHelp() {
     fmt.Printf(" --swarm-key HEX - 64-hex IPFS swarm key (required when joining)\n")
     fmt.Printf(" --ipfs-peer ID - IPFS peer ID to connect to (auto-discovered)\n")
     fmt.Printf(" --ipfs-addrs ADDRS - IPFS swarm addresses (auto-discovered)\n")
+    fmt.Printf(" --ipfs-cluster-peer ID - IPFS Cluster peer ID (auto-discovered)\n")
+    fmt.Printf(" --ipfs-cluster-addrs ADDRS - IPFS Cluster addresses (auto-discovered)\n")
     fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly, default: main)\n")
     fmt.Printf(" --no-pull - Skip git clone/pull, use existing /home/debros/src\n")
     fmt.Printf(" --ignore-resource-checks - Skip disk/RAM/CPU prerequisite validation\n")
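With these flags in place, a hypothetical non-interactive join of an existing network might look like `orama prod install --domain node2.example.com --swarm-key <64-hex> --ipfs-cluster-peer <cluster-peer-id> --ipfs-cluster-addrs /ip4/203.0.113.10/tcp/9098` (flag names from the help text above; values are placeholders). The interactive TUI installer collects the same values automatically via peer-domain discovery, as the later hunks show.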
@@ -369,6 +384,8 @@ func handleProdInstall(args []string) {
     swarmKey := fs.String("swarm-key", "", "64-hex IPFS swarm key (for joining existing private network)")
     ipfsPeerID := fs.String("ipfs-peer", "", "IPFS peer ID to connect to (auto-discovered from peer domain)")
     ipfsAddrs := fs.String("ipfs-addrs", "", "Comma-separated IPFS swarm addresses (auto-discovered from peer domain)")
+    ipfsClusterPeerID := fs.String("ipfs-cluster-peer", "", "IPFS Cluster peer ID to connect to (auto-discovered from peer domain)")
+    ipfsClusterAddrs := fs.String("ipfs-cluster-addrs", "", "Comma-separated IPFS Cluster addresses (auto-discovered from peer domain)")
     interactive := fs.Bool("interactive", false, "Run interactive TUI installer")
     dryRun := fs.Bool("dry-run", false, "Show what would be done without making changes")
     noPull := fs.Bool("no-pull", false, "Skip git clone/pull, use existing /home/debros/src")
@@ -488,6 +505,19 @@ func handleProdInstall(args []string) {
         }
     }
 
+    // Store IPFS Cluster peer info for cluster peer discovery
+    var ipfsClusterPeerInfo *IPFSClusterPeerInfo
+    if *ipfsClusterPeerID != "" {
+        var addrs []string
+        if *ipfsClusterAddrs != "" {
+            addrs = strings.Split(*ipfsClusterAddrs, ",")
+        }
+        ipfsClusterPeerInfo = &IPFSClusterPeerInfo{
+            PeerID: *ipfsClusterPeerID,
+            Addrs:  addrs,
+        }
+    }
+
     setup := production.NewProductionSetup(oramaHome, os.Stdout, *force, *branch, *noPull, *skipResourceChecks)
 
     // Inform user if skipping git pull
@@ -548,21 +578,8 @@ func handleProdInstall(args []string) {
         os.Exit(1)
     }
 
-    // Phase 2c: Initialize services (after secrets are in place)
-    fmt.Printf("\nPhase 2c: Initializing services...\n")
-    var prodIPFSPeer *production.IPFSPeerInfo
-    if ipfsPeerInfo != nil {
-        prodIPFSPeer = &production.IPFSPeerInfo{
-            PeerID: ipfsPeerInfo.PeerID,
-            Addrs:  ipfsPeerInfo.Addrs,
-        }
-    }
-    if err := setup.Phase2cInitializeServices(peers, *vpsIP, prodIPFSPeer); err != nil {
-        fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
-        os.Exit(1)
-    }
-
-    // Phase 4: Generate configs
+    // Phase 4: Generate configs (BEFORE service initialization)
+    // This ensures node.yaml exists before services try to access it
     fmt.Printf("\n⚙️ Phase 4: Generating configurations...\n")
     enableHTTPS := *domain != ""
     if err := setup.Phase4GenerateConfigs(peers, *vpsIP, enableHTTPS, *domain, *joinAddress); err != nil {
@@ -578,6 +595,27 @@ func handleProdInstall(args []string) {
     }
     fmt.Printf(" ✓ Configuration validated\n")
 
+    // Phase 2c: Initialize services (after config is in place)
+    fmt.Printf("\nPhase 2c: Initializing services...\n")
+    var prodIPFSPeer *production.IPFSPeerInfo
+    if ipfsPeerInfo != nil {
+        prodIPFSPeer = &production.IPFSPeerInfo{
+            PeerID: ipfsPeerInfo.PeerID,
+            Addrs:  ipfsPeerInfo.Addrs,
+        }
+    }
+    var prodIPFSClusterPeer *production.IPFSClusterPeerInfo
+    if ipfsClusterPeerInfo != nil {
+        prodIPFSClusterPeer = &production.IPFSClusterPeerInfo{
+            PeerID: ipfsClusterPeerInfo.PeerID,
+            Addrs:  ipfsClusterPeerInfo.Addrs,
+        }
+    }
+    if err := setup.Phase2cInitializeServices(peers, *vpsIP, prodIPFSPeer, prodIPFSClusterPeer); err != nil {
+        fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
+        os.Exit(1)
+    }
+
     // Phase 5: Create systemd services
     fmt.Printf("\n🔧 Phase 5: Creating systemd services...\n")
     if err := setup.Phase5CreateSystemdServices(enableHTTPS); err != nil {
@@ -876,20 +914,23 @@ func handleProdUpgrade(args []string) {
         fmt.Printf(" - Join address: %s\n", joinAddress)
     }
 
-    // Phase 2c: Ensure services are properly initialized (fixes existing repos)
-    // Now that we have peers and VPS IP, we can properly configure IPFS Cluster
-    // Note: IPFS peer info is nil for upgrades - peering is only configured during initial install
-    fmt.Printf("\nPhase 2c: Ensuring services are properly initialized...\n")
-    if err := setup.Phase2cInitializeServices(peers, vpsIP, nil); err != nil {
-        fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
-        os.Exit(1)
-    }
-
+    // Phase 4: Generate configs (BEFORE service initialization)
+    // This ensures node.yaml exists before services try to access it
     if err := setup.Phase4GenerateConfigs(peers, vpsIP, enableHTTPS, domain, joinAddress); err != nil {
         fmt.Fprintf(os.Stderr, "⚠️ Config generation warning: %v\n", err)
         fmt.Fprintf(os.Stderr, " Existing configs preserved\n")
     }
 
+    // Phase 2c: Ensure services are properly initialized (fixes existing repos)
+    // Now that we have peers and VPS IP, we can properly configure IPFS Cluster
+    // Note: IPFS peer info is nil for upgrades - peering is only configured during initial install
+    // Note: IPFS Cluster peer info is also nil for upgrades - peer_addresses is only configured during initial install
+    fmt.Printf("\nPhase 2c: Ensuring services are properly initialized...\n")
+    if err := setup.Phase2cInitializeServices(peers, vpsIP, nil, nil); err != nil {
+        fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
+        os.Exit(1)
+    }
+
     // Phase 5: Update systemd services
     fmt.Printf("\n🔧 Phase 5: Updating systemd services...\n")
     if err := setup.Phase5CreateSystemdServices(enableHTTPS); err != nil {

@@ -509,6 +509,9 @@ func (n *NetworkInfoImpl) GetStatus(ctx context.Context) (*NetworkStatus, error)
     // Try to get IPFS peer info (optional - don't fail if unavailable)
     ipfsInfo := queryIPFSPeerInfo()
 
+    // Try to get IPFS Cluster peer info (optional - don't fail if unavailable)
+    ipfsClusterInfo := queryIPFSClusterPeerInfo()
+
     return &NetworkStatus{
         NodeID: host.ID().String(),
         PeerID: host.ID().String(),
@@ -517,6 +520,7 @@ func (n *NetworkInfoImpl) GetStatus(ctx context.Context) (*NetworkStatus, error)
         DatabaseSize: dbSize,
         Uptime:       time.Since(n.client.startTime),
         IPFS:         ipfsInfo,
+        IPFSCluster:  ipfsClusterInfo,
     }, nil
 }
 
@@ -558,6 +562,44 @@ func queryIPFSPeerInfo() *IPFSPeerInfo {
     }
 }
 
+// queryIPFSClusterPeerInfo queries the local IPFS Cluster API for peer information
+// Returns nil if IPFS Cluster is not running or unavailable
+func queryIPFSClusterPeerInfo() *IPFSClusterPeerInfo {
+    // IPFS Cluster API typically runs on port 9094 in our setup
+    client := &http.Client{Timeout: 2 * time.Second}
+    resp, err := client.Get("http://localhost:9094/id")
+    if err != nil {
+        return nil // IPFS Cluster not available
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return nil
+    }
+
+    var result struct {
+        ID        string   `json:"id"`
+        Addresses []string `json:"addresses"`
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+        return nil
+    }
+
+    // Filter addresses to only include public/routable ones for cluster discovery
+    var clusterAddrs []string
+    for _, addr := range result.Addresses {
+        // Skip loopback addresses - only keep routable addresses
+        if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
+            clusterAddrs = append(clusterAddrs, addr)
+        }
+    }
+
+    return &IPFSClusterPeerInfo{
+        PeerID:    result.ID,
+        Addresses: clusterAddrs,
+    }
+}
+
 // ConnectToPeer connects to a specific peer
 func (n *NetworkInfoImpl) ConnectToPeer(ctx context.Context, peerAddr string) error {
     if !n.client.isConnected() {

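For context, a minimal sketch (not part of the commit) of the loopback filter `queryIPFSClusterPeerInfo` applies to the addresses returned by the cluster `/id` endpoint; the multiaddresses are invented examples:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical addresses as a cluster /id response might report them.
	addresses := []string{
		"/ip4/127.0.0.1/tcp/9098/p2p/12D3KooWExample",
		"/ip6/::1/tcp/9098/p2p/12D3KooWExample",
		"/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWExample",
	}

	// Same rule as above: skip loopback, keep only routable addresses.
	var clusterAddrs []string
	for _, addr := range addresses {
		if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
			clusterAddrs = append(clusterAddrs, addr)
		}
	}

	fmt.Println(clusterAddrs) // [/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWExample]
}
```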
@@ -114,13 +114,14 @@ type PeerInfo struct {
 
 // NetworkStatus contains overall network status
 type NetworkStatus struct {
     NodeID       string        `json:"node_id"`
     PeerID       string        `json:"peer_id"`
     Connected    bool          `json:"connected"`
     PeerCount    int           `json:"peer_count"`
     DatabaseSize int64         `json:"database_size"`
     Uptime       time.Duration `json:"uptime"`
     IPFS         *IPFSPeerInfo `json:"ipfs,omitempty"`
+    IPFSCluster  *IPFSClusterPeerInfo `json:"ipfs_cluster,omitempty"`
 }
 
 // IPFSPeerInfo contains IPFS peer information for discovery
@@ -129,6 +130,12 @@ type IPFSPeerInfo struct {
     SwarmAddresses []string `json:"swarm_addresses"`
 }
 
+// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster discovery
+type IPFSClusterPeerInfo struct {
+    PeerID    string   `json:"peer_id"`   // Cluster peer ID (different from IPFS peer ID)
+    Addresses []string `json:"addresses"` // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
+}
+
 // HealthStatus contains health check information
 type HealthStatus struct {
     Status string `json:"status"` // "healthy", "degraded", "unhealthy"

|
|||||||
}
|
}
|
||||||
|
|
||||||
// Filter peers to only include those with addresses on our port (4001)
|
// Filter peers to only include those with addresses on our port (4001)
|
||||||
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096)
|
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096/9098)
|
||||||
peerInfo := d.host.Peerstore().PeerInfo(pid)
|
peerInfo := d.host.Peerstore().PeerInfo(pid)
|
||||||
hasValidPort := false
|
hasValidPort := false
|
||||||
for _, addr := range peerInfo.Addrs {
|
for _, addr := range peerInfo.Addrs {
|
||||||
|
|||||||
@@ -1024,7 +1024,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
 
 func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
     pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
-    cmd := exec.Command("./bin/node", "--config", configFile)
+    cmd := exec.Command("./bin/orama-node", "--config", configFile)
     logFile, _ := os.Create(logPath)
     cmd.Stdout = logFile
     cmd.Stderr = logFile

@@ -388,6 +388,17 @@ func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, oramaHome string
         fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown bin directory: %v\n", err)
     }
 
+    // Grant CAP_NET_BIND_SERVICE to orama-node to allow binding to ports 80/443 without root
+    nodeBinary := filepath.Join(binDir, "orama-node")
+    if _, err := os.Stat(nodeBinary); err == nil {
+        if err := exec.Command("setcap", "cap_net_bind_service=+ep", nodeBinary).Run(); err != nil {
+            fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to setcap on orama-node: %v\n", err)
+            fmt.Fprintf(bi.logWriter, " ⚠️ Gateway may not be able to bind to port 80/443\n")
+        } else {
+            fmt.Fprintf(bi.logWriter, " ✓ Set CAP_NET_BIND_SERVICE on orama-node\n")
+        }
+    }
+
     fmt.Fprintf(bi.logWriter, " ✓ DeBros binaries installed\n")
     return nil
 }
@@ -418,6 +429,12 @@ type IPFSPeerInfo struct {
     Addrs  []string
 }
 
+// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster peer discovery
+type IPFSClusterPeerInfo struct {
+    PeerID string   // Cluster peer ID (different from IPFS peer ID)
+    Addrs  []string // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
+}
+
 // InitializeIPFSRepo initializes an IPFS repository for a node (unified - no bootstrap/node distinction)
 // If ipfsPeer is provided, configures Peering.Peers for peer discovery in private networks
 func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int, ipfsPeer *IPFSPeerInfo) error {
@@ -702,9 +719,12 @@ func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsA
         return fmt.Errorf("failed to parse service.json: %w", err)
     }
 
-    // Update cluster secret and peer addresses
+    // Update cluster secret, listen_multiaddress, and peer addresses
     if cluster, ok := config["cluster"].(map[string]interface{}); ok {
         cluster["secret"] = secret
+        // Set consistent listen_multiaddress - port 9098 for cluster LibP2P communication
+        // This MUST match the port used in GetClusterPeerMultiaddr() and peer_addresses
+        cluster["listen_multiaddress"] = []interface{}{"/ip4/0.0.0.0/tcp/9098"}
         // Configure peer addresses for cluster discovery
         // This allows nodes to find and connect to each other
         if len(bootstrapClusterPeers) > 0 {
@@ -712,7 +732,8 @@ func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsA
         }
     } else {
         clusterConfig := map[string]interface{}{
             "secret": secret,
+            "listen_multiaddress": []interface{}{"/ip4/0.0.0.0/tcp/9098"},
         }
         if len(bootstrapClusterPeers) > 0 {
             clusterConfig["peer_addresses"] = bootstrapClusterPeers
@@ -821,7 +842,8 @@ func (bi *BinaryInstaller) InitializeRQLiteDataDir(dataDir string) error {
 // InstallAnyoneClient installs the anyone-client npm package globally
 func (bi *BinaryInstaller) InstallAnyoneClient() error {
     // Check if anyone-client is already available via npx (more reliable for scoped packages)
-    if cmd := exec.Command("npx", "--yes", "@anyone-protocol/anyone-client", "--version"); cmd.Run() == nil {
+    // Note: the CLI binary is "anyone-client", not the full scoped package name
+    if cmd := exec.Command("npx", "anyone-client", "--help"); cmd.Run() == nil {
         fmt.Fprintf(bi.logWriter, " ✓ anyone-client already installed\n")
         return nil
     }
@@ -873,8 +895,18 @@ func (bi *BinaryInstaller) InstallAnyoneClient() error {
         return fmt.Errorf("failed to install anyone-client: %w\n%s", err, string(output))
     }
 
-    // Verify installation - try npx first (most reliable for scoped packages)
-    verifyCmd := exec.Command("npx", "--yes", "@anyone-protocol/anyone-client", "--version")
+    // Create terms-agreement file to bypass interactive prompt when running as a service
+    termsFile := filepath.Join(debrosHome, "terms-agreement")
+    if err := os.WriteFile(termsFile, []byte("agreed"), 0644); err != nil {
+        fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to create terms-agreement: %v\n", err)
+    } else {
+        if err := exec.Command("chown", "debros:debros", termsFile).Run(); err != nil {
+            fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown terms-agreement: %v\n", err)
+        }
+    }
+
+    // Verify installation - try npx with the correct CLI name (anyone-client, not full scoped package name)
+    verifyCmd := exec.Command("npx", "anyone-client", "--help")
     if err := verifyCmd.Run(); err != nil {
         // Fallback: check if binary exists in common locations
         possiblePaths := []string{

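To visualize the net effect of `updateClusterConfig` above, a small standalone sketch (placeholder secret and peer address, not the installer's real output) of the `cluster` section it now writes into `service.json`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Fields updateClusterConfig now sets: the shared secret, the fixed
	// LibP2P listen port 9098, and (for joining nodes) peer_addresses.
	cluster := map[string]interface{}{
		"secret":              "replace-with-64-hex-secret",
		"listen_multiaddress": []interface{}{"/ip4/0.0.0.0/tcp/9098"},
		"peer_addresses":      []string{"/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWExample"},
	}

	out, _ := json.MarshalIndent(map[string]interface{}{"cluster": cluster}, "", "  ")
	fmt.Println(string(out))
}
```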
@@ -273,7 +273,8 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
 
 // Phase2cInitializeServices initializes service repositories and configurations
 // ipfsPeer can be nil for the first node, or contain peer info for joining nodes
-func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vpsIP string, ipfsPeer *IPFSPeerInfo) error {
+// ipfsClusterPeer can be nil for the first node, or contain IPFS Cluster peer info for joining nodes
+func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vpsIP string, ipfsPeer *IPFSPeerInfo, ipfsClusterPeer *IPFSClusterPeerInfo) error {
     ps.logf("Phase 2c: Initializing services...")
 
     // Ensure directories exist (unified structure)
@@ -298,13 +299,28 @@ func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vps
         return fmt.Errorf("failed to get cluster secret: %w", err)
     }
 
-    // Get cluster peer addresses from peers if available
+    // Get cluster peer addresses from IPFS Cluster peer info if available
     var clusterPeers []string
-    if len(peerAddresses) > 0 {
-        // Infer IP from peers
+    if ipfsClusterPeer != nil && ipfsClusterPeer.PeerID != "" {
+        // Construct cluster peer multiaddress using the discovered peer ID
+        // Format: /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>
         peerIP := inferPeerIP(peerAddresses, vpsIP)
         if peerIP != "" {
-            ps.logf(" ℹ️ Will attempt to connect to cluster peers at %s", peerIP)
+            // Construct the bootstrap multiaddress for IPFS Cluster
+            // Note: IPFS Cluster listens on port 9098 for cluster communication
+            clusterBootstrapAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, ipfsClusterPeer.PeerID)
+            clusterPeers = []string{clusterBootstrapAddr}
+            ps.logf(" ℹ️ IPFS Cluster will connect to peer: %s", clusterBootstrapAddr)
+        } else if len(ipfsClusterPeer.Addrs) > 0 {
+            // Fallback: use the addresses from discovery (if they include peer ID)
+            for _, addr := range ipfsClusterPeer.Addrs {
+                if strings.Contains(addr, ipfsClusterPeer.PeerID) {
+                    clusterPeers = append(clusterPeers, addr)
+                }
+            }
+            if len(clusterPeers) > 0 {
+                ps.logf(" ℹ️ IPFS Cluster will connect to discovered peers: %v", clusterPeers)
+            }
         }
     }

@@ -42,8 +42,8 @@ ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swa
 ExecStart=%[5]s daemon --enable-pubsub-experiment --repo-dir=%[2]s
 Restart=always
 RestartSec=5
-StandardOutput=file:%[4]s
-StandardError=file:%[4]s
+StandardOutput=append:%[4]s
+StandardError=append:%[4]s
 SyslogIdentifier=debros-ipfs
 
 NoNewPrivileges=yes
@@ -92,8 +92,8 @@ ExecStartPre=/bin/bash -c 'mkdir -p %[2]s && chmod 700 %[2]s'
 ExecStart=%[4]s daemon
 Restart=always
 RestartSec=5
-StandardOutput=file:%[3]s
-StandardError=file:%[3]s
+StandardOutput=append:%[3]s
+StandardError=append:%[3]s
 SyslogIdentifier=debros-ipfs-cluster
 
 NoNewPrivileges=yes
@@ -147,8 +147,8 @@ Environment=HOME=%[1]s
 ExecStart=%[5]s %[2]s
 Restart=always
 RestartSec=5
-StandardOutput=file:%[3]s
-StandardError=file:%[3]s
+StandardOutput=append:%[3]s
+StandardError=append:%[3]s
 SyslogIdentifier=debros-rqlite
 
 NoNewPrivileges=yes
@@ -186,8 +186,8 @@ Environment=OLRIC_SERVER_CONFIG=%[2]s
 ExecStart=%[5]s
 Restart=always
 RestartSec=5
-StandardOutput=file:%[3]s
-StandardError=file:%[3]s
+StandardOutput=append:%[3]s
+StandardError=append:%[3]s
 SyslogIdentifier=olric
 
 NoNewPrivileges=yes
@@ -210,6 +210,8 @@ WantedBy=multi-user.target
 func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
     configFile := "node.yaml"
     logFile := filepath.Join(ssg.oramaDir, "logs", "node.log")
+    // Note: systemd StandardOutput/StandardError paths should not contain substitution variables
+    // Use absolute paths directly as they will be resolved by systemd at runtime
 
     return fmt.Sprintf(`[Unit]
 Description=DeBros Network Node
@@ -222,11 +224,11 @@ User=debros
 Group=debros
 WorkingDirectory=%[1]s
 Environment=HOME=%[1]s
-ExecStart=%[1]s/bin/node --config %[2]s/configs/%[3]s
+ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s
 Restart=always
 RestartSec=5
-StandardOutput=file:%[4]s
-StandardError=file:%[4]s
+StandardOutput=append:%[4]s
+StandardError=append:%[4]s
 SyslogIdentifier=debros-node
 
 AmbientCapabilities=CAP_NET_BIND_SERVICE
@@ -264,8 +266,8 @@ Environment=HOME=%[1]s
 ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
 Restart=always
 RestartSec=5
-StandardOutput=file:%[3]s
-StandardError=file:%[3]s
+StandardOutput=append:%[3]s
+StandardError=append:%[3]s
 SyslogIdentifier=debros-gateway
 
 AmbientCapabilities=CAP_NET_BIND_SERVICE
@@ -303,17 +305,18 @@ User=debros
 Group=debros
 Environment=HOME=%[1]s
 Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin
-ExecStart=/usr/bin/npx --yes @anyone-protocol/anyone-client
+WorkingDirectory=%[1]s
+ExecStart=/usr/bin/npx anyone-client
 Restart=always
 RestartSec=5
-StandardOutput=file:%[2]s
-StandardError=file:%[2]s
+StandardOutput=append:%[2]s
+StandardError=append:%[2]s
 SyslogIdentifier=anyone-client
 
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=read-only
+ProtectHome=no
 ProtectKernelTunables=yes
 ProtectKernelModules=yes
 ProtectControlGroups=yes

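Two operational notes on the unit-file changes above: `StandardOutput=file:` makes systemd write from the beginning of the log file on every service (re)start, overwriting earlier entries, whereas `append:` (available since systemd 240) preserves them across restarts. And for the anyone-client unit, `WorkingDirectory=%[1]s` plus `ProtectHome=no` are what allow the client to read and write its terms-agreement state under the debros home directory, matching the installer change that pre-creates that file.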
@@ -70,7 +70,7 @@ http_gateway:
   routes:
     # Note: Raft traffic bypasses SNI gateway - RQLite uses native TLS on port 7002
     ipfs.{{.Domain}}: "localhost:4101"
-    ipfs-cluster.{{.Domain}}: "localhost:9096"
+    ipfs-cluster.{{.Domain}}: "localhost:9098"
     olric.{{.Domain}}: "localhost:3322"
 {{end}}

@@ -10,7 +10,7 @@ User=debros
 Group=debros
 WorkingDirectory={{.HomeDir}}
 Environment=HOME={{.HomeDir}}
-ExecStart={{.HomeDir}}/bin/node --config {{.OramaDir}}/configs/{{.ConfigFile}}
+ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
 Restart=always
 RestartSec=5
 StandardOutput=journal

@@ -32,9 +32,12 @@ type InstallerConfig struct {
     SwarmKeyHex    string   // 64-hex IPFS swarm key (for joining private network)
     IPFSPeerID     string   // IPFS peer ID (auto-discovered from peer domain)
     IPFSSwarmAddrs []string // IPFS swarm addresses (auto-discovered from peer domain)
-    Branch         string
-    IsFirstNode    bool
-    NoPull         bool
+    // IPFS Cluster peer info for cluster discovery
+    IPFSClusterPeerID string   // IPFS Cluster peer ID (auto-discovered from peer domain)
+    IPFSClusterAddrs  []string // IPFS Cluster addresses (auto-discovered from peer domain)
+    Branch            string
+    IsFirstNode       bool
+    NoPull            bool
 }
 
 // Step represents a step in the installation wizard
@@ -69,6 +72,7 @@ type Model struct {
     discovering    bool   // Whether domain discovery is in progress
     discoveryInfo  string // Info message during discovery
    discoveredPeer string // Discovered peer ID from domain
+    sniWarning     string // Warning about missing SNI DNS records (non-blocking)
 }
 
 // Styles
@@ -214,14 +218,13 @@ func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
         return m, nil
     }
 
-    // Check SNI DNS records for this domain
+    // Check SNI DNS records for this domain (non-blocking warning)
     m.discovering = true
-    m.discoveryInfo = "Validating SNI DNS records for " + domain + "..."
+    m.discoveryInfo = "Checking SNI DNS records for " + domain + "..."
 
-    if err := validateSNIDNSRecords(domain); err != nil {
-        m.discovering = false
-        m.err = fmt.Errorf("SNI DNS validation failed: %w", err)
-        return m, nil
+    if warning := validateSNIDNSRecords(domain); warning != "" {
+        // Log warning but continue - SNI DNS is optional for single-node setups
+        m.sniWarning = warning
     }
 
     m.discovering = false
@@ -255,14 +258,13 @@ func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
         return m, nil
     }
 
-    // Validate SNI DNS records for peer domain
+    // Check SNI DNS records for peer domain (non-blocking warning)
     m.discovering = true
-    m.discoveryInfo = "Validating SNI DNS records for " + peerDomain + "..."
+    m.discoveryInfo = "Checking SNI DNS records for " + peerDomain + "..."
 
-    if err := validateSNIDNSRecords(peerDomain); err != nil {
-        m.discovering = false
-        m.err = fmt.Errorf("SNI DNS validation failed: %w", err)
-        return m, nil
+    if warning := validateSNIDNSRecords(peerDomain); warning != "" {
+        // Log warning but continue - peer might have different DNS setup
+        m.sniWarning = warning
     }
 
     // Discover peer info from domain (try HTTPS first, then HTTP)
@@ -313,6 +315,12 @@ func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
         m.config.IPFSSwarmAddrs = discovery.IPFSSwarmAddrs
     }
 
+    // Store IPFS Cluster peer info for cluster peer_addresses configuration
+    if discovery.IPFSClusterPeerID != "" {
+        m.config.IPFSClusterPeerID = discovery.IPFSClusterPeerID
+        m.config.IPFSClusterAddrs = discovery.IPFSClusterAddrs
+    }
+
     m.err = nil
     m.step = StepClusterSecret
     m.setupStepInput()
@@ -651,6 +659,13 @@ func (m Model) viewConfirm() string {
     }
 
     s.WriteString(boxStyle.Render(config))
+
+    // Show SNI DNS warning if present
+    if m.sniWarning != "" {
+        s.WriteString("\n\n")
+        s.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#FFA500")).Render(m.sniWarning))
+    }
+
     s.WriteString("\n\n")
     s.WriteString(helpStyle.Render("Press Enter to install • Esc to go back"))
     return s.String()
@@ -713,6 +728,9 @@ type DiscoveryResult struct {
     PeerID         string   // LibP2P peer ID
     IPFSPeerID     string   // IPFS peer ID
     IPFSSwarmAddrs []string // IPFS swarm addresses
+    // IPFS Cluster info for cluster peer discovery
+    IPFSClusterPeerID string   // IPFS Cluster peer ID
+    IPFSClusterAddrs  []string // IPFS Cluster multiaddresses
 }
 
 // discoverPeerFromDomain queries an existing node to get its peer ID and IPFS info
@@ -741,7 +759,7 @@ func discoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
     return nil, fmt.Errorf("unexpected status from %s: %s", domain, resp.Status)
     }
 
-    // Parse response including IPFS info
+    // Parse response including IPFS and IPFS Cluster info
     var status struct {
         PeerID string `json:"peer_id"`
         NodeID string `json:"node_id"` // fallback for backward compatibility
@@ -749,6 +767,10 @@ func discoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
             PeerID         string   `json:"peer_id"`
             SwarmAddresses []string `json:"swarm_addresses"`
         } `json:"ipfs,omitempty"`
+        IPFSCluster *struct {
+            PeerID    string   `json:"peer_id"`
+            Addresses []string `json:"addresses"`
+        } `json:"ipfs_cluster,omitempty"`
     }
     if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
         return nil, fmt.Errorf("failed to parse response from %s: %w", domain, err)
@@ -774,6 +796,12 @@ func discoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
         result.IPFSSwarmAddrs = status.IPFS.SwarmAddresses
     }
 
+    // Include IPFS Cluster info if available
+    if status.IPFSCluster != nil {
+        result.IPFSClusterPeerID = status.IPFSCluster.PeerID
+        result.IPFSClusterAddrs = status.IPFSCluster.Addresses
+    }
+
     return result, nil
 }
 
@@ -860,7 +888,8 @@ func detectPublicIP() string {
 // It tries to resolve the key SNI hostnames for IPFS, IPFS Cluster, and Olric
 // Note: Raft no longer uses SNI - it uses direct RQLite TLS on port 7002
 // All should resolve to the same IP (the node's public IP or domain)
-func validateSNIDNSRecords(domain string) error {
+// Returns a warning string if records are missing (empty string if all OK)
+func validateSNIDNSRecords(domain string) string {
     // List of SNI services that need DNS records
     // Note: raft.domain is NOT included - RQLite uses direct TLS on port 7002
     sniServices := []string{
@@ -872,11 +901,12 @@ func validateSNIDNSRecords(domain string) error {
     // Try to resolve the main domain first to get baseline
     mainIPs, err := net.LookupHost(domain)
     if err != nil {
-        return fmt.Errorf("could not resolve main domain %s: %w", domain, err)
+        // Main domain doesn't resolve - this is just a warning now
+        return fmt.Sprintf("Warning: could not resolve main domain %s: %v", domain, err)
     }
 
     if len(mainIPs) == 0 {
-        return fmt.Errorf("main domain %s resolved to no IP addresses", domain)
+        return fmt.Sprintf("Warning: main domain %s resolved to no IP addresses", domain)
     }
 
     // Check each SNI service
@@ -890,18 +920,15 @@ func validateSNIDNSRecords(domain string) error {
 
     if len(unresolvedServices) > 0 {
         serviceList := strings.Join(unresolvedServices, ", ")
-        return fmt.Errorf(
-            "SNI DNS records not found for: %s\n\n"+
-                "You need to add DNS records (A records or wildcard CNAME) for these services:\n"+
-                " - They should all resolve to the same IP as %s\n"+
-                " - Option 1: Add individual A records pointing to %s\n"+
-                " - Option 2: Add wildcard CNAME: *.%s -> %s\n\n"+
-                "Without these records, multi-node clustering will fail.",
-            serviceList, domain, domain, domain, domain,
+        return fmt.Sprintf(
+            "⚠️ SNI DNS records not found for: %s\n"+
+                " For multi-node clustering, add wildcard CNAME: *.%s -> %s\n"+
+                " (Continuing anyway - single-node setup will work)",
+            serviceList, domain, domain,
         )
     }
 
-    return nil
+    return ""
 }
 
 // Run starts the TUI installer and returns the configuration

@@ -490,21 +490,30 @@ func (cm *ClusterConfigManager) UpdateAllClusterPeers() (bool, error) {
 }
 
 // RepairPeerConfiguration automatically discovers and repairs peer configuration
-// Tries multiple methods: config-based discovery, peer multiaddr, or discovery service
+// Tries multiple methods: gateway /v1/network/status, config-based discovery, peer multiaddr
 func (cm *ClusterConfigManager) RepairPeerConfiguration() (bool, error) {
 	if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
 		return false, nil // IPFS not configured
 	}
 
-	// Skip if this is the first node (creates the cluster, no join address)
+	// Method 1: Try to discover cluster peers via /v1/network/status endpoint
+	// This is the most reliable method as it uses the HTTPS gateway
+	if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
+		success, err := cm.DiscoverClusterPeersFromGateway()
+		if err != nil {
+			cm.logger.Debug("Gateway discovery failed, trying direct API", zap.Error(err))
+		} else if success {
+			cm.logger.Info("Successfully discovered cluster peers from gateway")
+			return true, nil
+		}
+	}
+
+	// Skip direct API method if this is the first node (creates the cluster, no join address)
 	if cm.cfg.Database.RQLiteJoinAddress == "" {
 		return false, nil
 	}
 
-	// Method 1: Try to use peer API URL from config if available
-	// Check if we have a peer's cluster API URL in discovery metadata
-	// For now, we'll infer from peers multiaddr
+	// Method 2: Try direct cluster API (fallback)
 
 	var peerAPIURL string
 
 	// Try to extract from peers multiaddr
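Method 1 consumes the gateway's `/v1/network/status` response. The struct shape below matches what the new `DiscoverClusterPeersFromGateway` helper (added later in this diff) decodes; the field names come from the diff, while the sample payload values are placeholders, not real node data:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Mirrors the anonymous struct decoded in DiscoverClusterPeersFromGateway.
type networkStatus struct {
	IPFSCluster *struct {
		PeerID    string   `json:"peer_id"`
		Addresses []string `json:"addresses"`
	} `json:"ipfs_cluster"`
}

func main() {
	// Placeholder response; a real gateway reports its own peer ID and addresses.
	sample := `{"ipfs_cluster":{"peer_id":"12D3KooWExamplePeer","addresses":["/ip4/203.0.113.7/tcp/9098"]}}`

	var st networkStatus
	if err := json.NewDecoder(strings.NewReader(sample)).Decode(&st); err != nil {
		panic(err)
	}

	// The repair flow rewrites this into a cluster multiaddr on port 9098:
	fmt.Printf("/ip4/203.0.113.7/tcp/9098/p2p/%s\n", st.IPFSCluster.PeerID)
}
```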
@@ -530,7 +539,7 @@ func (cm *ClusterConfigManager) RepairPeerConfiguration() (bool, error) {
 	}
 
 	if success {
-		cm.logger.Info("Successfully repaired peer configuration")
+		cm.logger.Info("Successfully repaired peer configuration via direct API")
 		return true, nil
 	}
 
@@ -539,6 +548,162 @@ func (cm *ClusterConfigManager) RepairPeerConfiguration() (bool, error) {
 	return false, nil
 }
 
+// DiscoverClusterPeersFromGateway queries bootstrap peers' /v1/network/status endpoint
+// to discover IPFS Cluster peer information and updates the local service.json
+func (cm *ClusterConfigManager) DiscoverClusterPeersFromGateway() (bool, error) {
+	if len(cm.cfg.Discovery.BootstrapPeers) == 0 {
+		cm.logger.Debug("No bootstrap peers configured, skipping gateway discovery")
+		return false, nil
+	}
+
+	var discoveredPeers []string
+	seenPeers := make(map[string]bool)
+
+	for _, peerAddr := range cm.cfg.Discovery.BootstrapPeers {
+		// Extract domain or IP from multiaddr
+		domain := extractDomainFromMultiaddr(peerAddr)
+		if domain == "" {
+			continue
+		}
+
+		// Query /v1/network/status endpoint
+		statusURL := fmt.Sprintf("https://%s/v1/network/status", domain)
+		cm.logger.Debug("Querying peer network status", zap.String("url", statusURL))
+
+		// Use TLS-aware HTTP client (handles staging certs for *.debros.network)
+		client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)
+		resp, err := client.Get(statusURL)
+		if err != nil {
+			// Try HTTP fallback
+			statusURL = fmt.Sprintf("http://%s/v1/network/status", domain)
+			resp, err = client.Get(statusURL)
+			if err != nil {
+				cm.logger.Debug("Failed to query peer status", zap.String("domain", domain), zap.Error(err))
+				continue
+			}
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode != http.StatusOK {
+			cm.logger.Debug("Peer returned non-OK status", zap.String("domain", domain), zap.Int("status", resp.StatusCode))
+			continue
+		}
+
+		// Parse response
+		var status struct {
+			IPFSCluster *struct {
+				PeerID    string   `json:"peer_id"`
+				Addresses []string `json:"addresses"`
+			} `json:"ipfs_cluster"`
+		}
+		if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
+			cm.logger.Debug("Failed to decode peer status", zap.String("domain", domain), zap.Error(err))
+			continue
+		}
+
+		if status.IPFSCluster == nil || status.IPFSCluster.PeerID == "" {
+			cm.logger.Debug("Peer has no IPFS Cluster info", zap.String("domain", domain))
+			continue
+		}
+
+		// Extract IP from domain or addresses
+		peerIP := extractIPFromMultiaddrForCluster(peerAddr)
+		if peerIP == "" {
+			// Try to resolve domain
+			ips, err := net.LookupIP(domain)
+			if err == nil && len(ips) > 0 {
+				for _, ip := range ips {
+					if ip.To4() != nil {
+						peerIP = ip.String()
+						break
+					}
+				}
+			}
+		}
+
+		if peerIP == "" {
+			cm.logger.Debug("Could not determine peer IP", zap.String("domain", domain))
+			continue
+		}
+
+		// Construct cluster multiaddr
+		// IPFS Cluster listens on port 9098 (REST API port 9094 + 4)
+		clusterAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, status.IPFSCluster.PeerID)
+		if !seenPeers[clusterAddr] {
+			discoveredPeers = append(discoveredPeers, clusterAddr)
+			seenPeers[clusterAddr] = true
+			cm.logger.Info("Discovered cluster peer from gateway",
+				zap.String("domain", domain),
+				zap.String("peer_id", status.IPFSCluster.PeerID),
+				zap.String("cluster_addr", clusterAddr))
+		}
+	}
+
+	if len(discoveredPeers) == 0 {
+		cm.logger.Debug("No cluster peers discovered from gateway")
+		return false, nil
+	}
+
+	// Load current config
+	serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
+	cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
+	if err != nil {
+		return false, fmt.Errorf("failed to load config: %w", err)
+	}
+
+	// Update peerstore file
+	peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
+	peerstoreContent := strings.Join(discoveredPeers, "\n") + "\n"
+	if err := os.WriteFile(peerstorePath, []byte(peerstoreContent), 0644); err != nil {
+		cm.logger.Warn("Failed to update peerstore file", zap.Error(err))
+	}
+
+	// Update peer_addresses in config
+	cfg.Cluster.PeerAddresses = discoveredPeers
+
+	// Save config
+	if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
+		return false, fmt.Errorf("failed to save config: %w", err)
+	}
+
+	cm.logger.Info("Updated cluster peer addresses from gateway discovery",
+		zap.Int("peer_count", len(discoveredPeers)),
+		zap.Strings("peer_addresses", discoveredPeers))
+
+	return true, nil
+}
+
+// extractDomainFromMultiaddr extracts domain or IP from a multiaddr string
+// Handles formats like /dns4/domain/tcp/port/p2p/id or /ip4/ip/tcp/port/p2p/id
+func extractDomainFromMultiaddr(multiaddrStr string) string {
+	ma, err := multiaddr.NewMultiaddr(multiaddrStr)
+	if err != nil {
+		return ""
+	}
+
+	// Try DNS4 first (domain name)
+	if domain, err := ma.ValueForProtocol(multiaddr.P_DNS4); err == nil && domain != "" {
+		return domain
+	}
+
+	// Try DNS6
+	if domain, err := ma.ValueForProtocol(multiaddr.P_DNS6); err == nil && domain != "" {
+		return domain
+	}
+
+	// Try IP4
+	if ip, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ip != "" {
+		return ip
+	}
+
+	// Try IP6
+	if ip, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ip != "" {
+		return ip
+	}
+
+	return ""
+}
+
 // DiscoverClusterPeersFromLibP2P loads IPFS cluster peer addresses from the peerstore file.
 // If peerstore is empty, it means there are no peers to connect to.
 // Returns true if peers were loaded and configured, false otherwise (non-fatal)
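The `extractDomainFromMultiaddr` helper only probes address components, so its behavior is easy to check in isolation. A self-contained example using the same go-multiaddr calls and probing order (DNS4, DNS6, IP4, IP6); real bootstrap addresses also carry a `/p2p/<peer-id>` suffix, omitted here so the placeholder addresses parse:

```go
package main

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

// hostFromMultiaddr mirrors extractDomainFromMultiaddr's probing order:
// DNS4 first, then DNS6, IP4, and IP6.
func hostFromMultiaddr(s string) string {
	ma, err := multiaddr.NewMultiaddr(s)
	if err != nil {
		return ""
	}
	for _, proto := range []int{multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_IP4, multiaddr.P_IP6} {
		if v, err := ma.ValueForProtocol(proto); err == nil && v != "" {
			return v
		}
	}
	return ""
}

func main() {
	fmt.Println(hostFromMultiaddr("/dns4/node1.example.net/tcp/4001")) // node1.example.net
	fmt.Println(hostFromMultiaddr("/ip4/203.0.113.7/tcp/4001"))        // 203.0.113.7
}
```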
@@ -52,7 +52,7 @@ SPECIFIC_PATTERNS=(
     "ipfs daemon"
     "ipfs-cluster-service daemon"
     "olric-server"
-    "bin/node"
+    "bin/orama-node"
     "bin/gateway"
     "anyone-client"
 )