feat: enhance development environment topology and configuration

- Introduced a new topology structure to manage multiple bootstrap and node configurations, allowing for a more flexible development environment.
- Updated the Makefile and help commands to reflect the addition of a second bootstrap node and a fourth regular node.
- Enhanced health checks to require a minimum of three healthy nodes for RQLite and LibP2P connectivity.
- Refactored service management to dynamically handle multiple nodes and their respective configurations.
- Improved logging and configuration file generation for better clarity and maintainability.
This commit is contained in:
anonpenguin23 2025-11-11 05:26:43 +02:00
parent 239fb2084b
commit efa26e6ec8
No known key found for this signature in database
GPG Key ID: 1CBB1FE35AFBEE30
15 changed files with 607 additions and 339 deletions

View File

@ -13,6 +13,24 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Deprecated
### Fixed
## [0.65.0] - 2025-11-11
### Added
- Expanded the local development environment (`dbn dev up`) from 3 nodes to 5 nodes (2 bootstraps and 3 regular nodes) for better testing of cluster resilience and quorum.
- Added a new `bootstrap2` node configuration and service to the development topology.
### Changed
- Updated the `dbn dev up` command to configure and start all 5 nodes and associated services (IPFS, RQLite, IPFS Cluster).
- Modified RQLite and LibP2P health checks in the development environment to require a quorum of 3 out of 5 nodes.
- Refactored development environment configuration logic using a new `Topology` structure for easier management of node ports and addresses.
### Deprecated
### Removed
### Fixed
- Ensured that secondary bootstrap nodes can correctly join the primary RQLite cluster in the development environment.
## [0.64.1] - 2025-11-10
### Added

View File

@ -19,7 +19,7 @@ test-e2e:
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.64.1
VERSION := 0.65.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@ -102,8 +102,8 @@ help:
@echo "Local Development (Recommended):"
@echo " make dev - Start full development stack with one command"
@echo " - Checks dependencies and available ports"
@echo " - Generates configs (bootstrap + node2 + node3 + gateway)"
@echo " - Starts IPFS, RQLite, Olric, nodes, and gateway"
@echo " - Generates configs (2 bootstraps + 3 nodes + gateway)"
@echo " - Starts IPFS, RQLite, Olric, all nodes, and gateway"
@echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)"
@echo " - Stops all services if health checks fail"
@echo " - Includes comprehensive logging"

View File

@ -111,8 +111,8 @@ func GetRQLiteNodes() []string {
}
cacheMutex.RUnlock()
// Try bootstrap.yaml first, then node.yaml variants
for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} {
// Try bootstrap.yaml first, then all node variants
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -141,11 +141,13 @@ func queryAPIKeyFromRQLite() (string, error) {
return "", fmt.Errorf("failed to get home directory: %w", err)
}
// Try bootstrap first, then nodes
// Try bootstrap first, then all nodes
dbPaths := []string{
filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "bootstrap2", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "node4", "rqlite", "db.sqlite"),
}
for _, dbPath := range dbPaths {
@ -219,7 +221,7 @@ func GetBootstrapPeers() []string {
}
cacheMutex.RUnlock()
configFiles := []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"}
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
seen := make(map[string]struct{})
var peers []string
@ -270,7 +272,7 @@ func GetIPFSClusterURL() string {
cacheMutex.RUnlock()
// Try to load from node config
for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} {
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -302,7 +304,7 @@ func GetIPFSAPIURL() string {
cacheMutex.RUnlock()
// Try to load from node config
for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} {
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -327,7 +329,7 @@ func GetIPFSAPIURL() string {
// GetClientNamespace returns the test client namespace from config
func GetClientNamespace() string {
// Try to load from node config
for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml"} {
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue

View File

@ -42,7 +42,7 @@ func showDevHelp() {
fmt.Printf("🚀 Development Environment Commands\n\n")
fmt.Printf("Usage: dbn dev <subcommand> [options]\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" up - Start development environment (bootstrap + 2 nodes + gateway)\n")
fmt.Printf(" up - Start development environment (2 bootstraps + 3 nodes + gateway)\n")
fmt.Printf(" down - Stop all development services\n")
fmt.Printf(" status - Show status of running services\n")
fmt.Printf(" logs <component> - Tail logs for a component\n")
@ -107,15 +107,18 @@ func handleDevUp(args []string) {
// Step 5: Show summary
fmt.Printf("🎉 Development environment is running!\n\n")
fmt.Printf("Key endpoints:\n")
fmt.Printf(" Gateway: http://localhost:6001\n")
fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
fmt.Printf(" Gateway: http://localhost:6001\n")
fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
fmt.Printf(" Bootstrap2 IPFS: http://localhost:4511\n")
fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
fmt.Printf(" Node4 IPFS: http://localhost:4504\n")
fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
fmt.Printf("Useful commands:\n")
fmt.Printf(" dbn dev status - Show status\n")
fmt.Printf(" dbn dev logs bootstrap - Bootstrap logs\n")
fmt.Printf(" dbn dev logs bootstrap2 - Bootstrap2 logs\n")
fmt.Printf(" dbn dev down - Stop all services\n\n")
fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
}
@ -153,7 +156,7 @@ func handleDevStatus(args []string) {
func handleDevLogs(args []string) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: dbn dev logs <component> [--follow]\n")
fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, node2, node3, gateway, ipfs-bootstrap, ipfs-node2, ipfs-node3, olric, anon\n")
fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, bootstrap2, node2, node3, node4, gateway, ipfs-bootstrap, ipfs-bootstrap2, ipfs-node2, ipfs-node3, ipfs-node4, olric, anon\n")
os.Exit(1)
}

View File

@ -235,11 +235,16 @@ func (c *Config) validateDatabase() []error {
}
}
} else if c.Node.Type == "bootstrap" {
// Bootstrap nodes can optionally join another bootstrap's RQLite cluster
// This allows secondary bootstraps to synchronize with the primary
if dc.RQLiteJoinAddress != "" {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "must be empty for bootstrap type",
})
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: err.Error(),
Hint: "expected format: host:port",
})
}
}
}

View File

@ -87,26 +87,8 @@ type PortChecker struct {
}
// RequiredPorts defines all ports needed for dev environment
var RequiredPorts = []int{
// LibP2P
4001, 4002, 4003,
// IPFS API
4501, 4502, 4503,
// RQLite HTTP
5001, 5002, 5003,
// RQLite Raft
7001, 7002, 7003,
// Gateway
6001,
// Olric
3320, 3322,
// Anon SOCKS
9050,
// IPFS Cluster
9094, 9104, 9114,
// IPFS Gateway
8080, 8081, 8082,
}
// Computed from DefaultTopology
var RequiredPorts = DefaultTopology().AllPorts()
// NewPortChecker creates a new port checker with required ports
func NewPortChecker() *PortChecker {
@ -150,28 +132,5 @@ func isPortAvailable(port int) bool {
// PortMap provides a human-readable mapping of ports to services
func PortMap() map[int]string {
return map[int]string{
4001: "Bootstrap P2P",
4002: "Node2 P2P",
4003: "Node3 P2P",
4501: "Bootstrap IPFS API",
4502: "Node2 IPFS API",
4503: "Node3 IPFS API",
5001: "Bootstrap RQLite HTTP",
5002: "Node2 RQLite HTTP",
5003: "Node3 RQLite HTTP",
7001: "Bootstrap RQLite Raft",
7002: "Node2 RQLite Raft",
7003: "Node3 RQLite Raft",
6001: "Gateway",
3320: "Olric HTTP API",
3322: "Olric Memberlist",
9050: "Anon SOCKS Proxy",
9094: "Bootstrap IPFS Cluster",
9104: "Node2 IPFS Cluster",
9114: "Node3 IPFS Cluster",
8080: "Bootstrap IPFS Gateway",
8081: "Node2 IPFS Gateway",
8082: "Node3 IPFS Gateway",
}
return DefaultTopology().PortMap()
}

View File

@ -40,18 +40,28 @@ func (ce *ConfigEnsurer) EnsureAll() error {
return fmt.Errorf("failed to ensure shared secrets: %w", err)
}
// Ensure bootstrap config and identity
if err := ce.ensureBootstrap(); err != nil {
return fmt.Errorf("failed to ensure bootstrap: %w", err)
// Load topology
topology := DefaultTopology()
// Generate identities for all bootstrap nodes and collect multiaddrs
bootstrapAddrs := []string{}
for _, nodeSpec := range topology.GetBootstrapNodes() {
addr, err := ce.ensureNodeIdentity(nodeSpec)
if err != nil {
return fmt.Errorf("failed to ensure identity for %s: %w", nodeSpec.Name, err)
}
bootstrapAddrs = append(bootstrapAddrs, addr)
}
// Ensure node2 and node3 configs
if err := ce.ensureNode2And3(); err != nil {
return fmt.Errorf("failed to ensure nodes: %w", err)
// Ensure configs for all bootstrap and regular nodes
for _, nodeSpec := range topology.Nodes {
if err := ce.ensureNodeConfig(nodeSpec, bootstrapAddrs); err != nil {
return fmt.Errorf("failed to ensure config for %s: %w", nodeSpec.Name, err)
}
}
// Ensure gateway config
if err := ce.ensureGateway(); err != nil {
if err := ce.ensureGateway(bootstrapAddrs); err != nil {
return fmt.Errorf("failed to ensure gateway: %w", err)
}
@ -87,47 +97,62 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
return nil
}
// ensureBootstrap creates bootstrap identity and config
func (ce *ConfigEnsurer) ensureBootstrap() error {
bootstrapDir := filepath.Join(ce.debrosDir, "bootstrap")
identityPath := filepath.Join(bootstrapDir, "identity.key")
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
identityPath := filepath.Join(nodeDir, "identity.key")
// Create identity if missing
var bootstrapPeerID string
var peerID string
if _, err := os.Stat(identityPath); os.IsNotExist(err) {
if err := os.MkdirAll(bootstrapDir, 0755); err != nil {
return fmt.Errorf("failed to create bootstrap directory: %w", err)
if err := os.MkdirAll(nodeDir, 0755); err != nil {
return "", fmt.Errorf("failed to create node directory: %w", err)
}
info, err := encryption.GenerateIdentity()
if err != nil {
return fmt.Errorf("failed to generate bootstrap identity: %w", err)
return "", fmt.Errorf("failed to generate identity: %w", err)
}
if err := encryption.SaveIdentity(info, identityPath); err != nil {
return fmt.Errorf("failed to save bootstrap identity: %w", err)
return "", fmt.Errorf("failed to save identity: %w", err)
}
bootstrapPeerID = info.PeerID.String()
fmt.Printf("✓ Generated bootstrap identity (Peer ID: %s)\n", bootstrapPeerID)
peerID = info.PeerID.String()
fmt.Printf("✓ Generated %s identity (Peer ID: %s)\n", nodeSpec.Name, peerID)
} else {
info, err := encryption.LoadIdentity(identityPath)
if err != nil {
return fmt.Errorf("failed to load bootstrap identity: %w", err)
return "", fmt.Errorf("failed to load identity: %w", err)
}
bootstrapPeerID = info.PeerID.String()
peerID = info.PeerID.String()
}
// Ensure bootstrap config - always regenerate to ensure template fixes are applied
bootstrapConfigPath := filepath.Join(ce.debrosDir, "bootstrap.yaml")
// Return multiaddr
return fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", nodeSpec.P2PPort, peerID), nil
}
// ensureNodeConfig creates or updates a node configuration
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, bootstrapAddrs []string) error {
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
configPath := filepath.Join(ce.debrosDir, nodeSpec.ConfigFilename)
if err := os.MkdirAll(nodeDir, 0755); err != nil {
return fmt.Errorf("failed to create node directory: %w", err)
}
if nodeSpec.Role == "bootstrap" {
// Generate bootstrap config
data := templates.BootstrapConfigData{
NodeID: "bootstrap",
P2PPort: 4001,
DataDir: bootstrapDir,
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
BootstrapPeers: bootstrapAddrs,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
}
config, err := templates.RenderBootstrapConfig(data)
@ -135,104 +160,66 @@ func (ce *ConfigEnsurer) ensureBootstrap() error {
return fmt.Errorf("failed to render bootstrap config: %w", err)
}
if err := os.WriteFile(bootstrapConfigPath, []byte(config), 0644); err != nil {
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write bootstrap config: %w", err)
}
fmt.Printf("✓ Generated bootstrap.yaml\n")
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
} else {
// Generate regular node config
data := templates.NodeConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
BootstrapPeers: bootstrapAddrs,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
}
return nil
}
config, err := templates.RenderNodeConfig(data)
if err != nil {
return fmt.Errorf("failed to render node config: %w", err)
}
// ensureNode2And3 creates node2 and node3 configs
func (ce *ConfigEnsurer) ensureNode2And3() error {
// Get bootstrap multiaddr for join
bootstrapInfo, err := encryption.LoadIdentity(filepath.Join(ce.debrosDir, "bootstrap", "identity.key"))
if err != nil {
return fmt.Errorf("failed to load bootstrap identity: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write node config: %w", err)
}
bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
nodes := []struct {
name string
p2pPort int
rqliteHTTPPort int
rqliteRaftPort int
clusterAPIPort int
ipfsAPIPort int
}{
{"node2", 4002, 5002, 7002, 9104, 4502},
{"node3", 4003, 5003, 7003, 9114, 4503},
}
for _, node := range nodes {
nodeDir := filepath.Join(ce.debrosDir, node.name)
configPath := filepath.Join(ce.debrosDir, fmt.Sprintf("%s.yaml", node.name))
// Always regenerate to ensure template fixes are applied
if err := os.MkdirAll(nodeDir, 0755); err != nil {
return fmt.Errorf("failed to create %s directory: %w", node.name, err)
}
data := templates.NodeConfigData{
NodeID: node.name,
P2PPort: node.p2pPort,
DataDir: nodeDir,
RQLiteHTTPPort: node.rqliteHTTPPort,
RQLiteRaftPort: node.rqliteRaftPort,
RQLiteJoinAddress: "localhost:7001",
BootstrapPeers: []string{bootstrapMultiaddr},
ClusterAPIPort: node.clusterAPIPort,
IPFSAPIPort: node.ipfsAPIPort,
}
config, err := templates.RenderNodeConfig(data)
if err != nil {
return fmt.Errorf("failed to render %s config: %w", node.name, err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write %s config: %w", node.name, err)
}
fmt.Printf("✓ Generated %s.yaml\n", node.name)
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
}
return nil
}
// ensureGateway creates gateway config
func (ce *ConfigEnsurer) ensureGateway() error {
func (ce *ConfigEnsurer) ensureGateway(bootstrapAddrs []string) error {
configPath := filepath.Join(ce.debrosDir, "gateway.yaml")
// Always regenerate to ensure template fixes are applied
// Get bootstrap multiaddr
bootstrapInfo, err := encryption.LoadIdentity(filepath.Join(ce.debrosDir, "bootstrap", "identity.key"))
if err != nil {
return fmt.Errorf("failed to load bootstrap identity: %w", err)
}
// Get first bootstrap's cluster API port for default
topology := DefaultTopology()
firstBootstrap := topology.GetBootstrapNodes()[0]
bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
data := templates.GatewayConfigData{
ListenPort: topology.GatewayPort,
BootstrapPeers: bootstrapAddrs,
OlricServers: []string{fmt.Sprintf("127.0.0.1:%d", topology.OlricHTTPPort)},
ClusterAPIPort: firstBootstrap.ClusterAPIPort,
IPFSAPIPort: firstBootstrap.IPFSAPIPort,
}
data := templates.GatewayConfigData{
ListenPort: 6001,
BootstrapPeers: []string{bootstrapMultiaddr},
OlricServers: []string{"127.0.0.1:3320"},
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
}
config, err := templates.RenderGatewayConfig(data)
if err != nil {
return fmt.Errorf("failed to render gateway config: %w", err)
}
config, err := templates.RenderGatewayConfig(data)
if err != nil {
return fmt.Errorf("failed to render gateway config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write gateway config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write gateway config: %w", err)
}
fmt.Printf("✓ Generated gateway.yaml\n")
fmt.Printf("✓ Generated gateway.yaml\n")
return nil
}
@ -240,23 +227,23 @@ func (ce *ConfigEnsurer) ensureGateway() error {
func (ce *ConfigEnsurer) ensureOlric() error {
configPath := filepath.Join(ce.debrosDir, "olric-config.yaml")
// Always regenerate to ensure template fixes are applied
data := templates.OlricConfigData{
BindAddr: "127.0.0.1",
HTTPPort: 3320,
MemberlistPort: 3322,
}
topology := DefaultTopology()
data := templates.OlricConfigData{
BindAddr: "127.0.0.1",
HTTPPort: topology.OlricHTTPPort,
MemberlistPort: topology.OlricMemberPort,
}
config, err := templates.RenderOlricConfig(data)
if err != nil {
return fmt.Errorf("failed to render olric config: %w", err)
}
config, err := templates.RenderOlricConfig(data)
if err != nil {
return fmt.Errorf("failed to render olric config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write olric config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write olric config: %w", err)
}
fmt.Printf("✓ Generated olric-config.yaml\n")
fmt.Printf("✓ Generated olric-config.yaml\n")
return nil
}

View File

@ -37,17 +37,19 @@ func (pm *ProcessManager) IPFSHealthCheck(ctx context.Context, nodes []ipfsNodeI
for _, line := range peerLines {
if strings.TrimSpace(line) != "" {
peerCount++
}
}
}
}
if peerCount < 2 {
result.Details += fmt.Sprintf("%s: only %d peers (want 2+); ", node.name, peerCount)
// With 5 nodes, expect each node to see at least 3 other peers
if peerCount < 3 {
result.Details += fmt.Sprintf("%s: only %d peers (want 3+); ", node.name, peerCount)
} else {
result.Details += fmt.Sprintf("%s: %d peers; ", node.name, peerCount)
healthyCount++
}
}
// Require all 5 nodes to have healthy peer counts
result.Healthy = healthyCount == len(nodes)
return result
}
@ -56,24 +58,19 @@ func (pm *ProcessManager) IPFSHealthCheck(ctx context.Context, nodes []ipfsNodeI
func (pm *ProcessManager) RQLiteHealthCheck(ctx context.Context) HealthCheckResult {
result := HealthCheckResult{Name: "RQLite Cluster"}
// Check bootstrap node
bootstrapStatus := pm.checkRQLiteNode(ctx, "bootstrap", 5001)
if !bootstrapStatus.Healthy {
result.Details += fmt.Sprintf("bootstrap: %s; ", bootstrapStatus.Details)
return result
}
// Check node2 and node3
node2Status := pm.checkRQLiteNode(ctx, "node2", 5002)
node3Status := pm.checkRQLiteNode(ctx, "node3", 5003)
if node2Status.Healthy && node3Status.Healthy {
result.Healthy = true
result.Details = fmt.Sprintf("bootstrap: leader ok; node2: %s; node3: %s", node2Status.Details, node3Status.Details)
} else {
result.Details = fmt.Sprintf("bootstrap: ok; node2: %s; node3: %s", node2Status.Details, node3Status.Details)
topology := DefaultTopology()
healthyCount := 0
for _, nodeSpec := range topology.Nodes {
status := pm.checkRQLiteNode(ctx, nodeSpec.Name, nodeSpec.RQLiteHTTPPort)
if status.Healthy {
healthyCount++
}
result.Details += fmt.Sprintf("%s: %s; ", nodeSpec.Name, status.Details)
}
// Require at least 3 out of 5 nodes to be healthy for quorum
result.Healthy = healthyCount >= 3
return result
}
@ -99,7 +96,7 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
result.Details = fmt.Sprintf("decode error: %v", err)
return result
}
}
// Check the store.raft structure (RQLite 8 format)
store, ok := status["store"].(map[string]interface{})
@ -146,22 +143,22 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResult {
result := HealthCheckResult{Name: "LibP2P/Node Peers"}
// Check that at least 2 nodes are part of the RQLite cluster (implies peer connectivity)
// and that they can communicate via LibP2P (which they use for cluster discovery)
// Check that nodes are part of the RQLite cluster and can communicate via LibP2P
topology := DefaultTopology()
healthyNodes := 0
for i, name := range []string{"bootstrap", "node2", "node3"} {
httpPort := 5001 + i
status := pm.checkRQLiteNode(ctx, name, httpPort)
for _, nodeSpec := range topology.Nodes {
status := pm.checkRQLiteNode(ctx, nodeSpec.Name, nodeSpec.RQLiteHTTPPort)
if status.Healthy {
healthyNodes++
result.Details += fmt.Sprintf("%s: connected; ", name)
result.Details += fmt.Sprintf("%s: connected; ", nodeSpec.Name)
} else {
result.Details += fmt.Sprintf("%s: %s; ", name, status.Details)
result.Details += fmt.Sprintf("%s: %s; ", nodeSpec.Name, status.Details)
}
}
// Healthy if at least 2 nodes report connectivity (including bootstrap)
result.Healthy = healthyNodes >= 2
// Healthy if at least 3 nodes report connectivity
result.Healthy = healthyNodes >= 3
return result
}

View File

@ -51,12 +51,10 @@ func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
func (pm *ProcessManager) StartAll(ctx context.Context) error {
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n\n")
// Define IPFS nodes for later use in health checks
ipfsNodes := []ipfsNodeInfo{
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
}
topology := DefaultTopology()
// Build IPFS node info from topology
ipfsNodes := pm.buildIPFSNodes(topology)
// Start in order of dependencies
services := []struct {
@ -64,12 +62,11 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
fn func(context.Context) error
}{
{"IPFS", pm.startIPFS},
{"RQLite", pm.startRQLite},
{"IPFS Cluster", pm.startIPFSCluster},
{"Olric", pm.startOlric},
{"Anon", pm.startAnon},
{"Bootstrap Node", pm.startBootstrapNode},
{"Node2", pm.startNode2},
{"Node3", pm.startNode3},
{"Nodes (Network)", pm.startNodes},
{"Gateway", pm.startGateway},
}
@ -101,23 +98,28 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
func (pm *ProcessManager) StopAll(ctx context.Context) error {
fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n")
services := []string{
"gateway",
"node3",
"node2",
"bootstrap",
"olric",
"ipfs-cluster-node3",
"ipfs-cluster-node2",
"ipfs-cluster-bootstrap",
"rqlite-node3",
"rqlite-node2",
"rqlite-bootstrap",
"ipfs-node3",
"ipfs-node2",
"ipfs-bootstrap",
"anon",
topology := DefaultTopology()
var services []string
// Build service list from topology (in reverse order)
services = append(services, "gateway")
for i := len(topology.Nodes) - 1; i >= 0; i-- {
node := topology.Nodes[i]
services = append(services, node.Name)
}
for i := len(topology.Nodes) - 1; i >= 0; i-- {
node := topology.Nodes[i]
services = append(services, fmt.Sprintf("ipfs-cluster-%s", node.Name))
}
for i := len(topology.Nodes) - 1; i >= 0; i-- {
node := topology.Nodes[i]
services = append(services, fmt.Sprintf("rqlite-%s", node.Name))
}
for i := len(topology.Nodes) - 1; i >= 0; i-- {
node := topology.Nodes[i]
services = append(services, fmt.Sprintf("ipfs-%s", node.Name))
}
services = append(services, "olric", "anon")
for _, svc := range services {
pm.stopProcess(svc)
@ -132,27 +134,58 @@ func (pm *ProcessManager) Status(ctx context.Context) {
fmt.Fprintf(pm.logWriter, "\n📊 Development Environment Status\n")
fmt.Fprintf(pm.logWriter, "================================\n\n")
services := []struct {
topology := DefaultTopology()
// Build service list from topology
var services []struct {
name string
ports []int
}{
{"Bootstrap IPFS", []int{4501, 4101}},
{"Bootstrap RQLite", []int{5001, 7001}},
{"Node2 IPFS", []int{4502, 4102}},
{"Node2 RQLite", []int{5002, 7002}},
{"Node3 IPFS", []int{4503, 4103}},
{"Node3 RQLite", []int{5003, 7003}},
{"Bootstrap Cluster", []int{9094}},
{"Node2 Cluster", []int{9104}},
{"Node3 Cluster", []int{9114}},
{"Bootstrap Node (P2P)", []int{4001}},
{"Node2 (P2P)", []int{4002}},
{"Node3 (P2P)", []int{4003}},
{"Gateway", []int{6001}},
{"Olric", []int{3320, 3322}},
{"Anon SOCKS", []int{9050}},
}
for _, node := range topology.Nodes {
services = append(services, struct {
name string
ports []int
}{
fmt.Sprintf("%s IPFS", node.Name),
[]int{node.IPFSAPIPort, node.IPFSSwarmPort},
})
services = append(services, struct {
name string
ports []int
}{
fmt.Sprintf("%s RQLite", node.Name),
[]int{node.RQLiteHTTPPort, node.RQLiteRaftPort},
})
services = append(services, struct {
name string
ports []int
}{
fmt.Sprintf("%s Cluster", node.Name),
[]int{node.ClusterAPIPort},
})
services = append(services, struct {
name string
ports []int
}{
fmt.Sprintf("%s Node (P2P)", node.Name),
[]int{node.P2PPort},
})
}
services = append(services, struct {
name string
ports []int
}{"Gateway", []int{topology.GatewayPort}})
services = append(services, struct {
name string
ports []int
}{"Olric", []int{topology.OlricHTTPPort, topology.OlricMemberPort}})
services = append(services, struct {
name string
ports []int
}{"Anon SOCKS", []int{topology.AnonSOCKSPort}})
for _, svc := range services {
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", svc.name))
running := false
@ -173,8 +206,8 @@ func (pm *ProcessManager) Status(ctx context.Context) {
}
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.debrosDir)
files := []string{"bootstrap.yaml", "node2.yaml", "node3.yaml", "gateway.yaml", "olric-config.yaml"}
for _, f := range files {
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node2.yaml", "node3.yaml", "node4.yaml", "gateway.yaml", "olric-config.yaml"}
for _, f := range configFiles {
path := filepath.Join(pm.debrosDir, f)
if _, err := os.Stat(path); err == nil {
fmt.Fprintf(pm.logWriter, " ✓ %s\n", f)
@ -188,6 +221,35 @@ func (pm *ProcessManager) Status(ctx context.Context) {
// Helper functions for starting individual services
// buildIPFSNodes derives the ipfsNodeInfo list (name, repo path, and the
// API/swarm/gateway ports) for every node declared in the given topology.
// The peerID field is left at its zero value; it is filled in later during
// peer discovery.
func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
	nodes := make([]ipfsNodeInfo, 0, len(topology.Nodes))
	for _, spec := range topology.Nodes {
		info := ipfsNodeInfo{
			name:        spec.Name,
			ipfsPath:    filepath.Join(pm.debrosDir, spec.DataDir, "ipfs/repo"),
			apiPort:     spec.IPFSAPIPort,
			swarmPort:   spec.IPFSSwarmPort,
			gatewayPort: spec.IPFSGatewayPort,
		}
		nodes = append(nodes, info)
	}
	return nodes
}
// startNodes starts all network nodes (bootstraps and regular) in topology
// order, passing each node its config file and a per-node log path under
// <debrosDir>/logs/<name>.log.
//
// Launches are staggered by 500ms so earlier nodes (the bootstraps, which
// come first in the topology) have a moment to come up before later nodes
// start. Returns the first start error, wrapped with the node name, and
// honors ctx cancellation both before each launch and during the stagger
// wait (the original used a blind time.Sleep that ignored ctx).
func (pm *ProcessManager) startNodes(ctx context.Context) error {
	topology := DefaultTopology()
	for _, nodeSpec := range topology.Nodes {
		// Abort promptly if the caller cancelled (e.g. Ctrl-C during dev up).
		if err := ctx.Err(); err != nil {
			return err
		}
		logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
		if err := pm.startNode(nodeSpec.Name, nodeSpec.ConfigFilename, logPath); err != nil {
			return fmt.Errorf("failed to start %s: %w", nodeSpec.Name, err)
		}
		// Stagger startups with a ctx-aware wait instead of time.Sleep.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(500 * time.Millisecond):
		}
	}
	return nil
}
// ipfsNodeInfo holds information about an IPFS node for peer discovery
type ipfsNodeInfo struct {
name string
@ -408,11 +470,8 @@ func (pm *ProcessManager) ipfsHTTPCall(ctx context.Context, urlStr string, metho
}
func (pm *ProcessManager) startIPFS(ctx context.Context) error {
nodes := []ipfsNodeInfo{
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
}
topology := DefaultTopology()
nodes := pm.buildIPFSNodes(topology)
// Phase 1: Initialize repos and configure addresses
for i := range nodes {
@ -480,25 +539,34 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
}
func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
nodes := []struct {
topology := DefaultTopology()
var nodes []struct {
name string
clusterPath string
restAPIPort int
clusterPort int
ipfsPort int
}{
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs-cluster"), 9094, 9096, 4501},
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs-cluster"), 9104, 9106, 4502},
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs-cluster"), 9114, 9116, 4503},
}
for _, nodeSpec := range topology.Nodes {
nodes = append(nodes, struct {
name string
clusterPath string
restAPIPort int
clusterPort int
ipfsPort int
}{
nodeSpec.Name,
filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs-cluster"),
nodeSpec.ClusterAPIPort,
nodeSpec.ClusterPort,
nodeSpec.IPFSAPIPort,
})
}
// Wait for all IPFS daemons to be ready before starting cluster services
fmt.Fprintf(pm.logWriter, " Waiting for IPFS daemons to be ready...\n")
ipfsNodes := []ipfsNodeInfo{
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
}
ipfsNodes := pm.buildIPFSNodes(topology)
for _, ipfsNode := range ipfsNodes {
if err := pm.waitIPFSReady(ctx, ipfsNode); err != nil {
fmt.Fprintf(pm.logWriter, " Warning: IPFS %s did not become ready: %v\n", ipfsNode.name, err)
@ -875,16 +943,29 @@ func (pm *ProcessManager) ensureIPFSClusterPorts(clusterPath string, restAPIPort
}
func (pm *ProcessManager) startRQLite(ctx context.Context) error {
nodes := []struct {
topology := DefaultTopology()
var nodes []struct {
name string
dataDir string
httpPort int
raftPort int
joinAddr string
}{
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/rqlite"), 5001, 7001, ""},
{"node2", filepath.Join(pm.debrosDir, "node2/rqlite"), 5002, 7002, "localhost:7001"},
{"node3", filepath.Join(pm.debrosDir, "node3/rqlite"), 5003, 7003, "localhost:7001"},
}
for _, nodeSpec := range topology.Nodes {
nodes = append(nodes, struct {
name string
dataDir string
httpPort int
raftPort int
joinAddr string
}{
nodeSpec.Name,
filepath.Join(pm.debrosDir, nodeSpec.DataDir, "rqlite"),
nodeSpec.RQLiteHTTPPort,
nodeSpec.RQLiteRaftPort,
nodeSpec.RQLiteJoinTarget,
})
}
for _, node := range nodes {
@ -973,18 +1054,6 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
return nil
}
// startBootstrapNode launches the primary bootstrap node process using
// bootstrap.yaml, logging to logs/bootstrap.log under the .debros directory.
// The ctx parameter is currently unused: startNode starts the process directly.
func (pm *ProcessManager) startBootstrapNode(ctx context.Context) error {
	return pm.startNode("bootstrap", "bootstrap.yaml", filepath.Join(pm.debrosDir, "logs", "bootstrap.log"))
}
// startNode2 launches the node2 process using node2.yaml, logging to
// logs/node2.log under the .debros directory. The ctx parameter is
// currently unused: startNode starts the process directly.
func (pm *ProcessManager) startNode2(ctx context.Context) error {
	return pm.startNode("node2", "node2.yaml", filepath.Join(pm.debrosDir, "logs", "node2.log"))
}
// startNode3 launches the node3 process using node3.yaml, logging to
// logs/node3.log under the .debros directory. The ctx parameter is
// currently unused: startNode starts the process directly.
func (pm *ProcessManager) startNode3(ctx context.Context) error {
	return pm.startNode("node3", "node3.yaml", filepath.Join(pm.debrosDir, "logs", "node3.log"))
}
func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
cmd := exec.Command("./bin/node", "--config", configFile)

View File

@ -0,0 +1,206 @@
package development
import "fmt"
// NodeSpec defines the configuration for a single dev environment node:
// its identity, on-disk layout, and every TCP port it listens on.
type NodeSpec struct {
	Name              string // node identifier: bootstrap, bootstrap2, node2, node3, node4
	Role              string // "bootstrap" or "node"
	ConfigFilename    string // YAML config file name: bootstrap.yaml, bootstrap2.yaml, node2.yaml, etc.
	DataDir           string // data directory, relative to the .debros root
	P2PPort           int    // LibP2P listen port
	IPFSAPIPort       int    // IPFS API port
	IPFSSwarmPort     int    // IPFS Swarm port
	IPFSGatewayPort   int    // IPFS HTTP Gateway port
	RQLiteHTTPPort    int    // RQLite HTTP API port
	RQLiteRaftPort    int    // RQLite Raft consensus port
	ClusterAPIPort    int    // IPFS Cluster REST API port
	ClusterPort       int    // IPFS Cluster P2P port
	RQLiteJoinTarget  string // host:port of the RQLite Raft endpoint to join; empty for the bootstrap that leads
	ClusterJoinTarget string // host:port of the IPFS Cluster endpoint to join; empty for the bootstrap that leads
}
// Topology defines the complete development environment topology:
// the set of nodes plus the ports of the shared singleton services.
type Topology struct {
	Nodes           []NodeSpec
	GatewayPort     int // Gateway HTTP port
	OlricHTTPPort   int // Olric HTTP API port
	OlricMemberPort int // Olric memberlist port
	AnonSOCKSPort   int // Anon SOCKS proxy port
}
// DefaultTopology returns the default five-node dev environment topology:
// two bootstrap nodes ("bootstrap", "bootstrap2") and three regular nodes
// ("node2" through "node4"). Every node except the lead bootstrap joins
// the lead bootstrap's RQLite (localhost:7001) and IPFS Cluster
// (localhost:9096).
func DefaultTopology() *Topology {
	// Port layout: each service family has a base (4000 LibP2P, 4100 IPFS
	// swarm, 4500 IPFS API, 5000 RQLite HTTP, 7000 RQLite Raft, 7500 IPFS
	// gateway) plus a per-node suffix. Cluster REST/P2P ports advance in
	// steps of 10 from 9094/9096 in declaration order.
	specs := []struct {
		name   string
		role   string
		suffix int
	}{
		{"bootstrap", "bootstrap", 1},
		{"bootstrap2", "bootstrap", 11},
		{"node2", "node", 2},
		{"node3", "node", 3},
		{"node4", "node", 4},
	}

	nodes := make([]NodeSpec, 0, len(specs))
	for i, s := range specs {
		node := NodeSpec{
			Name:            s.name,
			Role:            s.role,
			ConfigFilename:  s.name + ".yaml",
			DataDir:         s.name,
			P2PPort:         4000 + s.suffix,
			IPFSAPIPort:     4500 + s.suffix,
			IPFSSwarmPort:   4100 + s.suffix,
			IPFSGatewayPort: 7500 + s.suffix,
			RQLiteHTTPPort:  5000 + s.suffix,
			RQLiteRaftPort:  7000 + s.suffix,
			ClusterAPIPort:  9094 + 10*i,
			ClusterPort:     9096 + 10*i,
		}
		if i > 0 {
			// Everyone but the lead bootstrap joins the lead bootstrap.
			node.RQLiteJoinTarget = "localhost:7001"
			node.ClusterJoinTarget = "localhost:9096"
		}
		nodes = append(nodes, node)
	}

	return &Topology{
		Nodes:           nodes,
		GatewayPort:     6001,
		OlricHTTPPort:   3320,
		OlricMemberPort: 3322,
		AnonSOCKSPort:   9050,
	}
}
// AllPorts returns a slice of all ports used in the topology: the eight
// per-node ports (in declaration order for each node) followed by the four
// shared service ports.
func (t *Topology) AllPorts() []int {
	// Pre-size to avoid repeated reallocation: 8 ports per node + 4 shared.
	ports := make([]int, 0, len(t.Nodes)*8+4)

	// Node-specific ports
	for _, node := range t.Nodes {
		ports = append(ports,
			node.P2PPort,
			node.IPFSAPIPort,
			node.IPFSSwarmPort,
			node.IPFSGatewayPort,
			node.RQLiteHTTPPort,
			node.RQLiteRaftPort,
			node.ClusterAPIPort,
			node.ClusterPort,
		)
	}

	// Shared service ports
	ports = append(ports,
		t.GatewayPort,
		t.OlricHTTPPort,
		t.OlricMemberPort,
		t.AnonSOCKSPort,
	)
	return ports
}
// PortMap returns a human-readable mapping of each port to the service that
// uses it (e.g. "bootstrap IPFS Cluster API"). If two entries ever share a
// port number, the later write silently wins — port uniqueness is assumed.
func (t *Topology) PortMap() map[int]string {
	// Pre-size to the known entry count: 8 labelled ports per node + 4 shared.
	portMap := make(map[int]string, len(t.Nodes)*8+4)
	for _, node := range t.Nodes {
		portMap[node.P2PPort] = fmt.Sprintf("%s P2P", node.Name)
		portMap[node.IPFSAPIPort] = fmt.Sprintf("%s IPFS API", node.Name)
		portMap[node.IPFSSwarmPort] = fmt.Sprintf("%s IPFS Swarm", node.Name)
		portMap[node.IPFSGatewayPort] = fmt.Sprintf("%s IPFS Gateway", node.Name)
		portMap[node.RQLiteHTTPPort] = fmt.Sprintf("%s RQLite HTTP", node.Name)
		portMap[node.RQLiteRaftPort] = fmt.Sprintf("%s RQLite Raft", node.Name)
		portMap[node.ClusterAPIPort] = fmt.Sprintf("%s IPFS Cluster API", node.Name)
		portMap[node.ClusterPort] = fmt.Sprintf("%s IPFS Cluster P2P", node.Name)
	}
	portMap[t.GatewayPort] = "Gateway"
	portMap[t.OlricHTTPPort] = "Olric HTTP API"
	portMap[t.OlricMemberPort] = "Olric Memberlist"
	portMap[t.AnonSOCKSPort] = "Anon SOCKS Proxy"
	return portMap
}
// GetBootstrapNodes returns only the bootstrap nodes, in declaration order.
func (t *Topology) GetBootstrapNodes() []NodeSpec {
	var bootstraps []NodeSpec
	for i := range t.Nodes {
		if t.Nodes[i].Role != "bootstrap" {
			continue
		}
		bootstraps = append(bootstraps, t.Nodes[i])
	}
	return bootstraps
}
// GetRegularNodes returns only the regular (non-bootstrap) nodes, in
// declaration order.
func (t *Topology) GetRegularNodes() []NodeSpec {
	var regulars []NodeSpec
	for i := range t.Nodes {
		if t.Nodes[i].Role != "node" {
			continue
		}
		regulars = append(regulars, t.Nodes[i])
	}
	return regulars
}
// GetNodeByName returns a pointer into t.Nodes for the node with the given
// name, or nil if no node matches. The pointer aliases the topology's slice,
// so mutations through it are visible to all other accessors.
func (t *Topology) GetNodeByName(name string) *NodeSpec {
	// Index directly instead of `for i, node := range`: the range form copies
	// the whole NodeSpec each iteration just to compare one field.
	for i := range t.Nodes {
		if t.Nodes[i].Name == name {
			return &t.Nodes[i]
		}
	}
	return nil
}

View File

@ -12,9 +12,9 @@ database:
shard_count: 16
max_database_size: 1073741824
backup_interval: "24h"
rqlite_port: { { .RQLiteHTTPPort } }
rqlite_raft_port: { { .RQLiteRaftPort } }
rqlite_join_address: ""
rqlite_port: {{.RQLiteHTTPPort}}
rqlite_raft_port: {{.RQLiteRaftPort}}
rqlite_join_address: "{{.RQLiteJoinAddress}}"
cluster_sync_interval: "30s"
peer_inactivity_limit: "24h"
min_cluster_size: 3
@ -26,9 +26,11 @@ database:
enable_encryption: true
discovery:
bootstrap_peers: []
bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
discovery_interval: "15s"
bootstrap_port: { { .P2PPort } }
bootstrap_port: {{.P2PPort}}
http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
node_namespace: "default"

View File

@ -13,13 +13,15 @@ var templatesFS embed.FS
// BootstrapConfigData holds parameters for bootstrap.yaml rendering
type BootstrapConfigData struct {
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
ClusterAPIPort int
IPFSAPIPort int // Default: 4501
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
ClusterAPIPort int
IPFSAPIPort int // Default: 4501
BootstrapPeers []string // List of bootstrap peer multiaddrs
RQLiteJoinAddress string // Optional: join address for secondary bootstraps
}
// NodeConfigData holds parameters for node.yaml rendering

View File

@ -433,7 +433,7 @@ type ipfsDiscoveryResult struct {
}
// discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files
// Checks bootstrap.yaml first, then node.yaml, node2.yaml, etc.
// Checks bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
homeDir, err := os.UserHomeDir()
if err != nil {
@ -443,8 +443,8 @@ func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
configDir := filepath.Join(homeDir, ".debros")
// Try bootstrap.yaml first, then node.yaml, node2.yaml, etc.
configFiles := []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"}
// Try bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
for _, filename := range configFiles {
configPath := filepath.Join(configDir, filename)

View File

@ -83,26 +83,18 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
}
// Determine cluster path based on data directory structure
// Check if dataDir contains specific node names (e.g., ~/.debros/bootstrap, ~/.debros/node2)
// Check if dataDir contains specific node names (e.g., ~/.debros/bootstrap, ~/.debros/bootstrap2, ~/.debros/node2-4)
clusterPath := filepath.Join(dataDir, "ipfs-cluster")
if strings.Contains(dataDir, "bootstrap") {
// Check if bootstrap is a direct child
if filepath.Base(filepath.Dir(dataDir)) == "bootstrap" || filepath.Base(dataDir) == "bootstrap" {
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
} else {
clusterPath = filepath.Join(dataDir, "bootstrap", "ipfs-cluster")
}
} else if strings.Contains(dataDir, "node2") {
if filepath.Base(filepath.Dir(dataDir)) == "node2" || filepath.Base(dataDir) == "node2" {
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
} else {
clusterPath = filepath.Join(dataDir, "node2", "ipfs-cluster")
}
} else if strings.Contains(dataDir, "node3") {
if filepath.Base(filepath.Dir(dataDir)) == "node3" || filepath.Base(dataDir) == "node3" {
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
} else {
clusterPath = filepath.Join(dataDir, "node3", "ipfs-cluster")
nodeNames := []string{"bootstrap", "bootstrap2", "node2", "node3", "node4"}
for _, nodeName := range nodeNames {
if strings.Contains(dataDir, nodeName) {
// Check if this is a direct child
if filepath.Base(filepath.Dir(dataDir)) == nodeName || filepath.Base(dataDir) == nodeName {
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
} else {
clusterPath = filepath.Join(dataDir, nodeName, "ipfs-cluster")
}
break
}
}
@ -151,14 +143,17 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
// Determine node name
nodeName := cm.cfg.Node.Type
if nodeName == "node" {
if nodeName == "node" || nodeName == "bootstrap" {
// Try to extract from data dir or ID
if strings.Contains(cm.cfg.Node.DataDir, "node2") || strings.Contains(cm.cfg.Node.ID, "node2") {
nodeName = "node2"
} else if strings.Contains(cm.cfg.Node.DataDir, "node3") || strings.Contains(cm.cfg.Node.ID, "node3") {
nodeName = "node3"
} else {
nodeName = "node"
possibleNames := []string{"bootstrap", "bootstrap2", "node2", "node3", "node4"}
for _, name := range possibleNames {
if strings.Contains(cm.cfg.Node.DataDir, name) || strings.Contains(cm.cfg.Node.ID, name) {
nodeName = name
break
}
}
if nodeName == "node" || nodeName == "bootstrap" {
nodeName = cm.cfg.Node.Type
}
}

View File

@ -3,8 +3,31 @@ set -euo pipefail
echo "Force killing all processes on dev ports..."
# Define all dev ports
PORTS=(4001 4002 4003 4101 4102 4103 4501 4502 4503 5001 5002 5003 6001 7001 7002 7003 7501 7502 7503 8080 8081 8082 9094 9095 9096 9097 9104 9105 9106 9107 9114 9115 9116 9117 3320 3322 9050)
# Define all dev ports (5-node topology: bootstrap, bootstrap2, node2, node3, node4)
PORTS=(
# LibP2P
4001 4011 4002 4003 4004
# IPFS Swarm
4101 4111 4102 4103 4104
# IPFS API
4501 4511 4502 4503 4504
# RQLite HTTP
5001 5011 5002 5003 5004
# RQLite Raft
7001 7011 7002 7003 7004
# IPFS Gateway
7501 7511 7502 7503 7504
# Gateway
6001
# Olric
3320 3322
# Anon SOCKS
9050
# IPFS Cluster REST API
9094 9104 9114 9124 9134
# IPFS Cluster P2P
9096 9106 9116 9126 9136
)
killed_count=0
killed_pids=()
@ -35,7 +58,7 @@ for cmd in "${COMMANDS[@]}"; do
if [[ -n "$all_pids" ]]; then
for pid in $all_pids; do
# Check if this process is using any of our dev ports
port_match=$(lsof -nP -p "$pid" -iTCP 2>/dev/null | grep -E ":(400[1-3]|410[1-3]|450[1-3]|500[1-3]|6001|700[1-3]|750[1-3]|808[0-2]|909[4-7]|910[4-7]|911[4-7]|332[02]|9050)" || true)
port_match=$(lsof -nP -p "$pid" -iTCP 2>/dev/null | grep -E ":(400[1-4]|401[1-1]|410[1-4]|411[1-1]|450[1-4]|451[1-1]|500[1-4]|501[1-1]|600[1-1]|700[1-4]|701[1-1]|750[1-4]|751[1-1]|332[02]|9050|909[4-9]|910[4-9]|911[4-9]|912[4-9]|913[4-9]|909[6-9]|910[6-9]|911[6-9]|912[6-9]|913[6-9])" || true)
if [[ -n "$port_match" ]]; then
echo "Killing orphaned $cmd process (PID: $pid) using dev ports"
kill -9 "$pid" 2>/dev/null || true