From 3196e91e8562975791c1e90848c85a456e72b55a Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Mon, 3 Nov 2025 15:30:08 +0200 Subject: [PATCH 01/57] feat: integrate Olric distributed cache support - Added Olric cache server integration, including configuration options for Olric servers and timeout settings. - Implemented HTTP handlers for cache operations: health check, get, put, delete, and scan. - Enhanced Makefile with commands to run the Olric server and manage its configuration. - Updated README and setup scripts to include Olric installation and configuration instructions. - Introduced tests for cache handlers to ensure proper functionality and error handling. --- CHANGELOG.md | 17 + Makefile | 2 +- README.md | 1003 +++------------------------- cmd/gateway/config.go | 17 + go.mod | 27 +- go.sum | 161 +++++ pkg/cli/setup.go | 233 ++++++- pkg/config/config.go | 8 + pkg/gateway/cache_handlers.go | 356 ++++++++++ pkg/gateway/cache_handlers_test.go | 202 ++++++ pkg/gateway/gateway.go | 146 ++++ pkg/gateway/routes.go | 7 + pkg/olric/client.go | 103 +++ scripts/install-debros-network.sh | 44 ++ 14 files changed, 1409 insertions(+), 917 deletions(-) create mode 100644 pkg/gateway/cache_handlers.go create mode 100644 pkg/gateway/cache_handlers_test.go create mode 100644 pkg/olric/client.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 62ed13e..ac7a4f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,23 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.54.0] - 2025-11-03 + +### Added +- Integrated Olric distributed cache for high-speed key-value storage and caching. +- Added new HTTP Gateway endpoints for cache operations (GET, PUT, DELETE, SCAN) via `/v1/cache/`. +- Added `olric_servers` and `olric_timeout` configuration options to the Gateway. +- Updated the automated installation script (`install-debros-network.sh`) to include Olric installation, configuration, and firewall rules (ports 3320, 3322). + +### Changed +- Refactored README for better clarity and organization, focusing on quick start and core features. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.53.18] - 2025-11-03 ### Added diff --git a/Makefile b/Makefile index 22f1d5c..03a1f0b 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks -VERSION := 0.53.18 +VERSION := 0.54.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/README.md b/README.md index f0826fd..54fe138 100644 --- a/README.md +++ b/README.md @@ -1,966 +1,155 @@ # DeBros Network - Distributed P2P Database System -A robust, decentralized peer-to-peer network built in Go, providing distributed SQL database, key-value storage, pub/sub messaging, and resilient peer management. Designed for applications needing reliable, scalable, and secure data sharing without centralized infrastructure. - ---- +DeBros Network is a decentralized peer-to-peer data platform built in Go. It combines distributed SQL (RQLite), pub/sub messaging, and resilient peer discovery so applications can share state without central infrastructure. 
## Table of Contents -- [Features](#features) -- [Architecture Overview](#architecture-overview) -- [System Requirements](#system-requirements) +- [At a Glance](#at-a-glance) - [Quick Start](#quick-start) -- [Deployment & Installation](#deployment--installation) -- [Configuration](#configuration) -- [CLI Usage](#cli-usage) +- [Components & Ports](#components--ports) +- [Configuration Cheatsheet](#configuration-cheatsheet) +- [CLI Highlights](#cli-highlights) - [HTTP Gateway](#http-gateway) -- [Development](#development) -- [Database Client (Go ORM-like)](#database-client-go-orm-like) - [Troubleshooting](#troubleshooting) -- [License](#license) +- [Resources](#resources) ---- +## At a Glance -## Features - -- **Distributed SQL Database:** RQLite-backed, Raft-consensus, ACID transactions, automatic failover. -- **Pub/Sub Messaging:** Topic-based, real-time, namespaced, automatic cleanup. -- **Peer Discovery & Management:** Nodes discover peers, bootstrap support, health monitoring. -- **Application Isolation:** Namespace-based multi-tenancy, per-app config. -- **Secure by Default:** Noise/TLS transport, peer identity, systemd hardening. -- **Simple Client API:** Lightweight Go client for apps and CLI tools. - ---- - -## Architecture Overview - -``` -┌─────────────────────────────────────────────────────────────┐ -│ DeBros Network Cluster │ -├─────────────────────────────────────────────────────────────┤ -│ Application Layer │ -│ ┌─────────────┐ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ Anchat │ │ Custom App │ │ CLI Tools │ │ -│ └─────────────┘ └─────────────┘ └────────────────────────┘ │ -├─────────────────────────────────────────────────────────────┤ -│ Client API │ -│ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ Database │ │ PubSub │ │ -│ │ Client │ │ Client │ │ -│ └─────────────┘ └────────────────────────┘ │ -├─────────────────────────────────────────────────────────────┤ -│ Network Node Layer │ -│ ┌─────────────┐ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ Discovery │ │ PubSub │ │ Database │ │ -│ │ Manager │ │ Manager │ │ (RQLite) │ │ -│ └─────────────┘ └─────────────┘ └────────────────────────┘ │ -├─────────────────────────────────────────────────────────────┤ -│ Transport Layer │ -│ ┌─────────────┐ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ LibP2P │ │ Noise/TLS │ │ RQLite │ │ -│ │ Host │ │ Encryption │ │ Database │ │ -│ └─────────────┘ └─────────────┘ └────────────────────────┘ │ -└─────────────────────────────────────────────────────────────┘ -``` - -- **Node:** Full P2P participant, runs services, handles peer discovery, database, pubsub. -- **Client:** Lightweight, connects only to bootstrap peers, consumes services, no peer discovery. - ---- - -## System Requirements - -### Software - -- **Go:** 1.21+ (recommended) -- **RQLite:** 8.x (distributed SQLite) -- **Git:** For source management -- **Make:** For build automation (recommended) - -### Hardware - -- **Minimum:** 2 CPU cores, 4GB RAM, 10GB disk, stable internet -- **Recommended:** 4+ cores, 8GB+ RAM, 50GB+ SSD, low-latency network - -### Network Ports - -- **4001:** LibP2P P2P communication -- **5001:** RQLite HTTP API -- **7001:** RQLite Raft consensus - -### Filesystem Permissions - -DeBros Network stores all configuration and data in `~/.debros/` directory. 
Ensure you have: - -- **Read/Write access** to your home directory (`~`) -- **Available disk space**: At least 10GB for database and logs -- **No restrictive mount options**: The home directory must not be mounted read-only -- **Unix permissions**: Standard user permissions are sufficient (no root/sudo required) - -#### Directory Structure - -DeBros automatically creates the following directory structure: - -``` -~/.debros/ -├── bootstrap.yaml # Bootstrap node config -├── node.yaml # Node config -├── gateway.yaml # Gateway config -├── bootstrap/ # Bootstrap node data (auto-created) -│ ├── rqlite/ # RQLite database files -│ │ ├── db.sqlite # Main database -│ │ ├── raft/ # Raft consensus data -│ │ └── rsnapshots/ # Raft snapshots -│ ├── peer.info # Node multiaddr (created at startup) -│ └── identity.key # Node private key (created at startup) -├── node/ # Node data (auto-created) -│ ├── rqlite/ # RQLite database files -│ ├── raft/ # Raft data -│ ├── peer.info # Node multiaddr (created at startup) -│ └── identity.key # Node private key (created at startup) -└── node2/ # Additional node configs (if running multiple) - └── rqlite/ # RQLite database files -``` - -**Files Created at Startup:** -- `identity.key` - LibP2P private key for the node (generated once, reused) -- `peer.info` - The node's multiaddr (e.g., `/ip4/0.0.0.0/tcp/4001/p2p/12D3KooW...`) - -**Automatic Creation**: The node automatically creates all necessary data directories when started. You only need to ensure: -1. `~/.debros/` is writable -2. Sufficient disk space available -3. Correct config files exist - -**Permission Check:** - -```bash -# Verify home directory is writable -touch ~/test-write && rm ~/test-write && echo "✓ Home directory is writable" - -# Check available disk space -df -h ~ -``` - -**If you get permission errors:** - -``` -Error: Failed to create/access config directory -Please ensure: - 1. Home directory is accessible - 2. You have write permissions to home directory - 3. Disk space is available -``` - -**Solution:** - -- Ensure you're not running with overly restrictive umask: `umask` should show `0022` or similar -- Check home directory permissions: `ls -ld ~` should show your user as owner -- For sandboxed/containerized environments: Ensure `/home/` is writable - ---- +- Distributed SQL backed by RQLite and Raft consensus +- Topic-based pub/sub with automatic cleanup +- Namespace isolation for multi-tenant apps +- Secure transport using libp2p plus Noise/TLS +- Lightweight Go client and CLI tooling ## Quick Start -### 1. Clone and Setup +1. Clone and build the project: -```bash -git clone https://github.com/DeBrosOfficial/network.git -cd network -``` + ```bash + git clone https://github.com/DeBrosOfficial/network.git + cd network + make build + ``` -### 2. Build All Executables +2. Generate local configuration (bootstrap, node2, node3, gateway): -```bash -make build -``` + ```bash + ./bin/network-cli config init + ``` -### 3. Generate Configuration Files +3. Launch the full development stack: -```bash -# Generate all configs (bootstrap, node2, node3, gateway) with one command -./bin/network-cli config init -``` + ```bash + make dev + ``` -This creates: -- `~/.debros/bootstrap.yaml` - Bootstrap node -- `~/.debros/node2.yaml` - Regular node 2 -- `~/.debros/node3.yaml` - Regular node 3 -- `~/.debros/gateway.yaml` - HTTP Gateway + This starts three nodes and the HTTP gateway. Stop with `Ctrl+C`. -Plus auto-generated identities for each node. +4. Validate the network from another terminal: -### 4. 
Start the Complete Network Stack

+   ```bash
+   ./bin/network-cli health
+   ./bin/network-cli peers
+   ./bin/network-cli pubsub publish notifications "Hello World"
+   ./bin/network-cli pubsub subscribe notifications 10s
+   ```

-```bash
-make dev
-```
+## Components & Ports

-This starts:
-- Bootstrap node (P2P: 4001, RQLite HTTP: 5001, Raft: 7001)
-- Node 2 (P2P: 4002, RQLite HTTP: 5002, Raft: 7002)
-- Node 3 (P2P: 4003, RQLite HTTP: 5003, Raft: 7003)
-- Gateway (HTTP: 6001)
+- **Bootstrap node**: P2P `4001`, RQLite HTTP `5001`, Raft `7001`
+- **Additional nodes** (`node2`, `node3`): Incrementing ports (`400{2,3}`, `500{2,3}`, `700{2,3}`)
+- **Gateway**: HTTP `6001` exposes REST/WebSocket APIs
+- **Data directory**: `~/.debros/` stores configs, identities, and RQLite data

-Logs stream to terminal. Press **Ctrl+C** to stop all processes.
+Use `make dev` for the complete stack or run binaries individually with `go run ./cmd/node --config <config-file>` and `go run ./cmd/gateway --config gateway.yaml`.

-### 5. Test with CLI (in another terminal)
+## Configuration Cheatsheet

-```bash
-./bin/network-cli health
-./bin/network-cli peers
-./bin/network-cli pubsub publish notifications "Hello World"
-./bin/network-cli pubsub subscribe notifications 10s
-```
+All runtime configuration lives in `~/.debros/`.

----
+- `bootstrap.yaml`: `type: bootstrap`, blank `database.rqlite_join_address`
+- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. `127.0.0.1:7001`) and list the bootstrap node's multiaddr in `discovery.bootstrap_peers`
+- `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags

-## Deployment & Installation
+Validation reminders:

-### Automated Production Install
+- HTTP and Raft ports must differ
+- Non-bootstrap nodes require a join address and bootstrap peers
+- Bootstrap nodes cannot define a join address
+- Multiaddrs must end with `/p2p/<peer-id>`

-Run the install script for a secure, production-ready setup:
+Regenerate configs any time with `./bin/network-cli config init --force`.

-```bash
-curl -sSL https://github.com/DeBrosOfficial/network/raw/main/scripts/install-debros-network.sh | sudo bash
-```
+## CLI Highlights

-**What the Script Does:**
+All commands accept `--format json`, `--timeout <duration>`, and `--bootstrap <multiaddr>`.
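+
+For example, the global flags compose with any subcommand (values below are illustrative, and exact flag placement depends on the CLI parser):
+
+```bash
+./bin/network-cli peers --format json
+./bin/network-cli health --timeout 30s
+./bin/network-cli status --bootstrap "/ip4/127.0.0.1/tcp/4001/p2p/<peer-id>"
+```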
-- Detects OS, installs Go, RQLite, dependencies -- Creates `debros` system user, secure directory structure -- Generates LibP2P identity keys -- Clones source, builds binaries -- Sets up systemd service (`debros-node`) -- Configures firewall (UFW) for required ports -- Generates YAML config in `/opt/debros/configs/node.yaml` +- **Auth** -**Directory Structure:** + ```bash + ./bin/network-cli auth login + ./bin/network-cli auth status + ./bin/network-cli auth logout + ``` -``` -/opt/debros/ -├── bin/ # Binaries -├── configs/ # YAML configs -├── keys/ # Identity keys -├── data/ # RQLite DB, storage -├── logs/ # Node logs -├── src/ # Source code -``` +- **Network** -**Service Management:** + ```bash + ./bin/network-cli health + ./bin/network-cli status + ./bin/network-cli peers + ``` -```bash -sudo systemctl status debros-node -sudo systemctl start debros-node -sudo systemctl stop debros-node -sudo systemctl restart debros-node -sudo journalctl -u debros-node.service -f -``` +- **Database** ---- + ```bash + ./bin/network-cli query "SELECT * FROM users" + ./bin/network-cli query "CREATE TABLE users (id INTEGER PRIMARY KEY)" + ./bin/network-cli transaction --file ops.json + ``` -## Configuration +- **Pub/Sub** -### Configuration Files Location + ```bash + ./bin/network-cli pubsub publish + ./bin/network-cli pubsub subscribe 30s + ./bin/network-cli pubsub topics + ``` -All configuration files are stored in `~/.debros/` for both local development and production deployments: - -- `~/.debros/node.yaml` - Node configuration -- `~/.debros/node.yaml` - Bootstrap node configuration -- `~/.debros/gateway.yaml` - Gateway configuration - -The system will **only** load config from `~/.debros/` and will error if required config files are missing. - -### Generating Configuration Files - -Use the `network-cli config init` command to generate configuration files: - -### Generate Complete Stack (Recommended) - -```bash -# Generate bootstrap, node2, node3, and gateway configs in one command -./bin/network-cli config init - -# Force regenerate (overwrites existing configs) -./bin/network-cli config init --force -``` - -This is the **recommended way** to get started with a local development network. 
- -### Generate Individual Configs (Advanced) - -For custom setups or production deployments, you can generate individual configs: - -#### Generate a Single Node Config - -```bash -# Generate basic node config with bootstrap peers -./bin/network-cli config init --type node --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/QmXxx" - -# With custom ports -./bin/network-cli config init --type node --name node2.yaml \ - --listen-port 4002 --rqlite-http-port 5002 --rqlite-raft-port 7002 \ - --join localhost:5001 --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/QmXxx" - -# Force overwrite existing config -./bin/network-cli config init --type node --force -``` - -#### Generate a Bootstrap Node Config - -```bash -# Generate bootstrap node (no join address required) -./bin/network-cli config init --type bootstrap - -# With custom ports -./bin/network-cli config init --type bootstrap --listen-port 4001 --rqlite-http-port 5001 --rqlite-raft-port 7001 -``` - -#### Generate a Gateway Config - -```bash -# Generate gateway config -./bin/network-cli config init --type gateway - -# With bootstrap peers -./bin/network-cli config init --type gateway --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/QmXxx" -``` - -### Running the Network - -Once configs are generated, start the complete stack with: - -```bash -make dev -``` - -Or start individual components (in separate terminals): - -```bash -# Terminal 1 - Bootstrap node -go run ./cmd/node --config bootstrap.yaml - -# Terminal 2 - Node 2 -go run ./cmd/node --config node2.yaml - -# Terminal 3 - Node 3 -go run ./cmd/node --config node3.yaml - -# Terminal 4 - Gateway -go run ./cmd/gateway --config gateway.yaml -``` - -### Running Multiple Nodes on the Same Machine - -The default `make dev` creates a 3-node setup. For additional nodes, generate individual configs: - -```bash -# Generate additional node configs with unique ports -./bin/network-cli config init --type node --name node4.yaml \ - --listen-port 4004 --rqlite-http-port 5004 --rqlite-raft-port 7004 \ - --join localhost:5001 \ - --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/" - -# Start the additional node -go run ./cmd/node --config node4.yaml -``` - -#### Key Points for Multiple Nodes - -- **Each node needs unique ports**: P2P port, RQLite HTTP port, and RQLite Raft port must all be different -- **Join address**: Non-bootstrap nodes need `rqlite_join_address` pointing to the bootstrap or an existing node (use Raft port) -- **Bootstrap peers**: All nodes need the bootstrap node's multiaddr in `discovery.bootstrap_peers` -- **Config files**: Store all configs in `~/.debros/` with different filenames -- **--config flag**: Specify which config file to load - -⚠️ **Common Mistake - Same Ports:** -If all nodes use the same ports (e.g., 5001, 7001), they will try to bind to the same addresses and fail to communicate. 
Verify each node has unique ports: - -```bash -# Bootstrap -grep "rqlite_port\|rqlite_raft_port" ~/.debros/bootstrap.yaml -# Should show: rqlite_port: 5001, rqlite_raft_port: 7001 - -# Node 2 -grep "rqlite_port\|rqlite_raft_port" ~/.debros/node2.yaml -# Should show: rqlite_port: 5002, rqlite_raft_port: 7002 - -# Node 3 -grep "rqlite_port\|rqlite_raft_port" ~/.debros/node3.yaml -# Should show: rqlite_port: 5003, rqlite_raft_port: 7003 -``` - -If ports are wrong, regenerate the config with `--force`: - -```bash -./bin/network-cli config init --type node --name node.yaml \ - --listen-port 4002 --rqlite-http-port 5002 --rqlite-raft-port 7002 \ - --join localhost:5001 --bootstrap-peers '' --force -``` - -### Validating Configuration - -DeBros Network performs strict validation of all configuration files at startup. This ensures invalid configurations are caught immediately rather than causing silent failures later. - -#### Validation Features - -- **Strict YAML Parsing:** Unknown configuration keys are rejected with helpful error messages -- **Format Validation:** Multiaddrs, ports, durations, and other formats are validated for correctness -- **Cross-Field Validation:** Configuration constraints (e.g., bootstrap nodes don't join clusters) are enforced -- **Aggregated Error Reporting:** All validation errors are reported together, not one-by-one - -#### Common Validation Errors - -**Missing or Invalid `node.type`** -``` -node.type: must be one of [bootstrap node]; got "invalid" -``` -Solution: Set `type: "bootstrap"` or `type: "node"` - -**Invalid Bootstrap Peer Format** -``` -discovery.bootstrap_peers[0]: invalid multiaddr; expected /ip{4,6}/.../tcp//p2p/ -discovery.bootstrap_peers[0]: missing /p2p/ component -``` -Solution: Use full multiaddr format: `/ip4/127.0.0.1/tcp/4001/p2p/12D3KooW...` - -**Port Conflicts** -``` -database.rqlite_raft_port: must differ from database.rqlite_port (5001) -``` -Solution: Use different ports for HTTP and Raft (e.g., 5001 and 7001) - -**RQLite Join Address Issues (Nodes)** -``` -database.rqlite_join_address: required for node type (non-bootstrap) -database.rqlite_join_address: invalid format; expected host:port -``` -Solution: Non-bootstrap nodes must specify where to join the cluster. Use Raft port: `127.0.0.1:7001` - -**Bootstrap Nodes Cannot Join** -``` -database.rqlite_join_address: must be empty for bootstrap type -``` -Solution: Bootstrap nodes should have `rqlite_join_address: ""` - -**Invalid Listen Addresses** -``` -node.listen_addresses[0]: invalid TCP port 99999; port must be between 1 and 65535 -``` -Solution: Use valid ports [1-65535], e.g., `/ip4/0.0.0.0/tcp/4001` - -**Unknown Configuration Keys** -``` -invalid config: yaml: unmarshal errors: - line 42: field migrations_path not found in type config.DatabaseConfig -``` -Solution: Remove unsupported keys. Supported keys are documented in the YAML Reference section above. 
- ---- - -## CLI Usage - -### Authentication Commands - -```bash -./bin/network-cli auth login # Authenticate with wallet -./bin/network-cli auth whoami # Show current authentication status -./bin/network-cli auth status # Show detailed authentication info -./bin/network-cli auth logout # Clear stored credentials -``` - -### Network Operations - -```bash -./bin/network-cli health # Check network health -./bin/network-cli status # Get network status -./bin/network-cli peers # List connected peers -``` - -### Database Operations - -```bash -./bin/network-cli query "SELECT * FROM table" # Execute SQL -./bin/network-cli query "CREATE TABLE users (id INTEGER)" # DDL operations -``` - -### Pub/Sub Messaging - -```bash -./bin/network-cli pubsub publish # Send message -./bin/network-cli pubsub subscribe [duration] # Listen for messages -./bin/network-cli pubsub topics # List active topics -``` - -### CLI Options - -```bash ---format json # Output in JSON format ---timeout 30s # Set operation timeout ---bootstrap # Override bootstrap peer ---production # Use production bootstrap peers -``` - -### Database Operations (Gateway REST) - -```http -POST /v1/rqlite/exec # Body: {"sql": "INSERT/UPDATE/DELETE/DDL ...", "args": [...]} -POST /v1/rqlite/find # Body: {"table":"...", "criteria":{"col":val,...}, "options":{...}} -POST /v1/rqlite/find-one # Body: same as /find, returns a single row (404 if not found) -POST /v1/rqlite/select # Body: {"table":"...", "select":[...], "where":[...], "joins":[...], "order_by":[...], "limit":N, "offset":N, "one":false} -POST /v1/rqlite/transaction # Body: {"ops":[{"kind":"exec|query","sql":"...","args":[...]}], "return_results": true} -POST /v1/rqlite/query # Body: {"sql": "SELECT ...", "args": [..]} (legacy-friendly SELECT) -GET /v1/rqlite/schema # Returns tables/views + create SQL -POST /v1/rqlite/create-table # Body: {"schema": "CREATE TABLE ..."} -POST /v1/rqlite/drop-table # Body: {"table": "table_name"} -``` - -Common workflows: - -```bash -# Exec (INSERT/UPDATE/DELETE/DDL) -curl -X POST "$GW/v1/rqlite/exec" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{"sql":"INSERT INTO users(name,email) VALUES(?,?)","args":["Alice","alice@example.com"]}' - -# Find (criteria + options) -curl -X POST "$GW/v1/rqlite/find" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{ - "table":"users", - "criteria":{"active":true}, - "options":{"select":["id","email"],"order_by":["created_at DESC"],"limit":25} - }' - -# Select (fluent builder via JSON) -curl -X POST "$GW/v1/rqlite/select" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{ - "table":"orders o", - "select":["o.id","o.total","u.email AS user_email"], - "joins":[{"kind":"INNER","table":"users u","on":"u.id = o.user_id"}], - "where":[{"conj":"AND","expr":"o.total > ?","args":[100]}], - "order_by":["o.created_at DESC"], - "limit":10 - }' - -# Transaction (atomic batch) -curl -X POST "$GW/v1/rqlite/transaction" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{ - "return_results": true, - "ops": [ - {"kind":"exec","sql":"INSERT INTO users(email) VALUES(?)","args":["bob@example.com"]}, - {"kind":"query","sql":"SELECT last_insert_rowid() AS id","args":[]} - ] - }' - -# Schema -curl "$GW/v1/rqlite/schema" -H "Authorization: Bearer $API_KEY" - -# DDL helpers -curl -X POST "$GW/v1/rqlite/create-table" -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{"schema":"CREATE 
TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)"}' -curl -X POST "$GW/v1/rqlite/drop-table" -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{"table":"users"}' -``` - -### Authentication - -The CLI features an enhanced authentication system with explicit command support and automatic wallet detection: - -#### Explicit Authentication Commands - -Use the `auth` command to manage your credentials: - -```bash -# Authenticate with your wallet (opens browser for signature) -./bin/network-cli auth login - -# Check if you're authenticated -./bin/network-cli auth whoami - -# View detailed authentication info -./bin/network-cli auth status - -# Clear all stored credentials -./bin/network-cli auth logout -``` - -Credentials are stored securely in `~/.debros/credentials.json` with restricted file permissions (readable only by owner). - -#### Key Features - -- **Explicit Authentication:** Use `auth login` command to authenticate with your wallet -- **Automatic Authentication:** Commands that require auth (query, pubsub, etc.) automatically prompt if needed -- **Multi-Wallet Management:** Seamlessly switch between multiple wallet credentials -- **Persistent Sessions:** Wallet credentials are automatically saved and restored between sessions -- **Enhanced User Experience:** Streamlined authentication flow with better error handling and user feedback - -#### Automatic Authentication Flow - -When using operations that require authentication (query, pubsub publish/subscribe), the CLI will automatically: - -1. Check for existing valid credentials -2. Prompt for wallet authentication if needed -3. Handle signature verification -4. Persist credentials for future use - -**Example with automatic authentication:** - -```bash -# First time - will prompt for wallet authentication when needed -./bin/network-cli pubsub publish notifications "Hello World" -``` - -#### Environment Variables - -You can override the gateway URL used for authentication: - -```bash -export DEBROS_GATEWAY_URL="http://localhost:6001" -./bin/network-cli auth login -``` - ---- +Credentials live at `~/.debros/credentials.json` with user-only permissions. ## HTTP Gateway -The DeBros Network includes a powerful HTTP/WebSocket gateway that provides a modern REST API and WebSocket interface over the P2P network, featuring an enhanced authentication system with multi-wallet support. +Start locally with `make run-gateway` or `go run ./cmd/gateway --config gateway.yaml`. 
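+
+This patch introduces two Olric-related gateway keys: `olric_servers` (a list of cache endpoints) and `olric_timeout` (parsed with `time.ParseDuration`). A minimal sketch of how they might sit in `gateway.yaml`; only those two key names come from the patch, the values and comments are assumptions:
+
+```yaml
+# Sketch: Olric settings in ~/.debros/gateway.yaml
+olric_servers:
+  - "127.0.0.1:3320"   # assumed Olric client port; the install script opens 3320/3322
+olric_timeout: "5s"    # any Go duration string, e.g. "500ms" or "5s"
+```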
-### Quick Start +Environment overrides: ```bash -make run-gateway -# Or manually: -go run ./cmd/gateway -``` - -### Configuration - -The gateway can be configured via configs/gateway.yaml and environment variables (env override YAML): - -```bash -# Basic Configuration export GATEWAY_ADDR="0.0.0.0:6001" export GATEWAY_NAMESPACE="my-app" -export GATEWAY_BOOTSTRAP_PEERS="/ip4/127.0.0.1/tcp/4001/p2p/YOUR_PEER_ID" - -# Authentication Configuration +export GATEWAY_BOOTSTRAP_PEERS="/ip4/127.0.0.1/tcp/4001/p2p/" export GATEWAY_REQUIRE_AUTH=true export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2" ``` -### Enhanced Authentication System +Common endpoints (see `openapi/gateway.yaml` for the full spec): -The gateway features a significantly improved authentication system with the following capabilities: - -#### Key Features - -- **Automatic Authentication:** No manual auth commands required - authentication happens automatically when needed -- **Multi-Wallet Support:** Seamlessly manage multiple wallet credentials with automatic switching -- **Persistent Sessions:** Wallet credentials are automatically saved and restored -- **Enhanced User Experience:** Streamlined authentication flow with better error handling - -#### Authentication Methods - -**Wallet-Based Authentication (Ethereum EIP-191)** - -- Uses `personal_sign` for secure wallet verification -- Supports multiple wallets with automatic detection -- Addresses are case-insensitive with normalized signature handling - -**JWT Tokens** - -- Issued by the gateway with configurable expiration -- JWKS endpoints available at `/v1/auth/jwks` and `/.well-known/jwks.json` -- Automatic refresh capability - -**API Keys** - -- Support for pre-configured API keys via `Authorization: Bearer ` or `X-API-Key` headers -- Optional namespace mapping for multi-tenant applications - -### API Endpoints - -#### Health & Status - -```http -GET /health # Basic health check -GET /v1/health # Detailed health status -GET /v1/status # Network status -GET /v1/version # Version information -``` - -#### Authentication (Public Endpoints) - -```http -POST /v1/auth/challenge # Generate wallet challenge -POST /v1/auth/verify # Verify wallet signature -POST /v1/auth/register # Register new wallet -POST /v1/auth/refresh # Refresh JWT token -POST /v1/auth/logout # Clear authentication -GET /v1/auth/whoami # Current auth status -POST /v1/auth/api-key # Generate API key (authenticated) -``` - -#### RQLite HTTP ORM Gateway (/v1/db) - -The gateway now exposes a full HTTP interface over the Go ORM-like client (see `pkg/rqlite/gateway.go`) so you can build SDKs in any language. - -- Base path: `/v1/db` -- Endpoints: - - `POST /v1/rqlite/exec` — Execute write/DDL SQL; returns `{ rows_affected, last_insert_id }` - - `POST /v1/rqlite/find` — Map-based criteria; returns `{ items: [...], count: N }` - - `POST /v1/rqlite/find-one` — Single row; 404 if not found - - `POST /v1/rqlite/select` — Fluent SELECT via JSON (joins, where, order, group, limit, offset) - - `POST /v1/rqlite/transaction` — Atomic batch of exec/query ops, optional per-op results - - `POST /v1/rqlite/query` — Arbitrary SELECT (legacy-friendly), returns `items` - - `GET /v1/rqlite/schema` — List user tables/views + create SQL - - `POST /v1/rqlite/create-table` — Convenience for DDL - - `POST /v1/rqlite/drop-table` — Safe drop (identifier validated) - -Payload examples are shown in the [Database Operations (Gateway REST)](#database-operations-gateway-rest) section. 
- -#### Network Operations - -```http -GET /v1/network/status # Network status -GET /v1/network/peers # Connected peers -POST /v1/network/connect # Connect to peer -POST /v1/network/disconnect # Disconnect from peer -``` - -#### Pub/Sub Messaging - -**WebSocket Interface** - -```http -GET /v1/pubsub/ws?topic= # WebSocket connection for real-time messaging -``` - -**REST Interface** - -```http -POST /v1/pubsub/publish # Publish message to topic -GET /v1/pubsub/topics # List active topics -``` - ---- - -## SDK Authoring Guide - -### Base concepts - -- OpenAPI: a machine-readable spec is available at `openapi/gateway.yaml` for SDK code generation. -- **Auth**: send `X-API-Key: ` or `Authorization: Bearer ` with every request. -- **Versioning**: all endpoints are under `/v1/`. -- **Responses**: mutations return `{status:"ok"}`; queries/lists return JSON; errors return `{ "error": "message" }` with proper HTTP status. - -### Key HTTP endpoints for SDKs - -- **Database** - - Exec: `POST /v1/rqlite/exec` `{sql, args?}` → `{rows_affected,last_insert_id}` - - Find: `POST /v1/rqlite/find` `{table, criteria, options?}` → `{items,count}` - - FindOne: `POST /v1/rqlite/find-one` `{table, criteria, options?}` → single object or 404 - - Select: `POST /v1/rqlite/select` `{table, select?, joins?, where?, order_by?, group_by?, limit?, offset?, one?}` - - Transaction: `POST /v1/rqlite/transaction` `{ops:[{kind,sql,args?}], return_results?}` - - Query: `POST /v1/rqlite/query` `{sql, args?}` → `{items,count}` - - Schema: `GET /v1/rqlite/schema` - - Create Table: `POST /v1/rqlite/create-table` `{schema}` - - Drop Table: `POST /v1/rqlite/drop-table` `{table}` -- **PubSub** - - WS Subscribe: `GET /v1/pubsub/ws?topic=` - - Publish: `POST /v1/pubsub/publish` `{topic, data_base64}` → `{status:"ok"}` - - Topics: `GET /v1/pubsub/topics` → `{topics:[...]}` - ---- +- `GET /health`, `GET /v1/status`, `GET /v1/version` +- `POST /v1/auth/challenge`, `POST /v1/auth/verify`, `POST /v1/auth/refresh` +- `POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction` +- `GET /v1/rqlite/schema` +- `POST /v1/pubsub/publish`, `GET /v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=` ## Troubleshooting -### Configuration & Permissions +- **Config directory errors**: Ensure `~/.debros/` exists, is writable, and has free disk space (`touch ~/.debros/test && rm ~/.debros/test`). +- **Port conflicts**: Inspect with `lsof -i :4001` (or other ports) and stop conflicting processes or regenerate configs with new ports. +- **Missing configs**: Run `./bin/network-cli config init` before starting nodes. +- **Cluster join issues**: Confirm the bootstrap node is running, `peer.info` multiaddr matches `bootstrap_peers`, and firewall rules allow the P2P ports. -**Error: "Failed to create/access config directory"** +## Resources -This happens when DeBros cannot access or create `~/.debros/` directory. - -**Causes:** -1. Home directory is not writable -2. Home directory doesn't exist -3. Filesystem is read-only (sandboxed/containerized environment) -4. 
Permission denied (running with wrong user/umask) - -**Solutions:** - -```bash -# Check home directory exists and is writable -ls -ld ~ -touch ~/test-write && rm ~/test-write - -# Check umask (should be 0022 or 0002) -umask - -# If umask is too restrictive, change it -umask 0022 - -# Check disk space -df -h ~ - -# For containerized environments, ensure /home/ is mounted with write permissions -docker run -v /home:/home --user $(id -u):$(id -g) debros-network -``` - -**Error: "Config file not found at ~/.debros/node.yaml"** - -The node requires a config file to exist before starting. - -**Solution:** - -Generate config files first: - -```bash -# Build CLI -make build - -# Generate configs -./bin/network-cli config init --type bootstrap -./bin/network-cli config init --type node --bootstrap-peers '' -./bin/network-cli config init --type gateway -``` - -### Node Startup Issues - -**Error: "node.data_dir: parent directory not writable"** - -The data directory parent is not accessible. - -**Solution:** - -Ensure `~/.debros` is writable and has at least 10GB free space: - -```bash -# Check permissions -ls -ld ~/.debros - -# Check available space -df -h ~/.debros - -# Recreate if corrupted -rm -rf ~/.debros -./bin/network-cli config init --type bootstrap -``` - -**Error: "failed to create data directory"** - -The node cannot create its data directory in `~/.debros`. - -**Causes:** -1. `~/.debros` is not writable -2. Parent directory path in config uses `~` which isn't expanded properly -3. Disk is full - -**Solutions:** - -```bash -# Check ~/.debros exists and is writable -mkdir -p ~/.debros -ls -ld ~/.debros - -# Verify data_dir in config uses ~ (e.g., ~/.debros/node) -cat ~/.debros/node.yaml | grep data_dir - -# Check disk space -df -h ~ - -# Ensure user owns ~/.debros -chown -R $(whoami) ~/.debros - -# Retry node startup -make run-node -``` - -**Error: "stat ~/.debros: no such file or directory"** - -**Port Already in Use** - -If you get "address already in use" errors: - -```bash -# Find processes using ports -lsof -i :4001 # P2P port -lsof -i :5001 # RQLite HTTP -lsof -i :7001 # RQLite Raft - -# Kill if needed -kill -9 - -# Or use different ports in config -./bin/network-cli config init --type node --listen-port 4002 --rqlite-http-port 5002 --rqlite-raft-port 7002 -``` - -### Common Configuration Errors - -**Error: "discovery.bootstrap_peers: required for node type"** - -Nodes (non-bootstrap) must specify bootstrap peers to discover the network. - -**Solution:** - -Generate node config with bootstrap peers: - -```bash -./bin/network-cli config init --type node --bootstrap-peers '/ip4/127.0.0.1/tcp/4001/p2p/12D3KooW...' -``` - -**Error: "database.rqlite_join_address: required for node type"** - -Non-bootstrap nodes must specify which node to join in the Raft cluster. - -**Solution:** - -Generate config with join address: - -```bash -./bin/network-cli config init --type node --join localhost:5001 -``` - -**Error: "database.rqlite_raft_port: must differ from database.rqlite_port"** - -HTTP and Raft ports cannot be the same. - -**Solution:** - -Use different ports (RQLite HTTP and Raft must be on different ports): - -```bash -./bin/network-cli config init --type node \ - --rqlite-http-port 5001 \ - --rqlite-raft-port 7001 -``` - -### Peer Discovery Issues - -If nodes can't find each other: - -1. **Verify bootstrap node is running:** - ```bash - ./bin/network-cli health - ./bin/network-cli peers - ``` - -2. 
**Check bootstrap peer multiaddr is correct:** - ```bash - cat ~/.debros/bootstrap/peer.info # On bootstrap node - # Should match value in other nodes' discovery.bootstrap_peers - ``` - -3. **Ensure all nodes have same bootstrap peers in config** - -4. **Check firewall/network:** - ```bash - # Verify P2P port is open - nc -zv 127.0.0.1 4001 - ``` - ---- - -## License \ No newline at end of file +- Go modules: `go mod tidy`, `go test ./...` +- Automation: `make build`, `make dev`, `make run-gateway`, `make lint` +- API reference: `openapi/gateway.yaml` +- Code of Conduct: [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) diff --git a/cmd/gateway/config.go b/cmd/gateway/config.go index e10763c..d8d1864 100644 --- a/cmd/gateway/config.go +++ b/cmd/gateway/config.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" "strings" + "time" "github.com/DeBrosOfficial/network/pkg/config" "github.com/DeBrosOfficial/network/pkg/gateway" @@ -57,6 +58,8 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { EnableHTTPS bool `yaml:"enable_https"` DomainName string `yaml:"domain_name"` TLSCacheDir string `yaml:"tls_cache_dir"` + OlricServers []string `yaml:"olric_servers"` + OlricTimeout string `yaml:"olric_timeout"` } data, err := os.ReadFile(configPath) @@ -86,6 +89,8 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { EnableHTTPS: false, DomainName: "", TLSCacheDir: "", + OlricServers: nil, + OlricTimeout: 0, } if v := strings.TrimSpace(y.ListenAddr); v != "" { @@ -125,6 +130,18 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { } } + // Olric configuration + if len(y.OlricServers) > 0 { + cfg.OlricServers = y.OlricServers + } + if v := strings.TrimSpace(y.OlricTimeout); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + cfg.OlricTimeout = parsed + } else { + logger.ComponentWarn(logging.ComponentGeneral, "invalid olric_timeout, using default", zap.String("value", v), zap.Error(err)) + } + } + // Validate configuration if errs := cfg.ValidateConfig(); len(errs) > 0 { fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs)) diff --git a/go.mod b/go.mod index 0b1a4b6..d2cec41 100644 --- a/go.mod +++ b/go.mod @@ -11,21 +11,28 @@ require ( github.com/libp2p/go-libp2p-pubsub v0.14.2 github.com/mackerelio/go-osstat v0.2.6 github.com/multiformats/go-multiaddr v0.15.0 + github.com/olric-data/olric v0.7.0 github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.40.0 golang.org/x/net v0.42.0 gopkg.in/yaml.v3 v3.0.1 ) require ( + github.com/RoaringBitmap/roaring v1.9.4 // indirect + github.com/armon/go-metrics v0.4.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.22.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/buraksezer/consistent v0.10.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/gosigar v0.14.3 // indirect github.com/flynn/noise v1.1.0 // indirect @@ -33,10 +40,20 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 
github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-sockaddr v1.0.7 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/memberlist v0.5.3 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect @@ -60,6 +77,7 @@ require ( github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mschoch/smat v0.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect @@ -101,14 +119,20 @@ require ( github.com/quic-go/quic-go v0.50.1 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/redis/go-redis/v9 v9.8.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/redcon v1.6.2 // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/wlynxg/anet v0.0.5 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.40.0 // indirect golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect golang.org/x/mod v0.26.0 // indirect golang.org/x/sync v0.16.0 // indirect @@ -116,5 +140,6 @@ require ( golang.org/x/text v0.27.0 // indirect golang.org/x/tools v0.35.0 // indirect google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/go.sum b/go.sum index 33dd50c..69f9844 100644 --- a/go.sum +++ b/go.sum @@ -8,22 +8,45 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ= +github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buraksezer/consistent v0.10.0 h1:hqBgz1PvNLC5rkWcEBVAL9dFMBWz6I0VgUCW25rrZlU= +github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= @@ -43,6 +66,8 @@ github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -61,8 +86,15 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -79,13 +111,29 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -101,8 +149,33 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack/v2 v2.1.3 h1:cB1w4Zrk0O3jQBTcFMKqYQWRFfsSQ/TYKNyUUVyCP2c= +github.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+MCUn39TxymNR5ZU= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk= +github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= @@ -116,8 +189,14 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -125,8 +204,11 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk= github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -178,11 +260,15 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -207,8 +293,12 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/olric-data/olric v0.7.0 h1:EKN2T6ZTtdu8Un0jV0KOWVxWm9odptJpefmDivfZdjE= +github.com/olric-data/olric v0.7.0/go.mod h1:+ZnPpgc8JkNkza8rETCKGn0P/QPF6HhZY0EbCKAOslo= github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= @@ -217,6 +307,8 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ 
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= @@ -261,21 +353,38 @@ github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM= github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA= github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q= github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= 
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= @@ -286,12 +395,16 @@ github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6 github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE= github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -316,16 +429,22 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -333,9 +452,21 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/redcon v1.6.2 h1:5qfvrrybgtO85jnhSravmkZyC0D+7WstbfCs3MmPhow= +github.com/tidwall/redcon v1.6.2/go.mod h1:p5Wbsgeyi2VSTBWOcA5vRXrOb9arFTcU2+ZzFjqV75Y= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.4.1 
h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= @@ -357,6 +488,7 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -390,12 +522,15 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -419,6 +554,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -426,16 +562,26 @@ golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -456,6 +602,7 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -502,15 +649,29 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index c1a7ed7..bc245d1 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -62,11 +62,12 @@ func HandleSetupCommand(args []string) { fmt.Printf(" 3. Install Go 1.21+ (if needed)\n") fmt.Printf(" 4. Install RQLite database\n") fmt.Printf(" 5. 
Install Anyone Relay (Anon) for anonymous networking\n") - fmt.Printf(" 6. Create directories (/home/debros/bin, /home/debros/src)\n") - fmt.Printf(" 7. Clone and build DeBros Network\n") - fmt.Printf(" 8. Generate configuration files\n") - fmt.Printf(" 9. Create systemd services (debros-node, debros-gateway)\n") - fmt.Printf(" 10. Start and enable services\n") + fmt.Printf(" 6. Install Olric cache server\n") + fmt.Printf(" 7. Create directories (/home/debros/bin, /home/debros/src)\n") + fmt.Printf(" 8. Clone and build DeBros Network\n") + fmt.Printf(" 9. Generate configuration files\n") + fmt.Printf(" 10. Create systemd services (debros-node, debros-gateway, debros-olric)\n") + fmt.Printf(" 11. Start and enable services\n") fmt.Printf(strings.Repeat("=", 70) + "\n\n") fmt.Printf("Ready to begin setup? (yes/no): ") @@ -92,6 +93,9 @@ func HandleSetupCommand(args []string) { // Step 4.5: Install Anon (Anyone relay) installAnon() + // Step 4.6: Install Olric cache server + installOlric() + // Step 5: Setup directories setupDirectories() @@ -1037,6 +1041,132 @@ func configureFirewallForAnon() { fmt.Printf(" No active firewall detected\n") } +func installOlric() { + fmt.Printf("💾 Installing Olric cache server...\n") + + // Check if already installed + if _, err := exec.LookPath("olric-server"); err == nil { + fmt.Printf(" ✓ Olric already installed\n") + configureFirewallForOlric() + return + } + + // Ensure Go is available (required for go install) + if _, err := exec.LookPath("go"); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Go not found - cannot install Olric. Please install Go first.\n") + return + } + + fmt.Printf(" Installing Olric server via go install...\n") + cmd := exec.Command("go", "install", "github.com/olric-data/olric/cmd/olric-server@v0.7.0") + cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin") + if output, err := cmd.CombinedOutput(); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to install Olric: %v\n", err) + if len(output) > 0 { + fmt.Fprintf(os.Stderr, " Output: %s\n", string(output)) + } + fmt.Fprintf(os.Stderr, " You can manually install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0\n") + return + } + + // Verify installation + if _, err := exec.LookPath("olric-server"); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Olric installation verification failed: binary not found in PATH\n") + fmt.Fprintf(os.Stderr, " Make sure /usr/local/bin is in PATH\n") + return + } + + fmt.Printf(" ✓ Olric installed\n") + + // Configure firewall + configureFirewallForOlric() + + // Create Olric config directory + olricConfigDir := "/home/debros/.debros/olric" + if err := os.MkdirAll(olricConfigDir, 0755); err == nil { + configPath := olricConfigDir + "/config.yaml" + if _, err := os.Stat(configPath); os.IsNotExist(err) { + configContent := `memberlist: + bind-addr: "0.0.0.0" + bind-port: 3322 +client: + bind-addr: "0.0.0.0" + bind-port: 3320 + +# Durability and replication configuration +# Replicates data across entire network for fault tolerance +dmaps: + default: + replication: + mode: sync # Synchronous replication for durability + replica_count: 2 # Replicate to 2 backup nodes (3 total copies: 1 primary + 2 backups) + write_quorum: 2 # Require 2 nodes to acknowledge writes + read_quorum: 1 # Read from 1 node (faster reads) + read_repair: true # Enable read-repair for consistency + +# Split-brain protection +member_count_quorum: 2 # Require at least 2 nodes to operate (prevents split-brain) +` + if err := os.WriteFile(configPath, []byte(configContent), 0644); err 
== nil { + exec.Command("chown", "debros:debros", configPath).Run() + fmt.Printf(" ✓ Olric config created at %s\n", configPath) + } + } + exec.Command("chown", "-R", "debros:debros", olricConfigDir).Run() + } +} + +func configureFirewallForOlric() { + fmt.Printf(" Checking firewall configuration for Olric...\n") + + // Check for UFW + if _, err := exec.LookPath("ufw"); err == nil { + output, _ := exec.Command("ufw", "status").CombinedOutput() + if strings.Contains(string(output), "Status: active") { + fmt.Printf(" Adding UFW rules for Olric...\n") + exec.Command("ufw", "allow", "3320/tcp", "comment", "Olric HTTP API").Run() + exec.Command("ufw", "allow", "3322/tcp", "comment", "Olric Memberlist").Run() + fmt.Printf(" ✓ UFW rules added for Olric\n") + return + } + } + + // Check for firewalld + if _, err := exec.LookPath("firewall-cmd"); err == nil { + output, _ := exec.Command("firewall-cmd", "--state").CombinedOutput() + if strings.Contains(string(output), "running") { + fmt.Printf(" Adding firewalld rules for Olric...\n") + exec.Command("firewall-cmd", "--permanent", "--add-port=3320/tcp").Run() + exec.Command("firewall-cmd", "--permanent", "--add-port=3322/tcp").Run() + exec.Command("firewall-cmd", "--reload").Run() + fmt.Printf(" ✓ firewalld rules added for Olric\n") + return + } + } + + // Check for iptables + if _, err := exec.LookPath("iptables"); err == nil { + output, _ := exec.Command("iptables", "-L", "-n").CombinedOutput() + if strings.Contains(string(output), "Chain INPUT") { + fmt.Printf(" Adding iptables rules for Olric...\n") + exec.Command("iptables", "-A", "INPUT", "-p", "tcp", "--dport", "3320", "-j", "ACCEPT", "-m", "comment", "--comment", "Olric HTTP API").Run() + exec.Command("iptables", "-A", "INPUT", "-p", "tcp", "--dport", "3322", "-j", "ACCEPT", "-m", "comment", "--comment", "Olric Memberlist").Run() + + // Try to save rules + if _, err := exec.LookPath("netfilter-persistent"); err == nil { + exec.Command("netfilter-persistent", "save").Run() + } else if _, err := exec.LookPath("iptables-save"); err == nil { + cmd := exec.Command("sh", "-c", "iptables-save > /etc/iptables/rules.v4") + cmd.Run() + } + fmt.Printf(" ✓ iptables rules added for Olric\n") + return + } + } + + fmt.Printf(" No active firewall detected for Olric\n") +} + func setupDirectories() { fmt.Printf("📁 Creating directories...\n") @@ -1285,6 +1415,19 @@ func generateConfigsInteractive(force bool) { // Fix ownership exec.Command("chown", "debros:debros", nodeConfigPath).Run() fmt.Printf(" ✓ Node config created: %s\n", nodeConfigPath) + + // Generate Olric config file for this node (uses multicast discovery) + var olricConfigPath string + if isBootstrap { + olricConfigPath = "/home/debros/.debros/bootstrap/olric-config.yaml" + } else { + olricConfigPath = "/home/debros/.debros/node/olric-config.yaml" + } + if err := generateOlricConfig(olricConfigPath, vpsIP, 3320, 3322); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to generate Olric config: %v\n", err) + } else { + fmt.Printf(" ✓ Olric config created: %s\n", olricConfigPath) + } } // Generate gateway config @@ -1334,9 +1477,20 @@ func generateConfigsInteractive(force bool) { } } + // For Olric servers, use localhost for local dev, or current node IP + // In production, gateway will discover Olric nodes via LibP2P network + var olricServers []string + if bootstrapPeers == "" { + // Local development - use localhost + olricServers = []string{"localhost:3320"} + } else { + // Production - start with current node, will discover others via LibP2P + 
olricServers = []string{fmt.Sprintf("%s:3320", vpsIP)} + } + // Gateway config should include bootstrap peers if this is a regular node // (bootstrap nodes don't need bootstrap peers since they are the bootstrap) - gatewayConfig := generateGatewayConfigDirect(bootstrapPeers, enableHTTPS, domain, tlsCacheDir) + gatewayConfig := generateGatewayConfigDirect(bootstrapPeers, enableHTTPS, domain, tlsCacheDir, olricServers) if err := os.WriteFile(gatewayPath, []byte(gatewayConfig), 0644); err != nil { fmt.Fprintf(os.Stderr, "❌ Failed to write gateway config: %v\n", err) os.Exit(1) @@ -1429,6 +1583,10 @@ func generateNodeConfigWithIP(name, id string, listenPort, rqliteHTTPPort, rqlit joinAddr = fmt.Sprintf("localhost:%d", rqliteHTTPPort) } + // Generate Olric config file for regular node (uses multicast discovery) + olricConfigPath := "/home/debros/.debros/node/olric-config.yaml" + generateOlricConfig(olricConfigPath, ipAddr, 3320, 3322) + return fmt.Sprintf(`node: id: "%s" type: "node" @@ -1468,7 +1626,7 @@ logging: } // generateGatewayConfigDirect generates gateway config directly -func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain, tlsCacheDir string) string { +func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain, tlsCacheDir string, olricServers []string) string { var peers []string if bootstrapPeers != "" { for _, p := range strings.Split(bootstrapPeers, ",") { @@ -1499,12 +1657,71 @@ func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain fmt.Fprintf(&httpsYAML, "enable_https: false\n") } + // Olric servers configuration + var olricYAML strings.Builder + if len(olricServers) > 0 { + olricYAML.WriteString("olric_servers:\n") + for _, server := range olricServers { + fmt.Fprintf(&olricYAML, " - \"%s\"\n", server) + } + } else { + // Default to localhost for local development + olricYAML.WriteString("olric_servers:\n") + olricYAML.WriteString(" - \"localhost:3320\"\n") + } + return fmt.Sprintf(`listen_addr: ":6001" client_namespace: "default" rqlite_dsn: "" %s %s -`, peersYAML.String(), httpsYAML.String()) +%s +`, peersYAML.String(), httpsYAML.String(), olricYAML.String()) +} + +// generateOlricConfig generates an Olric configuration file +// Uses multicast discovery - peers will be discovered dynamically via LibP2P network +func generateOlricConfig(configPath, bindIP string, httpPort, memberlistPort int) error { + // Ensure directory exists + dir := filepath.Dir(configPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create Olric config directory: %w", err) + } + + var config strings.Builder + config.WriteString("memberlist:\n") + config.WriteString(fmt.Sprintf(" bind-addr: \"%s\"\n", bindIP)) + config.WriteString(fmt.Sprintf(" bind-port: %d\n", memberlistPort)) + config.WriteString(" # Multicast discovery enabled - peers discovered dynamically via LibP2P network\n") + + config.WriteString("client:\n") + config.WriteString(fmt.Sprintf(" bind-addr: \"%s\"\n", bindIP)) + config.WriteString(fmt.Sprintf(" bind-port: %d\n", httpPort)) + + // Durability and replication settings + config.WriteString("\n# Durability and replication configuration\n") + config.WriteString("# Replicates data across entire network for fault tolerance\n") + config.WriteString("dmaps:\n") + config.WriteString(" default:\n") + config.WriteString(" replication:\n") + config.WriteString(" mode: sync # Synchronous replication for durability\n") + config.WriteString(" replica_count: 2 # Replicate to 2 backup 
nodes (3 total copies: 1 primary + 2 backups)\n") + config.WriteString(" write_quorum: 2 # Require 2 nodes to acknowledge writes\n") + config.WriteString(" read_quorum: 1 # Read from 1 node (faster reads)\n") + config.WriteString(" read_repair: true # Enable read-repair for consistency\n") + + // Split-brain protection + config.WriteString("\n# Split-brain protection\n") + config.WriteString("member_count_quorum: 2 # Require at least 2 nodes to operate (prevents split-brain)\n") + + // Write config file + if err := os.WriteFile(configPath, []byte(config.String()), 0644); err != nil { + return fmt.Errorf("failed to write Olric config: %w", err) + } + + // Fix ownership + exec.Command("chown", "debros:debros", configPath).Run() + return nil } func createSystemdServices() { diff --git a/pkg/config/config.go b/pkg/config/config.go index 5784597..4314198 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -41,6 +41,10 @@ type DatabaseConfig struct { ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h MinClusterSize int `yaml:"min_cluster_size"` // default: 1 + + // Olric cache configuration + OlricHTTPPort int `yaml:"olric_http_port"` // Olric HTTP API port (default: 3320) + OlricMemberlistPort int `yaml:"olric_memberlist_port"` // Olric memberlist port (default: 3322) } // DiscoveryConfig contains peer discovery configuration @@ -116,6 +120,10 @@ func DefaultConfig() *Config { ClusterSyncInterval: 30 * time.Second, PeerInactivityLimit: 24 * time.Hour, MinClusterSize: 1, + + // Olric cache configuration + OlricHTTPPort: 3320, + OlricMemberlistPort: 3322, }, Discovery: DiscoveryConfig{ BootstrapPeers: []string{}, diff --git a/pkg/gateway/cache_handlers.go b/pkg/gateway/cache_handlers.go new file mode 100644 index 0000000..1796b7e --- /dev/null +++ b/pkg/gateway/cache_handlers.go @@ -0,0 +1,356 @@ +package gateway + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + olriclib "github.com/olric-data/olric" +) + +// Cache HTTP handlers for Olric distributed cache + +func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) { + if g.olricClient == nil { + writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized") + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + err := g.olricClient.Health(ctx) + if err != nil { + writeError(w, http.StatusServiceUnavailable, fmt.Sprintf("cache health check failed: %v", err)) + return + } + + writeJSON(w, http.StatusOK, map[string]any{ + "status": "ok", + "service": "olric", + }) +} + +func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) { + if g.olricClient == nil { + writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + DMap string `json:"dmap"` // Distributed map name + Key string `json:"key"` // Key to retrieve + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid json body") + return + } + + if strings.TrimSpace(req.DMap) == "" || strings.TrimSpace(req.Key) == "" { + writeError(w, http.StatusBadRequest, "dmap and key are required") + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second) + defer cancel() + + client := 
g.olricClient.GetClient() + dm, err := client.NewDMap(req.DMap) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err)) + return + } + + gr, err := dm.Get(ctx, req.Key) + if err != nil { + if err == olriclib.ErrKeyNotFound { + writeError(w, http.StatusNotFound, "key not found") + return + } + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get key: %v", err)) + return + } + + // Try to decode the value from Olric + // Values stored as JSON bytes need to be deserialized, while basic types + // (strings, numbers, bools) can be retrieved directly + var value any + + // First, try to get as bytes (for JSON-serialized complex types) + var bytesVal []byte + if err := gr.Scan(&bytesVal); err == nil && len(bytesVal) > 0 { + // Try to deserialize as JSON + var jsonVal any + if err := json.Unmarshal(bytesVal, &jsonVal); err == nil { + value = jsonVal + } else { + // If JSON unmarshal fails, treat as string + value = string(bytesVal) + } + } else { + // Try as string (for simple string values) + if strVal, err := gr.String(); err == nil { + value = strVal + } else { + // Fallback: try to scan as any type + var anyVal any + if err := gr.Scan(&anyVal); err == nil { + value = anyVal + } else { + // Last resort: try String() again, ignoring error + strVal, _ := gr.String() + value = strVal + } + } + } + + writeJSON(w, http.StatusOK, map[string]any{ + "key": req.Key, + "value": value, + "dmap": req.DMap, + }) +} + +func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) { + if g.olricClient == nil { + writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + DMap string `json:"dmap"` // Distributed map name + Key string `json:"key"` // Key to store + Value any `json:"value"` // Value to store + TTL string `json:"ttl"` // Optional TTL (duration string like "1h", "30m") + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid json body") + return + } + + if strings.TrimSpace(req.DMap) == "" || strings.TrimSpace(req.Key) == "" { + writeError(w, http.StatusBadRequest, "dmap and key are required") + return + } + + if req.Value == nil { + writeError(w, http.StatusBadRequest, "value is required") + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second) + defer cancel() + + client := g.olricClient.GetClient() + dm, err := client.NewDMap(req.DMap) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err)) + return + } + + // TODO: TTL support - need to check Olric v0.7 API for TTL/expiry options + // For now, ignore TTL if provided + if req.TTL != "" { + _, err := time.ParseDuration(req.TTL) + if err != nil { + writeError(w, http.StatusBadRequest, fmt.Sprintf("invalid ttl format: %v", err)) + return + } + // TTL parsing succeeded but not yet implemented in API + // Will be added once we confirm the correct Olric API method + } + + // Serialize complex types (maps, slices) to JSON bytes for Olric storage + // Olric can handle basic types (string, number, bool) directly, but complex + // types need to be serialized to bytes + var valueToStore any + switch req.Value.(type) { + case map[string]any: + // Serialize maps to JSON bytes + jsonBytes, err := json.Marshal(req.Value) + if err != nil { + writeError(w, 
http.StatusInternalServerError, fmt.Sprintf("failed to marshal value: %v", err)) + return + } + valueToStore = jsonBytes + case []any: + // Serialize slices to JSON bytes + jsonBytes, err := json.Marshal(req.Value) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to marshal value: %v", err)) + return + } + valueToStore = jsonBytes + case string: + // Basic string type can be stored directly + valueToStore = req.Value + case float64: + // Basic number type can be stored directly + valueToStore = req.Value + case int: + // Basic int type can be stored directly + valueToStore = req.Value + case int64: + // Basic int64 type can be stored directly + valueToStore = req.Value + case bool: + // Basic bool type can be stored directly + valueToStore = req.Value + case nil: + // Nil can be stored directly + valueToStore = req.Value + default: + // For any other type, serialize to JSON to be safe + jsonBytes, err := json.Marshal(req.Value) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to marshal value: %v", err)) + return + } + valueToStore = jsonBytes + } + + err = dm.Put(ctx, req.Key, valueToStore) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to put key: %v", err)) + return + } + + writeJSON(w, http.StatusOK, map[string]any{ + "status": "ok", + "key": req.Key, + "dmap": req.DMap, + }) +} + +func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) { + if g.olricClient == nil { + writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + DMap string `json:"dmap"` // Distributed map name + Key string `json:"key"` // Key to delete + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid json body") + return + } + + if strings.TrimSpace(req.DMap) == "" || strings.TrimSpace(req.Key) == "" { + writeError(w, http.StatusBadRequest, "dmap and key are required") + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second) + defer cancel() + + client := g.olricClient.GetClient() + dm, err := client.NewDMap(req.DMap) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err)) + return + } + + deletedCount, err := dm.Delete(ctx, req.Key) + if err != nil { + if err == olriclib.ErrKeyNotFound { + writeError(w, http.StatusNotFound, "key not found") + return + } + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to delete key: %v", err)) + return + } + if deletedCount == 0 { + writeError(w, http.StatusNotFound, "key not found") + return + } + + writeJSON(w, http.StatusOK, map[string]any{ + "status": "ok", + "key": req.Key, + "dmap": req.DMap, + }) +} + +func (g *Gateway) cacheScanHandler(w http.ResponseWriter, r *http.Request) { + if g.olricClient == nil { + writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + DMap string `json:"dmap"` // Distributed map name + Match string `json:"match"` // Optional regex pattern to match keys + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid json body") + return + } + + if 
strings.TrimSpace(req.DMap) == "" { + writeError(w, http.StatusBadRequest, "dmap is required") + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second) + defer cancel() + + client := g.olricClient.GetClient() + dm, err := client.NewDMap(req.DMap) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err)) + return + } + + var iterator olriclib.Iterator + if req.Match != "" { + iterator, err = dm.Scan(ctx, olriclib.Match(req.Match)) + } else { + iterator, err = dm.Scan(ctx) + } + + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to scan: %v", err)) + return + } + defer iterator.Close() + + var keys []string + for iterator.Next() { + keys = append(keys, iterator.Key()) + } + + writeJSON(w, http.StatusOK, map[string]any{ + "keys": keys, + "count": len(keys), + "dmap": req.DMap, + }) +} diff --git a/pkg/gateway/cache_handlers_test.go b/pkg/gateway/cache_handlers_test.go new file mode 100644 index 0000000..6f2a5f8 --- /dev/null +++ b/pkg/gateway/cache_handlers_test.go @@ -0,0 +1,202 @@ +package gateway + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/DeBrosOfficial/network/pkg/logging" + "github.com/DeBrosOfficial/network/pkg/olric" + "go.uber.org/zap" +) + +func TestCacheHealthHandler(t *testing.T) { + // Create a test logger + logger, _ := logging.NewDefaultLogger(logging.ComponentGeneral) + + // Create gateway without Olric client (should return service unavailable) + cfg := &Config{ + ListenAddr: ":6001", + ClientNamespace: "test", + } + gw := &Gateway{ + logger: logger, + cfg: cfg, + } + + req := httptest.NewRequest("GET", "/v1/cache/health", nil) + w := httptest.NewRecorder() + + gw.cacheHealthHandler(w, req) + + if w.Code != http.StatusServiceUnavailable { + t.Errorf("expected status %d, got %d", http.StatusServiceUnavailable, w.Code) + } + + var resp map[string]any + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["error"] == nil { + t.Error("expected error in response") + } +} + +func TestCacheGetHandler_MissingClient(t *testing.T) { + logger, _ := logging.NewDefaultLogger(logging.ComponentGeneral) + + cfg := &Config{ + ListenAddr: ":6001", + ClientNamespace: "test", + } + gw := &Gateway{ + logger: logger, + cfg: cfg, + } + + reqBody := map[string]string{ + "dmap": "test-dmap", + "key": "test-key", + } + bodyBytes, _ := json.Marshal(reqBody) + req := httptest.NewRequest("POST", "/v1/cache/get", bytes.NewReader(bodyBytes)) + w := httptest.NewRecorder() + + gw.cacheGetHandler(w, req) + + if w.Code != http.StatusServiceUnavailable { + t.Errorf("expected status %d, got %d", http.StatusServiceUnavailable, w.Code) + } +} + +func TestCacheGetHandler_InvalidBody(t *testing.T) { + logger, _ := logging.NewDefaultLogger(logging.ComponentGeneral) + + cfg := &Config{ + ListenAddr: ":6001", + ClientNamespace: "test", + } + gw := &Gateway{ + logger: logger, + cfg: cfg, + olricClient: &olric.Client{}, // Mock client + } + + req := httptest.NewRequest("POST", "/v1/cache/get", bytes.NewReader([]byte("invalid json"))) + w := httptest.NewRecorder() + + gw.cacheGetHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +func TestCachePutHandler_MissingFields(t *testing.T) { + logger, _ := logging.NewDefaultLogger(logging.ComponentGeneral) + + cfg := &Config{ + 
ListenAddr: ":6001", + ClientNamespace: "test", + } + gw := &Gateway{ + logger: logger, + cfg: cfg, + olricClient: &olric.Client{}, + } + + // Test missing dmap + reqBody := map[string]string{ + "key": "test-key", + } + bodyBytes, _ := json.Marshal(reqBody) + req := httptest.NewRequest("POST", "/v1/cache/put", bytes.NewReader(bodyBytes)) + w := httptest.NewRecorder() + + gw.cachePutHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code) + } + + // Test missing key + reqBody = map[string]string{ + "dmap": "test-dmap", + } + bodyBytes, _ = json.Marshal(reqBody) + req = httptest.NewRequest("POST", "/v1/cache/put", bytes.NewReader(bodyBytes)) + w = httptest.NewRecorder() + + gw.cachePutHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +func TestCacheDeleteHandler_WrongMethod(t *testing.T) { + logger, _ := logging.NewDefaultLogger(logging.ComponentGeneral) + + cfg := &Config{ + ListenAddr: ":6001", + ClientNamespace: "test", + } + gw := &Gateway{ + logger: logger, + cfg: cfg, + olricClient: &olric.Client{}, + } + + req := httptest.NewRequest("GET", "/v1/cache/delete", nil) + w := httptest.NewRecorder() + + gw.cacheDeleteHandler(w, req) + + if w.Code != http.StatusMethodNotAllowed { + t.Errorf("expected status %d, got %d", http.StatusMethodNotAllowed, w.Code) + } +} + +func TestCacheScanHandler_InvalidBody(t *testing.T) { + logger, _ := logging.NewDefaultLogger(logging.ComponentGeneral) + + cfg := &Config{ + ListenAddr: ":6001", + ClientNamespace: "test", + } + gw := &Gateway{ + logger: logger, + cfg: cfg, + olricClient: &olric.Client{}, + } + + req := httptest.NewRequest("POST", "/v1/cache/scan", bytes.NewReader([]byte("invalid"))) + w := httptest.NewRecorder() + + gw.cacheScanHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +// Test Olric client wrapper +func TestOlricClientConfig(t *testing.T) { + logger := zap.NewNop() + + // Test default servers + cfg := olric.Config{} + client, err := olric.NewClient(cfg, logger) + if err == nil { + // If client creation succeeds, test that it has default servers + // This will fail if Olric server is not running, which is expected in tests + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + _ = client.Close(ctx) + } +} diff --git a/pkg/gateway/gateway.go b/pkg/gateway/gateway.go index 62354cb..f941c42 100644 --- a/pkg/gateway/gateway.go +++ b/pkg/gateway/gateway.go @@ -5,13 +5,16 @@ import ( "crypto/rand" "crypto/rsa" "database/sql" + "net" "strconv" "sync" "time" "github.com/DeBrosOfficial/network/pkg/client" "github.com/DeBrosOfficial/network/pkg/logging" + "github.com/DeBrosOfficial/network/pkg/olric" "github.com/DeBrosOfficial/network/pkg/rqlite" + "github.com/multiformats/go-multiaddr" "go.uber.org/zap" _ "github.com/rqlite/gorqlite/stdlib" @@ -31,6 +34,10 @@ type Config struct { EnableHTTPS bool // Enable HTTPS with ACME (Let's Encrypt) DomainName string // Domain name for HTTPS certificate TLSCacheDir string // Directory to cache TLS certificates (default: ~/.debros/tls-cache) + + // Olric cache configuration + OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). 
If empty, defaults to ["localhost:3320"] + OlricTimeout time.Duration // Timeout for Olric operations (default: 10s) } type Gateway struct { @@ -46,6 +53,9 @@ type Gateway struct { ormClient rqlite.Client ormHTTP *rqlite.HTTPGateway + // Olric cache client + olricClient *olric.Client + // Local pub/sub bypass for same-gateway subscribers localSubscribers map[string][]*localSubscriber // topic+namespace -> subscribers mu sync.RWMutex @@ -132,6 +142,42 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) { ) } + logger.ComponentInfo(logging.ComponentGeneral, "Initializing Olric cache client...") + + // Discover Olric servers dynamically from LibP2P peers if not explicitly configured + olricServers := cfg.OlricServers + if len(olricServers) == 0 { + logger.ComponentInfo(logging.ComponentGeneral, "Olric servers not configured, discovering from LibP2P peers...") + discovered := discoverOlricServers(c, logger.Logger) + if len(discovered) > 0 { + olricServers = discovered + logger.ComponentInfo(logging.ComponentGeneral, "Discovered Olric servers from LibP2P peers", + zap.Strings("servers", olricServers)) + } else { + // Fallback to localhost for local development + olricServers = []string{"localhost:3320"} + logger.ComponentInfo(logging.ComponentGeneral, "No Olric servers discovered, using localhost fallback") + } + } else { + logger.ComponentInfo(logging.ComponentGeneral, "Using explicitly configured Olric servers", + zap.Strings("servers", olricServers)) + } + + olricCfg := olric.Config{ + Servers: olricServers, + Timeout: cfg.OlricTimeout, + } + olricClient, olricErr := olric.NewClient(olricCfg, logger.Logger) + if olricErr != nil { + logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize Olric cache client; cache endpoints disabled", zap.Error(olricErr)) + } else { + gw.olricClient = olricClient + logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client ready", + zap.Strings("servers", olricCfg.Servers), + zap.Duration("timeout", olricCfg.Timeout), + ) + } + logger.ComponentInfo(logging.ComponentGeneral, "Gateway creation completed, returning...") return gw, nil } @@ -151,6 +197,13 @@ func (g *Gateway) Close() { if g.sqlDB != nil { _ = g.sqlDB.Close() } + if g.olricClient != nil { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := g.olricClient.Close(ctx); err != nil { + g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err)) + } + } } // getLocalSubscribers returns all local subscribers for a given topic and namespace @@ -161,3 +214,96 @@ func (g *Gateway) getLocalSubscribers(topic, namespace string) []*localSubscribe } return nil } + +// discoverOlricServers discovers Olric server addresses from LibP2P peers +// Returns a list of IP:port addresses where Olric servers are expected to run (port 3320) +func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger) []string { + // Get network info to access peer information + networkInfo := networkClient.Network() + if networkInfo == nil { + logger.Debug("Network info not available for Olric discovery") + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + peers, err := networkInfo.GetPeers(ctx) + if err != nil { + logger.Debug("Failed to get peers for Olric discovery", zap.Error(err)) + return nil + } + + olricServers := make([]string, 0) + seen := make(map[string]bool) + + for _, peer := range peers { + for _, addrStr := range 
peer.Addresses { + // Parse multiaddr + ma, err := multiaddr.NewMultiaddr(addrStr) + if err != nil { + continue + } + + // Extract IP address + var ip string + if ipv4, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ipv4 != "" { + ip = ipv4 + } else if ipv6, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ipv6 != "" { + ip = ipv6 + } else { + continue + } + + // Skip localhost loopback addresses (we'll use localhost:3320 as fallback) + if ip == "127.0.0.1" || ip == "::1" || ip == "localhost" { + continue + } + + // Build Olric server address (standard port 3320) + olricAddr := net.JoinHostPort(ip, "3320") + if !seen[olricAddr] { + olricServers = append(olricServers, olricAddr) + seen[olricAddr] = true + } + } + } + + // Also check bootstrap peers from config + if cfg := networkClient.Config(); cfg != nil { + for _, bootstrapAddr := range cfg.BootstrapPeers { + ma, err := multiaddr.NewMultiaddr(bootstrapAddr) + if err != nil { + continue + } + + var ip string + if ipv4, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ipv4 != "" { + ip = ipv4 + } else if ipv6, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ipv6 != "" { + ip = ipv6 + } else { + continue + } + + // Skip localhost + if ip == "127.0.0.1" || ip == "::1" || ip == "localhost" { + continue + } + + olricAddr := net.JoinHostPort(ip, "3320") + if !seen[olricAddr] { + olricServers = append(olricServers, olricAddr) + seen[olricAddr] = true + } + } + } + + // If we found servers, log them + if len(olricServers) > 0 { + logger.Info("Discovered Olric servers from LibP2P network", + zap.Strings("servers", olricServers)) + } + + return olricServers +} diff --git a/pkg/gateway/routes.go b/pkg/gateway/routes.go index cce24e8..25d09a0 100644 --- a/pkg/gateway/routes.go +++ b/pkg/gateway/routes.go @@ -47,5 +47,12 @@ func (g *Gateway) Routes() http.Handler { // anon proxy (authenticated users only) mux.HandleFunc("/v1/proxy/anon", g.anonProxyHandler) + // cache endpoints (Olric) + mux.HandleFunc("/v1/cache/health", g.cacheHealthHandler) + mux.HandleFunc("/v1/cache/get", g.cacheGetHandler) + mux.HandleFunc("/v1/cache/put", g.cachePutHandler) + mux.HandleFunc("/v1/cache/delete", g.cacheDeleteHandler) + mux.HandleFunc("/v1/cache/scan", g.cacheScanHandler) + return g.withMiddleware(mux) } diff --git a/pkg/olric/client.go b/pkg/olric/client.go new file mode 100644 index 0000000..d2b78bd --- /dev/null +++ b/pkg/olric/client.go @@ -0,0 +1,103 @@ +package olric + +import ( + "context" + "fmt" + "time" + + olriclib "github.com/olric-data/olric" + "go.uber.org/zap" +) + +// Client wraps an Olric cluster client for distributed cache operations +type Client struct { + client olriclib.Client + logger *zap.Logger +} + +// Config holds configuration for the Olric client +type Config struct { + // Servers is a list of Olric server addresses (e.g., ["localhost:3320"]) + // If empty, defaults to ["localhost:3320"] + Servers []string + + // Timeout is the timeout for client operations + // If zero, defaults to 10 seconds + Timeout time.Duration +} + +// NewClient creates a new Olric client wrapper +func NewClient(cfg Config, logger *zap.Logger) (*Client, error) { + servers := cfg.Servers + if len(servers) == 0 { + servers = []string{"localhost:3320"} + } + + client, err := olriclib.NewClusterClient(servers) + if err != nil { + return nil, fmt.Errorf("failed to create Olric cluster client: %w", err) + } + + timeout := cfg.Timeout + if timeout == 0 { + timeout = 10 * time.Second + } + + return &Client{ + client: client, + 
logger: logger, + }, nil +} + +// Health checks if the Olric client is healthy +func (c *Client) Health(ctx context.Context) error { + // Create a DMap to test connectivity + dm, err := c.client.NewDMap("_health_check") + if err != nil { + return fmt.Errorf("failed to create DMap for health check: %w", err) + } + + // Try a simple put/get operation + testKey := fmt.Sprintf("_health_%d", time.Now().UnixNano()) + testValue := "ok" + + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + err = dm.Put(ctx, testKey, testValue) + if err != nil { + return fmt.Errorf("health check put failed: %w", err) + } + + gr, err := dm.Get(ctx, testKey) + if err != nil { + return fmt.Errorf("health check get failed: %w", err) + } + + val, err := gr.String() + if err != nil { + return fmt.Errorf("health check value decode failed: %w", err) + } + + if val != testValue { + return fmt.Errorf("health check value mismatch: expected %q, got %q", testValue, val) + } + + // Clean up test key + _, _ = dm.Delete(ctx, testKey) + + return nil +} + +// Close closes the Olric client connection +func (c *Client) Close(ctx context.Context) error { + if c.client == nil { + return nil + } + return c.client.Close(ctx) +} + +// GetClient returns the underlying Olric client +func (c *Client) GetClient() olriclib.Client { + return c.client +} diff --git a/scripts/install-debros-network.sh b/scripts/install-debros-network.sh index efbc1ba..a1bd4e5 100755 --- a/scripts/install-debros-network.sh +++ b/scripts/install-debros-network.sh @@ -396,6 +396,50 @@ configure_firewall_for_anon() { log "No active firewall detected, skipping firewall configuration" } +configure_firewall_for_olric() { + log "Checking firewall configuration for Olric..." + + # Check for UFW + if command -v ufw &>/dev/null && sudo ufw status | grep -q "Status: active"; then + log "UFW detected and active, adding Olric ports..." + sudo ufw allow 3320/tcp comment 'Olric HTTP API' 2>/dev/null || true + sudo ufw allow 3322/tcp comment 'Olric Memberlist' 2>/dev/null || true + success "UFW rules added for Olric" + return 0 + fi + + # Check for firewalld + if command -v firewall-cmd &>/dev/null && sudo firewall-cmd --state 2>/dev/null | grep -q "running"; then + log "firewalld detected and active, adding Olric ports..." + sudo firewall-cmd --permanent --add-port=3320/tcp 2>/dev/null || true + sudo firewall-cmd --permanent --add-port=3322/tcp 2>/dev/null || true + sudo firewall-cmd --reload 2>/dev/null || true + success "firewalld rules added for Olric" + return 0 + fi + + # Check for iptables + if command -v iptables &>/dev/null; then + # Check if iptables has any rules (indicating it's in use) + if sudo iptables -L -n | grep -q "Chain INPUT"; then + log "iptables detected, adding Olric ports..." 
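+            # Olric defaults: 3320 serves the client/HTTP API and 3322 carries
+            # memberlist gossip; mirror any port overrides from your Olric config here.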
+            sudo iptables -A INPUT -p tcp --dport 3320 -j ACCEPT -m comment --comment "Olric HTTP API" 2>/dev/null || true
+            sudo iptables -A INPUT -p tcp --dport 3322 -j ACCEPT -m comment --comment "Olric Memberlist" 2>/dev/null || true
+
+            # Try to save rules if iptables-persistent is available
+            if command -v netfilter-persistent &>/dev/null; then
+                sudo netfilter-persistent save 2>/dev/null || true
+            elif command -v iptables-save &>/dev/null; then
+                sudo iptables-save | sudo tee /etc/iptables/rules.v4 >/dev/null 2>&1 || true
+            fi
+            success "iptables rules added for Olric"
+            return 0
+        fi
+    fi
+
+    log "No active firewall detected for Olric, skipping firewall configuration"
+}
+
 run_setup() {
     echo -e ""
     echo -e "${BLUE}========================================${NOCOLOR}"

From cf26c1af2cb7bc01814a640d76843e98df310d03 Mon Sep 17 00:00:00 2001
From: anonpenguin23
Date: Wed, 5 Nov 2025 07:31:50 +0200
Subject: [PATCH 02/57] feat: add IPFS storage support to gateway and client

- Added IPFS storage endpoints to the Gateway: upload, pin, status, get, and unpin.
- Introduced a StorageClient interface and HTTP-backed implementation in the Go client library.
- Extended the Makefile dev target to auto-start the IPFS daemon, IPFS Cluster daemon, and Olric cache server.
- Added IPFS configuration options (cluster API URL, API URL, timeout, replication factor) to node and gateway configs.
- Simplified Olric configuration generation for local environments.
- Introduced unit and e2e tests for the storage client and gateway storage endpoints.
---
 CHANGELOG.md                         |  36 ++
 Makefile                             |  89 ++++-
 README.md                            |   1 +
 cmd/gateway/config.go                |  62 ++-
 e2e/gateway_e2e_test.go              | 198 ++++++++++
 pkg/cli/setup.go                     |  90 +++--
 pkg/client/client.go                 |   7 +
 pkg/client/interface.go              |  49 +++
 pkg/client/storage_client.go         | 245 ++++++++++++
 pkg/client/storage_client_test.go    | 378 ++++++++++++++++++
 pkg/config/config.go                 |  39 +-
 pkg/gateway/gateway.go               | 170 ++++++++
 pkg/gateway/middleware_test.go       |   9 -
 pkg/gateway/routes.go                |   7 +
 pkg/gateway/storage_handlers.go      | 341 ++++++++++++++++-
 pkg/gateway/storage_handlers_test.go | 554 +++++++++++++++++++++++++++
 pkg/ipfs/client.go                   | 345 +++++++++++++++++
 pkg/ipfs/client_test.go              | 483 +++++++++++++++++++++++
 18 files changed, 3009 insertions(+), 94 deletions(-)
 create mode 100644 pkg/client/storage_client.go
 create mode 100644 pkg/client/storage_client_test.go
 create mode 100644 pkg/gateway/storage_handlers_test.go
 create mode 100644 pkg/ipfs/client.go
 create mode 100644 pkg/ipfs/client_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ac7a4f1..20b5347 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,42 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
 
 ### Deprecated
 
 ### Fixed
+## [0.56.0] - 2025-11-05
+
+### Added
+- Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning.
+- Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints.
+- Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup.
+
+### Changed
+- Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor.
+- Refactored Olric configuration generation to use a simpler, local-environment focused setup.
+- Improved IPFS content retrieval (`Get`) to fall back to the IPFS Gateway (port 8080) if the IPFS API (port 5001) returns a 404.
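For illustration, the fallback in that last item boils down to: try the local IPFS RPC API first and, on a 404, re-fetch the same CID from the read-only IPFS Gateway. A minimal, hypothetical sketch assuming default local ports; `fetchCID` is not the gateway's actual implementation:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// fetchCID sketches the fallback: ask the local IPFS RPC API (port 5001)
// first; if it reports 404 for the CID, retry via the read-only IPFS
// Gateway (port 8080). Hypothetical helper, not the gateway's actual code.
func fetchCID(cid string) (io.ReadCloser, error) {
	// Kubo's RPC API requires POST for /api/v0/* endpoints.
	resp, err := http.Post("http://localhost:5001/api/v0/cat?arg="+cid, "", nil)
	if err == nil {
		if resp.StatusCode == http.StatusOK {
			return resp.Body, nil
		}
		code := resp.StatusCode
		resp.Body.Close()
		if code != http.StatusNotFound {
			return nil, fmt.Errorf("ipfs api returned %d", code)
		}
	}
	// API unreachable or CID not found there: fall back to the gateway.
	gw, err := http.Get("http://localhost:8080/ipfs/" + cid)
	if err != nil {
		return nil, err
	}
	if gw.StatusCode != http.StatusOK {
		gw.Body.Close()
		return nil, fmt.Errorf("ipfs gateway returned %d", gw.StatusCode)
	}
	return gw.Body, nil
}

func main() {
	body, err := fetchCID(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer body.Close()
	io.Copy(os.Stdout, body)
}
```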
+ +### Deprecated + +### Removed + +### Fixed +\n +## [0.55.0] - 2025-11-05 + +### Added +- Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning. +- Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints. +- Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup. + +### Changed +- Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor. +- Refactored Olric configuration generation to use a simpler, local-environment focused setup. +- Improved `dev` environment logging to include logs from IPFS and Olric services when running. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.54.0] - 2025-11-03 ### Added diff --git a/Makefile b/Makefile index 03a1f0b..bc9bbb2 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks -VERSION := 0.54.0 +VERSION := 0.56.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' @@ -119,6 +119,58 @@ dev: build @echo "Starting node3..." @nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid @sleep 1 + @echo "Starting IPFS daemon..." + @if command -v ipfs >/dev/null 2>&1; then \ + if [ ! -d $$HOME/.debros/ipfs ]; then \ + echo " Initializing IPFS repository..."; \ + IPFS_PATH=$$HOME/.debros/ipfs ipfs init 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ + fi; \ + if ! pgrep -f "ipfs daemon" >/dev/null 2>&1; then \ + IPFS_PATH=$$HOME/.debros/ipfs nohup ipfs daemon > $$HOME/.debros/logs/ipfs.log 2>&1 & echo $$! > .dev/pids/ipfs.pid; \ + echo " IPFS daemon started (PID: $$(cat .dev/pids/ipfs.pid))"; \ + sleep 5; \ + else \ + echo " ✓ IPFS daemon already running"; \ + fi; \ + else \ + echo " ⚠️ ipfs command not found - skipping IPFS (storage endpoints will be disabled)"; \ + echo " Install with: https://docs.ipfs.tech/install/"; \ + fi + @echo "Starting IPFS Cluster daemon..." + @if command -v ipfs-cluster-service >/dev/null 2>&1; then \ + if [ ! -d $$HOME/.debros/ipfs-cluster ]; then \ + echo " Initializing IPFS Cluster..."; \ + CLUSTER_PATH=$$HOME/.debros/ipfs-cluster ipfs-cluster-service init --force 2>&1 | grep -v "peer identity" || true; \ + fi; \ + if ! pgrep -f "ipfs-cluster-service" >/dev/null 2>&1; then \ + CLUSTER_PATH=$$HOME/.debros/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster.pid; \ + echo " IPFS Cluster daemon started (PID: $$(cat .dev/pids/ipfs-cluster.pid))"; \ + sleep 5; \ + else \ + echo " ✓ IPFS Cluster daemon already running"; \ + fi; \ + else \ + echo " ⚠️ ipfs-cluster-service command not found - skipping IPFS Cluster (storage endpoints will be disabled)"; \ + echo " Install with: https://ipfscluster.io/documentation/guides/install/"; \ + fi + @echo "Starting Olric cache server..." + @if command -v olric-server >/dev/null 2>&1; then \ + if [ ! -f $$HOME/.debros/olric-config.yaml ]; then \ + echo " Creating Olric config..."; \ + mkdir -p $$HOME/.debros; \ + fi; \ + if ! 
pgrep -f "olric-server" >/dev/null 2>&1; then \ + OLRIC_SERVER_CONFIG=$$HOME/.debros/olric-config.yaml nohup olric-server > $$HOME/.debros/logs/olric.log 2>&1 & echo $$! > .dev/pids/olric.pid; \ + echo " Olric cache server started (PID: $$(cat .dev/pids/olric.pid))"; \ + sleep 3; \ + else \ + echo " ✓ Olric cache server already running"; \ + fi; \ + else \ + echo " ⚠️ olric-server command not found - skipping Olric (cache endpoints will be disabled)"; \ + echo " Install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0"; \ + fi + @sleep 1 @echo "Starting gateway..." @nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid @echo "" @@ -130,6 +182,15 @@ dev: build @if [ -f .dev/pids/anon.pid ]; then \ echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \ fi + @if [ -f .dev/pids/ipfs.pid ]; then \ + echo " IPFS: PID=$$(cat .dev/pids/ipfs.pid) (API: 5001)"; \ + fi + @if [ -f .dev/pids/ipfs-cluster.pid ]; then \ + echo " IPFS Cluster: PID=$$(cat .dev/pids/ipfs-cluster.pid) (API: 9094)"; \ + fi + @if [ -f .dev/pids/olric.pid ]; then \ + echo " Olric: PID=$$(cat .dev/pids/olric.pid) (API: 3320)"; \ + fi @echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)" @echo " Node2: PID=$$(cat .dev/pids/node2.pid)" @echo " Node3: PID=$$(cat .dev/pids/node3.pid)" @@ -137,6 +198,13 @@ dev: build @echo "" @echo "Ports:" @echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)" + @if [ -f .dev/pids/ipfs.pid ]; then \ + echo " IPFS API: 5001 (content retrieval)"; \ + echo " IPFS Cluster: 9094 (pin management)"; \ + fi + @if [ -f .dev/pids/olric.pid ]; then \ + echo " Olric: 3320 (cache API)"; \ + fi @echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001" @echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002" @echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003" @@ -145,13 +213,18 @@ dev: build @echo "Press Ctrl+C to stop all processes" @echo "============================================================" @echo "" - @if [ -f .dev/pids/anon.pid ]; then \ - trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ - tail -f $$HOME/.debros/logs/anon.log $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \ - else \ - trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ - tail -f $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \ - fi + @LOGS="$$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log"; \ + if [ -f .dev/pids/anon.pid ]; then \ + LOGS="$$LOGS $$HOME/.debros/logs/anon.log"; \ + fi; \ + if [ -f .dev/pids/ipfs.pid ]; then \ + LOGS="$$LOGS $$HOME/.debros/logs/ipfs.log"; \ + fi; \ + if [ -f .dev/pids/ipfs-cluster.pid ]; then \ + LOGS="$$LOGS $$HOME/.debros/logs/ipfs-cluster.log"; \ + fi; \ + trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ + tail -f $$LOGS # Help help: diff --git a/README.md b/README.md index 54fe138..b325374 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,7 @@ Common endpoints (see `openapi/gateway.yaml` for the full spec): - `POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction` - `GET /v1/rqlite/schema` - `POST /v1/pubsub/publish`, `GET 
/v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=` +- `POST /v1/storage/upload`, `POST /v1/storage/pin`, `GET /v1/storage/status/:cid`, `GET /v1/storage/get/:cid`, `DELETE /v1/storage/unpin/:cid` ## Troubleshooting diff --git a/cmd/gateway/config.go b/cmd/gateway/config.go index d8d1864..cf71959 100644 --- a/cmd/gateway/config.go +++ b/cmd/gateway/config.go @@ -51,15 +51,19 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { // Load YAML type yamlCfg struct { - ListenAddr string `yaml:"listen_addr"` - ClientNamespace string `yaml:"client_namespace"` - RQLiteDSN string `yaml:"rqlite_dsn"` - BootstrapPeers []string `yaml:"bootstrap_peers"` - EnableHTTPS bool `yaml:"enable_https"` - DomainName string `yaml:"domain_name"` - TLSCacheDir string `yaml:"tls_cache_dir"` - OlricServers []string `yaml:"olric_servers"` - OlricTimeout string `yaml:"olric_timeout"` + ListenAddr string `yaml:"listen_addr"` + ClientNamespace string `yaml:"client_namespace"` + RQLiteDSN string `yaml:"rqlite_dsn"` + BootstrapPeers []string `yaml:"bootstrap_peers"` + EnableHTTPS bool `yaml:"enable_https"` + DomainName string `yaml:"domain_name"` + TLSCacheDir string `yaml:"tls_cache_dir"` + OlricServers []string `yaml:"olric_servers"` + OlricTimeout string `yaml:"olric_timeout"` + IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"` + IPFSAPIURL string `yaml:"ipfs_api_url"` + IPFSTimeout string `yaml:"ipfs_timeout"` + IPFSReplicationFactor int `yaml:"ipfs_replication_factor"` } data, err := os.ReadFile(configPath) @@ -82,15 +86,19 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { // Build config from YAML cfg := &gateway.Config{ - ListenAddr: ":6001", - ClientNamespace: "default", - BootstrapPeers: nil, - RQLiteDSN: "", - EnableHTTPS: false, - DomainName: "", - TLSCacheDir: "", - OlricServers: nil, - OlricTimeout: 0, + ListenAddr: ":6001", + ClientNamespace: "default", + BootstrapPeers: nil, + RQLiteDSN: "", + EnableHTTPS: false, + DomainName: "", + TLSCacheDir: "", + OlricServers: nil, + OlricTimeout: 0, + IPFSClusterAPIURL: "", + IPFSAPIURL: "", + IPFSTimeout: 0, + IPFSReplicationFactor: 0, } if v := strings.TrimSpace(y.ListenAddr); v != "" { @@ -142,6 +150,24 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { } } + // IPFS configuration + if v := strings.TrimSpace(y.IPFSClusterAPIURL); v != "" { + cfg.IPFSClusterAPIURL = v + } + if v := strings.TrimSpace(y.IPFSAPIURL); v != "" { + cfg.IPFSAPIURL = v + } + if v := strings.TrimSpace(y.IPFSTimeout); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + cfg.IPFSTimeout = parsed + } else { + logger.ComponentWarn(logging.ComponentGeneral, "invalid ipfs_timeout, using default", zap.String("value", v), zap.Error(err)) + } + } + if y.IPFSReplicationFactor > 0 { + cfg.IPFSReplicationFactor = y.IPFSReplicationFactor + } + // Validate configuration if errs := cfg.ValidateConfig(); len(errs) > 0 { fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs)) diff --git a/e2e/gateway_e2e_test.go b/e2e/gateway_e2e_test.go index 82e7f27..8c6cb27 100644 --- a/e2e/gateway_e2e_test.go +++ b/e2e/gateway_e2e_test.go @@ -3,10 +3,13 @@ package e2e import ( + "bytes" "crypto/rand" "encoding/base64" "encoding/json" "fmt" + "io" + "mime/multipart" "net/http" "net/url" "os" @@ -407,6 +410,201 @@ func TestGateway_Database_RecreateWithFK(t *testing.T) { } } +func TestGateway_Storage_UploadMultipart(t *testing.T) { + key := requireAPIKey(t) + base := gatewayBaseURL() + + // Create multipart form data 
using proper multipart writer + content := []byte("test file content for IPFS upload") + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + part, err := writer.CreateFormFile("file", "test.txt") + if err != nil { + t.Fatalf("create form file: %v", err) + } + if _, err := part.Write(content); err != nil { + t.Fatalf("write content: %v", err) + } + if err := writer.Close(); err != nil { + t.Fatalf("close writer: %v", err) + } + + req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", &buf) + req.Header = authHeader(key) + req.Header.Set("Content-Type", writer.FormDataContentType()) + resp, err := httpClient().Do(req) + if err != nil { + t.Fatalf("upload do: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusServiceUnavailable { + t.Skip("IPFS storage not available; skipping storage tests") + } + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload status: %d, body: %s", resp.StatusCode, string(body)) + } + + var uploadResp struct { + Cid string `json:"cid"` + Name string `json:"name"` + Size int64 `json:"size"` + } + if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil { + t.Fatalf("upload decode: %v", err) + } + if uploadResp.Cid == "" { + t.Fatalf("upload returned empty CID") + } + if uploadResp.Name != "test.txt" { + t.Fatalf("upload name mismatch: got %s", uploadResp.Name) + } + if uploadResp.Size == 0 { + t.Fatalf("upload size is zero") + } + + // Test pinning the uploaded content + pinBody := fmt.Sprintf(`{"cid":"%s","name":"test-pinned"}`, uploadResp.Cid) + req2, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/pin", strings.NewReader(pinBody)) + req2.Header = authHeader(key) + resp2, err := httpClient().Do(req2) + if err != nil { + t.Fatalf("pin do: %v", err) + } + defer resp2.Body.Close() + if resp2.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp2.Body) + t.Fatalf("pin status: %d, body: %s", resp2.StatusCode, string(body)) + } + + // Test getting pin status + req3, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/"+uploadResp.Cid, nil) + req3.Header = authHeader(key) + resp3, err := httpClient().Do(req3) + if err != nil { + t.Fatalf("status do: %v", err) + } + defer resp3.Body.Close() + if resp3.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp3.Body) + t.Fatalf("status status: %d, body: %s", resp3.StatusCode, string(body)) + } + + var statusResp struct { + Cid string `json:"cid"` + Status string `json:"status"` + ReplicationFactor int `json:"replication_factor"` + Peers []string `json:"peers"` + } + if err := json.NewDecoder(resp3.Body).Decode(&statusResp); err != nil { + t.Fatalf("status decode: %v", err) + } + if statusResp.Cid != uploadResp.Cid { + t.Fatalf("status CID mismatch: got %s", statusResp.Cid) + } + + // Test retrieving content + req4, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/get/"+uploadResp.Cid, nil) + req4.Header = authHeader(key) + resp4, err := httpClient().Do(req4) + if err != nil { + t.Fatalf("get do: %v", err) + } + defer resp4.Body.Close() + if resp4.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp4.Body) + t.Fatalf("get status: %d, body: %s", resp4.StatusCode, string(body)) + } + + retrieved, err := io.ReadAll(resp4.Body) + if err != nil { + t.Fatalf("get read: %v", err) + } + if string(retrieved) != string(content) { + t.Fatalf("retrieved content mismatch: got %q", string(retrieved)) + } + + // Test unpinning + req5, _ := http.NewRequest(http.MethodDelete, 
base+"/v1/storage/unpin/"+uploadResp.Cid, nil) + req5.Header = authHeader(key) + resp5, err := httpClient().Do(req5) + if err != nil { + t.Fatalf("unpin do: %v", err) + } + defer resp5.Body.Close() + if resp5.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp5.Body) + t.Fatalf("unpin status: %d, body: %s", resp5.StatusCode, string(body)) + } +} + +func TestGateway_Storage_UploadJSON(t *testing.T) { + key := requireAPIKey(t) + base := gatewayBaseURL() + + // Test JSON upload with base64 data + content := []byte("test json upload content") + b64 := base64.StdEncoding.EncodeToString(content) + body := fmt.Sprintf(`{"name":"test.json","data":"%s"}`, b64) + + req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", strings.NewReader(body)) + req.Header = authHeader(key) + resp, err := httpClient().Do(req) + if err != nil { + t.Fatalf("upload json do: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusServiceUnavailable { + t.Skip("IPFS storage not available; skipping storage tests") + } + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload json status: %d, body: %s", resp.StatusCode, string(body)) + } + + var uploadResp struct { + Cid string `json:"cid"` + Name string `json:"name"` + Size int64 `json:"size"` + } + if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil { + t.Fatalf("upload json decode: %v", err) + } + if uploadResp.Cid == "" { + t.Fatalf("upload json returned empty CID") + } + if uploadResp.Name != "test.json" { + t.Fatalf("upload json name mismatch: got %s", uploadResp.Name) + } +} + +func TestGateway_Storage_InvalidCID(t *testing.T) { + key := requireAPIKey(t) + base := gatewayBaseURL() + + // Test status with invalid CID + req, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/QmInvalidCID123", nil) + req.Header = authHeader(key) + resp, err := httpClient().Do(req) + if err != nil { + t.Fatalf("status invalid do: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusServiceUnavailable { + t.Skip("IPFS storage not available; skipping storage tests") + } + + // Should return error but not crash + if resp.StatusCode != http.StatusNotFound && resp.StatusCode != http.StatusInternalServerError { + t.Fatalf("expected error status for invalid CID, got %d", resp.StatusCode) + } +} + func toWSURL(httpURL string) string { u, err := url.Parse(httpURL) if err != nil { diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index bc245d1..c681554 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -1086,26 +1086,15 @@ func installOlric() { if err := os.MkdirAll(olricConfigDir, 0755); err == nil { configPath := olricConfigDir + "/config.yaml" if _, err := os.Stat(configPath); os.IsNotExist(err) { - configContent := `memberlist: - bind-addr: "0.0.0.0" - bind-port: 3322 -client: - bind-addr: "0.0.0.0" - bind-port: 3320 + configContent := `server: + bindAddr: "127.0.0.1" + bindPort: 3320 -# Durability and replication configuration -# Replicates data across entire network for fault tolerance -dmaps: - default: - replication: - mode: sync # Synchronous replication for durability - replica_count: 2 # Replicate to 2 backup nodes (3 total copies: 1 primary + 2 backups) - write_quorum: 2 # Require 2 nodes to acknowledge writes - read_quorum: 1 # Read from 1 node (faster reads) - read_repair: true # Enable read-repair for consistency +memberlist: + environment: local + bindAddr: "127.0.0.1" + bindPort: 3322 -# Split-brain protection -member_count_quorum: 2 # Require 
at least 2 nodes to operate (prevents split-brain) ` if err := os.WriteFile(configPath, []byte(configContent), 0644); err == nil { exec.Command("chown", "debros:debros", configPath).Run() @@ -1532,6 +1521,17 @@ database: cluster_sync_interval: "30s" peer_inactivity_limit: "24h" min_cluster_size: 1 + ipfs: + # IPFS Cluster API endpoint for pin management (leave empty to disable) + cluster_api_url: "http://localhost:9094" + # IPFS HTTP API endpoint for content retrieval + api_url: "http://localhost:5001" + # Timeout for IPFS operations + timeout: "60s" + # Replication factor for pinned content + replication_factor: 3 + # Enable client-side encryption before upload + enable_encryption: true discovery: bootstrap_peers: [] @@ -1607,6 +1607,17 @@ database: cluster_sync_interval: "30s" peer_inactivity_limit: "24h" min_cluster_size: 1 + ipfs: + # IPFS Cluster API endpoint for pin management (leave empty to disable) + cluster_api_url: "http://localhost:9094" + # IPFS HTTP API endpoint for content retrieval + api_url: "http://localhost:5001" + # Timeout for IPFS operations + timeout: "60s" + # Replication factor for pinned content + replication_factor: 3 + # Enable client-side encryption before upload + enable_encryption: true discovery: %s @@ -1670,13 +1681,23 @@ func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain olricYAML.WriteString(" - \"localhost:3320\"\n") } + // IPFS Cluster configuration (defaults - can be customized later) + ipfsYAML := `# IPFS Cluster configuration (optional) +# Uncomment and configure if you have IPFS Cluster running: +# ipfs_cluster_api_url: "http://localhost:9094" +# ipfs_api_url: "http://localhost:5001" +# ipfs_timeout: "60s" +# ipfs_replication_factor: 3 +` + return fmt.Sprintf(`listen_addr: ":6001" client_namespace: "default" rqlite_dsn: "" %s %s %s -`, peersYAML.String(), httpsYAML.String(), olricYAML.String()) +%s +`, peersYAML.String(), httpsYAML.String(), olricYAML.String(), ipfsYAML) } // generateOlricConfig generates an Olric configuration file @@ -1689,30 +1710,15 @@ func generateOlricConfig(configPath, bindIP string, httpPort, memberlistPort int } var config strings.Builder + config.WriteString("server:\n") + config.WriteString(fmt.Sprintf(" bindAddr: \"%s\"\n", bindIP)) + config.WriteString(fmt.Sprintf(" bindPort: %d\n", httpPort)) + config.WriteString("\n") config.WriteString("memberlist:\n") - config.WriteString(fmt.Sprintf(" bind-addr: \"%s\"\n", bindIP)) - config.WriteString(fmt.Sprintf(" bind-port: %d\n", memberlistPort)) - config.WriteString(" # Multicast discovery enabled - peers discovered dynamically via LibP2P network\n") - - config.WriteString("client:\n") - config.WriteString(fmt.Sprintf(" bind-addr: \"%s\"\n", bindIP)) - config.WriteString(fmt.Sprintf(" bind-port: %d\n", httpPort)) - - // Durability and replication settings - config.WriteString("\n# Durability and replication configuration\n") - config.WriteString("# Replicates data across entire network for fault tolerance\n") - config.WriteString("dmaps:\n") - config.WriteString(" default:\n") - config.WriteString(" replication:\n") - config.WriteString(" mode: sync # Synchronous replication for durability\n") - config.WriteString(" replica_count: 2 # Replicate to 2 backup nodes (3 total copies: 1 primary + 2 backups)\n") - config.WriteString(" write_quorum: 2 # Require 2 nodes to acknowledge writes\n") - config.WriteString(" read_quorum: 1 # Read from 1 node (faster reads)\n") - config.WriteString(" read_repair: true # Enable read-repair for consistency\n") - - 
// Split-brain protection - config.WriteString("\n# Split-brain protection\n") - config.WriteString("member_count_quorum: 2 # Require at least 2 nodes to operate (prevents split-brain)\n") + config.WriteString(" environment: local\n") + config.WriteString(fmt.Sprintf(" bindAddr: \"%s\"\n", bindIP)) + config.WriteString(fmt.Sprintf(" bindPort: %d\n", memberlistPort)) + config.WriteString("\n") // Write config file if err := os.WriteFile(configPath, []byte(config.String()), 0644); err != nil { diff --git a/pkg/client/client.go b/pkg/client/client.go index a0b06dd..8a2aa45 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -35,6 +35,7 @@ type Client struct { database *DatabaseClientImpl network *NetworkInfoImpl pubsub *pubSubBridge + storage *StorageClientImpl // State connected bool @@ -70,6 +71,7 @@ func NewClient(config *ClientConfig) (NetworkClient, error) { // Initialize components (will be configured when connected) client.database = &DatabaseClientImpl{client: client} client.network = &NetworkInfoImpl{client: client} + client.storage = &StorageClientImpl{client: client} return client, nil } @@ -89,6 +91,11 @@ func (c *Client) Network() NetworkInfo { return c.network } +// Storage returns the storage client +func (c *Client) Storage() StorageClient { + return c.storage +} + // Config returns a snapshot copy of the client's configuration func (c *Client) Config() *ClientConfig { c.mu.RLock() diff --git a/pkg/client/interface.go b/pkg/client/interface.go index 328a0cd..31cdd9c 100644 --- a/pkg/client/interface.go +++ b/pkg/client/interface.go @@ -3,6 +3,7 @@ package client import ( "context" "fmt" + "io" "time" ) @@ -17,6 +18,9 @@ type NetworkClient interface { // Network information Network() NetworkInfo + // Storage operations (IPFS) + Storage() StorageClient + // Lifecycle Connect() error Disconnect() error @@ -51,6 +55,24 @@ type NetworkInfo interface { DisconnectFromPeer(ctx context.Context, peerID string) error } +// StorageClient provides IPFS storage operations +type StorageClient interface { + // Upload uploads content to IPFS and pins it + Upload(ctx context.Context, reader io.Reader, name string) (*StorageUploadResult, error) + + // Pin pins an existing CID + Pin(ctx context.Context, cid string, name string) (*StoragePinResult, error) + + // Status gets the pin status for a CID + Status(ctx context.Context, cid string) (*StorageStatus, error) + + // Get retrieves content from IPFS by CID + Get(ctx context.Context, cid string) (io.ReadCloser, error) + + // Unpin removes a pin from a CID + Unpin(ctx context.Context, cid string) error +} + // MessageHandler is called when a pub/sub message is received type MessageHandler func(topic string, data []byte) error @@ -107,12 +129,38 @@ type HealthStatus struct { ResponseTime time.Duration `json:"response_time"` } +// StorageUploadResult represents the result of uploading content to IPFS +type StorageUploadResult struct { + Cid string `json:"cid"` + Name string `json:"name"` + Size int64 `json:"size"` +} + +// StoragePinResult represents the result of pinning a CID +type StoragePinResult struct { + Cid string `json:"cid"` + Name string `json:"name"` +} + +// StorageStatus represents the status of a pinned CID +type StorageStatus struct { + Cid string `json:"cid"` + Name string `json:"name"` + Status string `json:"status"` // "pinned", "pinning", "queued", "unpinned", "error" + ReplicationMin int `json:"replication_min"` + ReplicationMax int `json:"replication_max"` + ReplicationFactor int `json:"replication_factor"` + Peers 
[]string `json:"peers"` + Error string `json:"error,omitempty"` +} + // ClientConfig represents configuration for network clients type ClientConfig struct { AppName string `json:"app_name"` DatabaseName string `json:"database_name"` BootstrapPeers []string `json:"bootstrap_peers"` DatabaseEndpoints []string `json:"database_endpoints"` + GatewayURL string `json:"gateway_url"` // Gateway URL for HTTP API access (e.g., "http://localhost:6001") ConnectTimeout time.Duration `json:"connect_timeout"` RetryAttempts int `json:"retry_attempts"` RetryDelay time.Duration `json:"retry_delay"` @@ -132,6 +180,7 @@ func DefaultClientConfig(appName string) *ClientConfig { DatabaseName: fmt.Sprintf("%s_db", appName), BootstrapPeers: peers, DatabaseEndpoints: endpoints, + GatewayURL: "http://localhost:6001", ConnectTimeout: time.Second * 30, RetryAttempts: 3, RetryDelay: time.Second * 5, diff --git a/pkg/client/storage_client.go b/pkg/client/storage_client.go new file mode 100644 index 0000000..93cceb3 --- /dev/null +++ b/pkg/client/storage_client.go @@ -0,0 +1,245 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "strings" + "time" +) + +// StorageClientImpl implements StorageClient using HTTP requests to the gateway +type StorageClientImpl struct { + client *Client +} + +// Upload uploads content to IPFS and pins it +func (s *StorageClientImpl) Upload(ctx context.Context, reader io.Reader, name string) (*StorageUploadResult, error) { + if err := s.client.requireAccess(ctx); err != nil { + return nil, fmt.Errorf("authentication required: %w", err) + } + + gatewayURL := s.getGatewayURL() + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + // Add file field + part, err := writer.CreateFormFile("file", name) + if err != nil { + return nil, fmt.Errorf("failed to create form file: %w", err) + } + + if _, err := io.Copy(part, reader); err != nil { + return nil, fmt.Errorf("failed to copy data: %w", err) + } + + if err := writer.Close(); err != nil { + return nil, fmt.Errorf("failed to close writer: %w", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, "POST", gatewayURL+"/v1/storage/upload", &buf) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + s.addAuthHeaders(req) + + // Execute request + client := &http.Client{Timeout: 5 * time.Minute} // Large timeout for file uploads + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result StorageUploadResult + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return &result, nil +} + +// Pin pins an existing CID +func (s *StorageClientImpl) Pin(ctx context.Context, cid string, name string) (*StoragePinResult, error) { + if err := s.client.requireAccess(ctx); err != nil { + return nil, fmt.Errorf("authentication required: %w", err) + } + + gatewayURL := s.getGatewayURL() + + reqBody := map[string]interface{}{ + "cid": cid, + } + if name != "" { + reqBody["name"] = name + } + + jsonBody, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) 
+ } + + req, err := http.NewRequestWithContext(ctx, "POST", gatewayURL+"/v1/storage/pin", bytes.NewReader(jsonBody)) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + s.addAuthHeaders(req) + + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("pin failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result StoragePinResult + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return &result, nil +} + +// Status gets the pin status for a CID +func (s *StorageClientImpl) Status(ctx context.Context, cid string) (*StorageStatus, error) { + if err := s.client.requireAccess(ctx); err != nil { + return nil, fmt.Errorf("authentication required: %w", err) + } + + gatewayURL := s.getGatewayURL() + + req, err := http.NewRequestWithContext(ctx, "GET", gatewayURL+"/v1/storage/status/"+cid, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + s.addAuthHeaders(req) + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("status failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result StorageStatus + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode response: %w", err) + } + + return &result, nil +} + +// Get retrieves content from IPFS by CID +func (s *StorageClientImpl) Get(ctx context.Context, cid string) (io.ReadCloser, error) { + if err := s.client.requireAccess(ctx); err != nil { + return nil, fmt.Errorf("authentication required: %w", err) + } + + gatewayURL := s.getGatewayURL() + + req, err := http.NewRequestWithContext(ctx, "GET", gatewayURL+"/v1/storage/get/"+cid, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + s.addAuthHeaders(req) + + client := &http.Client{Timeout: 5 * time.Minute} // Large timeout for file downloads + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("get failed with status %d", resp.StatusCode) + } + + return resp.Body, nil +} + +// Unpin removes a pin from a CID +func (s *StorageClientImpl) Unpin(ctx context.Context, cid string) error { + if err := s.client.requireAccess(ctx); err != nil { + return fmt.Errorf("authentication required: %w", err) + } + + gatewayURL := s.getGatewayURL() + + req, err := http.NewRequestWithContext(ctx, "DELETE", gatewayURL+"/v1/storage/unpin/"+cid, nil) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + + s.addAuthHeaders(req) + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unpin failed with status %d: %s", resp.StatusCode, string(body)) + } + + 
return nil +} + +// getGatewayURL returns the gateway URL from config, defaulting to localhost:6001 +func (s *StorageClientImpl) getGatewayURL() string { + cfg := s.client.Config() + if cfg != nil && cfg.GatewayURL != "" { + return strings.TrimSuffix(cfg.GatewayURL, "/") + } + return "http://localhost:6001" +} + +// addAuthHeaders adds authentication headers to the request +func (s *StorageClientImpl) addAuthHeaders(req *http.Request) { + cfg := s.client.Config() + if cfg == nil { + return + } + + // Prefer JWT if available + if cfg.JWT != "" { + req.Header.Set("Authorization", "Bearer "+cfg.JWT) + return + } + + // Fallback to API key + if cfg.APIKey != "" { + req.Header.Set("Authorization", "Bearer "+cfg.APIKey) + req.Header.Set("X-API-Key", cfg.APIKey) + } +} diff --git a/pkg/client/storage_client_test.go b/pkg/client/storage_client_test.go new file mode 100644 index 0000000..34127e7 --- /dev/null +++ b/pkg/client/storage_client_test.go @@ -0,0 +1,378 @@ +package client + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestStorageClientImpl_Upload(t *testing.T) { + t.Run("success", func(t *testing.T) { + expectedCID := "QmUpload123" + expectedName := "test.txt" + expectedSize := int64(100) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v1/storage/upload" { + t.Errorf("Expected path '/v1/storage/upload', got %s", r.URL.Path) + } + + // Verify multipart form + if err := r.ParseMultipartForm(32 << 20); err != nil { + t.Errorf("Failed to parse multipart form: %v", err) + return + } + + file, header, err := r.FormFile("file") + if err != nil { + t.Errorf("Failed to get file: %v", err) + return + } + defer file.Close() + + if header.Filename != expectedName { + t.Errorf("Expected filename %s, got %s", expectedName, header.Filename) + } + + response := StorageUploadResult{ + Cid: expectedCID, + Name: expectedName, + Size: expectedSize, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + })) + defer server.Close() + + cfg := &ClientConfig{ + GatewayURL: server.URL, + AppName: "test-app", + APIKey: "ak_test:test-app", // Required for requireAccess check + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + reader := strings.NewReader("test content") + result, err := storage.Upload(context.Background(), reader, expectedName) + if err != nil { + t.Fatalf("Failed to upload: %v", err) + } + + if result.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, result.Cid) + } + if result.Name != expectedName { + t.Errorf("Expected name %s, got %s", expectedName, result.Name) + } + if result.Size != expectedSize { + t.Errorf("Expected size %d, got %d", expectedSize, result.Size) + } + }) + + t.Run("server_error", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("internal error")) + })) + defer server.Close() + + cfg := &ClientConfig{ + GatewayURL: server.URL, + AppName: "test-app", + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + reader := strings.NewReader("test") + _, err := storage.Upload(context.Background(), reader, "test.txt") + if err == nil { + t.Error("Expected error for server error") + } + }) + + t.Run("missing_credentials", func(t *testing.T) { + cfg := &ClientConfig{ + GatewayURL: 
"http://localhost:6001", + // No AppName, JWT, or APIKey + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + reader := strings.NewReader("test") + _, err := storage.Upload(context.Background(), reader, "test.txt") + if err == nil { + t.Error("Expected error for missing credentials") + } + }) +} + +func TestStorageClientImpl_Pin(t *testing.T) { + t.Run("success", func(t *testing.T) { + expectedCID := "QmPin123" + expectedName := "pinned-file" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v1/storage/pin" { + t.Errorf("Expected path '/v1/storage/pin', got %s", r.URL.Path) + } + + var reqBody map[string]interface{} + if err := json.NewDecoder(r.Body).Decode(&reqBody); err != nil { + t.Errorf("Failed to decode request: %v", err) + return + } + + if reqBody["cid"] != expectedCID { + t.Errorf("Expected CID %s, got %v", expectedCID, reqBody["cid"]) + } + + response := StoragePinResult{ + Cid: expectedCID, + Name: expectedName, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + })) + defer server.Close() + + cfg := &ClientConfig{ + GatewayURL: server.URL, + AppName: "test-app", + APIKey: "ak_test:test-app", // Required for requireAccess check + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + result, err := storage.Pin(context.Background(), expectedCID, expectedName) + if err != nil { + t.Fatalf("Failed to pin: %v", err) + } + + if result.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, result.Cid) + } + if result.Name != expectedName { + t.Errorf("Expected name %s, got %s", expectedName, result.Name) + } + }) +} + +func TestStorageClientImpl_Status(t *testing.T) { + t.Run("success", func(t *testing.T) { + expectedCID := "QmStatus123" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/v1/storage/status/") { + t.Errorf("Expected path '/v1/storage/status/', got %s", r.URL.Path) + } + + response := StorageStatus{ + Cid: expectedCID, + Name: "test-file", + Status: "pinned", + ReplicationMin: 3, + ReplicationMax: 3, + ReplicationFactor: 3, + Peers: []string{"peer1", "peer2", "peer3"}, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + })) + defer server.Close() + + cfg := &ClientConfig{ + GatewayURL: server.URL, + AppName: "test-app", + APIKey: "ak_test:test-app", // Required for requireAccess check + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + status, err := storage.Status(context.Background(), expectedCID) + if err != nil { + t.Fatalf("Failed to get status: %v", err) + } + + if status.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, status.Cid) + } + if status.Status != "pinned" { + t.Errorf("Expected status 'pinned', got %s", status.Status) + } + }) +} + +func TestStorageClientImpl_Get(t *testing.T) { + t.Run("success", func(t *testing.T) { + expectedCID := "QmGet123" + expectedContent := "test content" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/v1/storage/get/") { + t.Errorf("Expected path '/v1/storage/get/', got %s", r.URL.Path) + } + w.Write([]byte(expectedContent)) + })) + defer server.Close() + + cfg := &ClientConfig{ + GatewayURL: server.URL, + AppName: "test-app", + APIKey: "ak_test:test-app", // 
Required for requireAccess check + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + reader, err := storage.Get(context.Background(), expectedCID) + if err != nil { + t.Fatalf("Failed to get content: %v", err) + } + defer reader.Close() + + data, err := io.ReadAll(reader) + if err != nil { + t.Fatalf("Failed to read content: %v", err) + } + + if string(data) != expectedContent { + t.Errorf("Expected content %s, got %s", expectedContent, string(data)) + } + }) +} + +func TestStorageClientImpl_Unpin(t *testing.T) { + t.Run("success", func(t *testing.T) { + expectedCID := "QmUnpin123" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/v1/storage/unpin/") { + t.Errorf("Expected path '/v1/storage/unpin/', got %s", r.URL.Path) + } + if r.Method != "DELETE" { + t.Errorf("Expected method DELETE, got %s", r.Method) + } + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + cfg := &ClientConfig{ + GatewayURL: server.URL, + AppName: "test-app", + APIKey: "ak_test:test-app", // Required for requireAccess check + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + err := storage.Unpin(context.Background(), expectedCID) + if err != nil { + t.Fatalf("Failed to unpin: %v", err) + } + }) +} + +func TestStorageClientImpl_getGatewayURL(t *testing.T) { + storage := &StorageClientImpl{} + + t.Run("from_config", func(t *testing.T) { + cfg := &ClientConfig{GatewayURL: "http://custom:6001"} + client := &Client{config: cfg} + storage.client = client + + url := storage.getGatewayURL() + if url != "http://custom:6001" { + t.Errorf("Expected 'http://custom:6001', got %s", url) + } + }) + + t.Run("default", func(t *testing.T) { + cfg := &ClientConfig{} + client := &Client{config: cfg} + storage.client = client + + url := storage.getGatewayURL() + if url != "http://localhost:6001" { + t.Errorf("Expected 'http://localhost:6001', got %s", url) + } + }) + + t.Run("nil_config", func(t *testing.T) { + client := &Client{config: nil} + storage.client = client + + url := storage.getGatewayURL() + if url != "http://localhost:6001" { + t.Errorf("Expected 'http://localhost:6001', got %s", url) + } + }) +} + +func TestStorageClientImpl_addAuthHeaders(t *testing.T) { + t.Run("jwt_preferred", func(t *testing.T) { + cfg := &ClientConfig{ + JWT: "test-jwt-token", + APIKey: "test-api-key", + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + req := httptest.NewRequest("POST", "/test", nil) + storage.addAuthHeaders(req) + + auth := req.Header.Get("Authorization") + if auth != "Bearer test-jwt-token" { + t.Errorf("Expected JWT in Authorization header, got %s", auth) + } + }) + + t.Run("apikey_fallback", func(t *testing.T) { + cfg := &ClientConfig{ + APIKey: "test-api-key", + } + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + req := httptest.NewRequest("POST", "/test", nil) + storage.addAuthHeaders(req) + + auth := req.Header.Get("Authorization") + if auth != "Bearer test-api-key" { + t.Errorf("Expected API key in Authorization header, got %s", auth) + } + + apiKey := req.Header.Get("X-API-Key") + if apiKey != "test-api-key" { + t.Errorf("Expected API key in X-API-Key header, got %s", apiKey) + } + }) + + t.Run("no_auth", func(t *testing.T) { + cfg := &ClientConfig{} + client := &Client{config: cfg} + storage := &StorageClientImpl{client: client} + + req := httptest.NewRequest("POST", "/test", nil) + 
storage.addAuthHeaders(req) + + auth := req.Header.Get("Authorization") + if auth != "" { + t.Errorf("Expected no Authorization header, got %s", auth) + } + }) + + t.Run("nil_config", func(t *testing.T) { + client := &Client{config: nil} + storage := &StorageClientImpl{client: client} + + req := httptest.NewRequest("POST", "/test", nil) + storage.addAuthHeaders(req) + + auth := req.Header.Get("Authorization") + if auth != "" { + t.Errorf("Expected no Authorization header, got %s", auth) + } + }) +} diff --git a/pkg/config/config.go b/pkg/config/config.go index 4314198..4d00115 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -36,7 +36,7 @@ type DatabaseConfig struct { RQLitePort int `yaml:"rqlite_port"` // RQLite HTTP API port RQLiteRaftPort int `yaml:"rqlite_raft_port"` // RQLite Raft consensus port RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster - + // Dynamic discovery configuration (always enabled) ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h @@ -45,6 +45,32 @@ type DatabaseConfig struct { // Olric cache configuration OlricHTTPPort int `yaml:"olric_http_port"` // Olric HTTP API port (default: 3320) OlricMemberlistPort int `yaml:"olric_memberlist_port"` // Olric memberlist port (default: 3322) + + // IPFS storage configuration + IPFS IPFSConfig `yaml:"ipfs"` +} + +// IPFSConfig contains IPFS storage configuration +type IPFSConfig struct { + // ClusterAPIURL is the IPFS Cluster HTTP API URL (e.g., "http://localhost:9094") + // If empty, IPFS storage is disabled for this node + ClusterAPIURL string `yaml:"cluster_api_url"` + + // APIURL is the IPFS HTTP API URL for content retrieval (e.g., "http://localhost:5001") + // If empty, defaults to "http://localhost:5001" + APIURL string `yaml:"api_url"` + + // Timeout for IPFS operations + // If zero, defaults to 60 seconds + Timeout time.Duration `yaml:"timeout"` + + // ReplicationFactor is the replication factor for pinned content + // If zero, defaults to 3 + ReplicationFactor int `yaml:"replication_factor"` + + // EnableEncryption enables client-side encryption before upload + // Defaults to true + EnableEncryption bool `yaml:"enable_encryption"` } // DiscoveryConfig contains peer discovery configuration @@ -115,7 +141,7 @@ func DefaultConfig() *Config { RQLitePort: 5001, RQLiteRaftPort: 7001, RQLiteJoinAddress: "", // Empty for bootstrap node - + // Dynamic discovery (always enabled) ClusterSyncInterval: 30 * time.Second, PeerInactivityLimit: 24 * time.Hour, @@ -124,6 +150,15 @@ func DefaultConfig() *Config { // Olric cache configuration OlricHTTPPort: 3320, OlricMemberlistPort: 3322, + + // IPFS storage configuration + IPFS: IPFSConfig{ + ClusterAPIURL: "", // Empty = disabled + APIURL: "http://localhost:5001", + Timeout: 60 * time.Second, + ReplicationFactor: 3, + EnableEncryption: true, + }, }, Discovery: DiscoveryConfig{ BootstrapPeers: []string{}, diff --git a/pkg/gateway/gateway.go b/pkg/gateway/gateway.go index f941c42..fc2dce1 100644 --- a/pkg/gateway/gateway.go +++ b/pkg/gateway/gateway.go @@ -6,11 +6,16 @@ import ( "crypto/rsa" "database/sql" "net" + "os" + "path/filepath" "strconv" + "strings" "sync" "time" "github.com/DeBrosOfficial/network/pkg/client" + "github.com/DeBrosOfficial/network/pkg/config" + "github.com/DeBrosOfficial/network/pkg/ipfs" "github.com/DeBrosOfficial/network/pkg/logging" "github.com/DeBrosOfficial/network/pkg/olric" 
"github.com/DeBrosOfficial/network/pkg/rqlite" @@ -38,6 +43,13 @@ type Config struct { // Olric cache configuration OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"] OlricTimeout time.Duration // Timeout for Olric operations (default: 10s) + + // IPFS Cluster configuration + IPFSClusterAPIURL string // IPFS Cluster HTTP API URL (e.g., "http://localhost:9094"). If empty, gateway will discover from node configs + IPFSAPIURL string // IPFS HTTP API URL for content retrieval (e.g., "http://localhost:5001"). If empty, gateway will discover from node configs + IPFSTimeout time.Duration // Timeout for IPFS operations (default: 60s) + IPFSReplicationFactor int // Replication factor for pins (default: 3) + IPFSEnableEncryption bool // Enable client-side encryption before upload (default: true, discovered from node configs) } type Gateway struct { @@ -56,6 +68,9 @@ type Gateway struct { // Olric cache client olricClient *olric.Client + // IPFS storage client + ipfsClient ipfs.IPFSClient + // Local pub/sub bypass for same-gateway subscribers localSubscribers map[string][]*localSubscriber // topic+namespace -> subscribers mu sync.RWMutex @@ -178,6 +193,80 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) { ) } + logger.ComponentInfo(logging.ComponentGeneral, "Initializing IPFS Cluster client...") + + // Discover IPFS endpoints from node configs if not explicitly configured + ipfsClusterURL := cfg.IPFSClusterAPIURL + ipfsAPIURL := cfg.IPFSAPIURL + ipfsTimeout := cfg.IPFSTimeout + ipfsReplicationFactor := cfg.IPFSReplicationFactor + ipfsEnableEncryption := cfg.IPFSEnableEncryption + + if ipfsClusterURL == "" { + logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster URL not configured, discovering from node configs...") + discovered := discoverIPFSFromNodeConfigs(logger.Logger) + if discovered.clusterURL != "" { + ipfsClusterURL = discovered.clusterURL + ipfsAPIURL = discovered.apiURL + if discovered.timeout > 0 { + ipfsTimeout = discovered.timeout + } + if discovered.replicationFactor > 0 { + ipfsReplicationFactor = discovered.replicationFactor + } + ipfsEnableEncryption = discovered.enableEncryption + logger.ComponentInfo(logging.ComponentGeneral, "Discovered IPFS endpoints from node configs", + zap.String("cluster_url", ipfsClusterURL), + zap.String("api_url", ipfsAPIURL), + zap.Bool("encryption_enabled", ipfsEnableEncryption)) + } else { + // Fallback to localhost defaults + ipfsClusterURL = "http://localhost:9094" + ipfsAPIURL = "http://localhost:5001" + ipfsEnableEncryption = true // Default to true + logger.ComponentInfo(logging.ComponentGeneral, "No IPFS config found in node configs, using localhost defaults") + } + } + + if ipfsAPIURL == "" { + ipfsAPIURL = "http://localhost:5001" + } + if ipfsTimeout == 0 { + ipfsTimeout = 60 * time.Second + } + if ipfsReplicationFactor == 0 { + ipfsReplicationFactor = 3 + } + if !cfg.IPFSEnableEncryption && !ipfsEnableEncryption { + // Only disable if explicitly set to false in both places + ipfsEnableEncryption = false + } else { + // Default to true if not explicitly disabled + ipfsEnableEncryption = true + } + + ipfsCfg := ipfs.Config{ + ClusterAPIURL: ipfsClusterURL, + Timeout: ipfsTimeout, + } + ipfsClient, ipfsErr := ipfs.NewClient(ipfsCfg, logger.Logger) + if ipfsErr != nil { + logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize IPFS Cluster client; storage endpoints disabled", zap.Error(ipfsErr)) + } else { + gw.ipfsClient = 
ipfsClient + logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster client ready", + zap.String("cluster_api_url", ipfsCfg.ClusterAPIURL), + zap.String("ipfs_api_url", ipfsAPIURL), + zap.Duration("timeout", ipfsCfg.Timeout), + zap.Int("replication_factor", ipfsReplicationFactor), + zap.Bool("encryption_enabled", ipfsEnableEncryption), + ) + } + // Store IPFS settings in gateway for use by handlers + gw.cfg.IPFSAPIURL = ipfsAPIURL + gw.cfg.IPFSReplicationFactor = ipfsReplicationFactor + gw.cfg.IPFSEnableEncryption = ipfsEnableEncryption + logger.ComponentInfo(logging.ComponentGeneral, "Gateway creation completed, returning...") return gw, nil } @@ -204,6 +293,13 @@ func (g *Gateway) Close() { g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err)) } } + if g.ipfsClient != nil { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := g.ipfsClient.Close(ctx); err != nil { + g.logger.ComponentWarn(logging.ComponentGeneral, "error during IPFS client close", zap.Error(err)) + } + } } // getLocalSubscribers returns all local subscribers for a given topic and namespace @@ -307,3 +403,77 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger return olricServers } + +// ipfsDiscoveryResult holds discovered IPFS configuration +type ipfsDiscoveryResult struct { + clusterURL string + apiURL string + timeout time.Duration + replicationFactor int + enableEncryption bool +} + +// discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files +// Checks bootstrap.yaml first, then node.yaml, node2.yaml, etc. +func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult { + homeDir, err := os.UserHomeDir() + if err != nil { + logger.Debug("Failed to get home directory for IPFS discovery", zap.Error(err)) + return ipfsDiscoveryResult{} + } + + configDir := filepath.Join(homeDir, ".debros") + + // Try bootstrap.yaml first, then node.yaml, node2.yaml, etc. 
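+	// The first config file that sets a non-empty cluster_api_url wins; later
+	// files are ignored. Caveat: enable_encryption is a plain bool, so an
+	// explicit `enable_encryption: false` in a node config reads as the zero
+	// value ("unset") and is coerced back to true below.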
+	configFiles := []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"}
+
+	for _, filename := range configFiles {
+		configPath := filepath.Join(configDir, filename)
+		data, err := os.ReadFile(configPath)
+		if err != nil {
+			continue
+		}
+
+		var nodeCfg config.Config
+		if err := config.DecodeStrict(strings.NewReader(string(data)), &nodeCfg); err != nil {
+			logger.Debug("Failed to parse node config for IPFS discovery",
+				zap.String("file", filename), zap.Error(err))
+			continue
+		}
+
+		// Check if IPFS is configured
+		if nodeCfg.Database.IPFS.ClusterAPIURL != "" {
+			result := ipfsDiscoveryResult{
+				clusterURL:        nodeCfg.Database.IPFS.ClusterAPIURL,
+				apiURL:            nodeCfg.Database.IPFS.APIURL,
+				timeout:           nodeCfg.Database.IPFS.Timeout,
+				replicationFactor: nodeCfg.Database.IPFS.ReplicationFactor,
+				enableEncryption:  nodeCfg.Database.IPFS.EnableEncryption,
+			}
+
+			if result.apiURL == "" {
+				result.apiURL = "http://localhost:5001"
+			}
+			if result.timeout == 0 {
+				result.timeout = 60 * time.Second
+			}
+			if result.replicationFactor == 0 {
+				result.replicationFactor = 3
+			}
+			// A plain bool cannot distinguish "unset" from "false", so
+			// discovery always reports encryption as enabled; switch the
+			// node config field to a *bool if opt-out must be supported.
+			result.enableEncryption = true
+
+			logger.Info("Discovered IPFS config from node config",
+				zap.String("file", filename),
+				zap.String("cluster_url", result.clusterURL),
+				zap.String("api_url", result.apiURL),
+				zap.Bool("encryption_enabled", result.enableEncryption))
+
+			return result
+		}
+	}
+
+	return ipfsDiscoveryResult{}
+}
diff --git a/pkg/gateway/middleware_test.go b/pkg/gateway/middleware_test.go
index 51e64e7..91e2b5a 100644
--- a/pkg/gateway/middleware_test.go
+++ b/pkg/gateway/middleware_test.go
@@ -26,12 +26,3 @@ func TestExtractAPIKey(t *testing.T) {
 		t.Fatalf("got %q", got)
 	}
 }
-
-func TestValidateNamespaceParam(t *testing.T) {
-	g := &Gateway{}
-	r := httptest.NewRequest(http.MethodGet, "/v1/storage/get?namespace=ns1&key=k", nil)
-	// no context namespace: should be false
-	if g.validateNamespaceParam(r) {
-		t.Fatalf("expected false without context ns")
-	}
-}
diff --git a/pkg/gateway/routes.go b/pkg/gateway/routes.go
index 25d09a0..7ab103a 100644
--- a/pkg/gateway/routes.go
+++ b/pkg/gateway/routes.go
@@ -54,5 +54,12 @@ func (g *Gateway) Routes() http.Handler {
 	mux.HandleFunc("/v1/cache/delete", g.cacheDeleteHandler)
 	mux.HandleFunc("/v1/cache/scan", g.cacheScanHandler)
 
+	// storage endpoints (IPFS)
+	mux.HandleFunc("/v1/storage/upload", g.storageUploadHandler)
+	mux.HandleFunc("/v1/storage/pin", g.storagePinHandler)
+	mux.HandleFunc("/v1/storage/status/", g.storageStatusHandler)
+	mux.HandleFunc("/v1/storage/get/", g.storageGetHandler)
+	mux.HandleFunc("/v1/storage/unpin/", g.storageUnpinHandler)
+
 	return g.withMiddleware(mux)
 }
diff --git a/pkg/gateway/storage_handlers.go b/pkg/gateway/storage_handlers.go
index 3c283e1..13269e1 100644
--- a/pkg/gateway/storage_handlers.go
+++ b/pkg/gateway/storage_handlers.go
@@ -1,13 +1,338 @@
 package gateway
 
 import (
+	"bytes"
+	"context"
+	"encoding/base64"
 	"encoding/json"
+	"fmt"
+	"io"
 	"net/http"
+	"strings"
 
 	"github.com/DeBrosOfficial/network/pkg/client"
+	"github.com/DeBrosOfficial/network/pkg/logging"
+	"go.uber.org/zap"
 )
 
-// Database HTTP handlers
+// StorageUploadRequest represents a request to upload content to IPFS
+type StorageUploadRequest struct {
+	Name string `json:"name,omitempty"`
+	Data string `json:"data,omitempty"` // Base64 encoded data (alternative to multipart)
+}
+
+// StorageUploadResponse represents the response from uploading content
+type StorageUploadResponse 
struct { + Cid string `json:"cid"` + Name string `json:"name"` + Size int64 `json:"size"` +} + +// StoragePinRequest represents a request to pin a CID +type StoragePinRequest struct { + Cid string `json:"cid"` + Name string `json:"name,omitempty"` +} + +// StoragePinResponse represents the response from pinning a CID +type StoragePinResponse struct { + Cid string `json:"cid"` + Name string `json:"name"` +} + +// StorageStatusResponse represents the status of a pinned CID +type StorageStatusResponse struct { + Cid string `json:"cid"` + Name string `json:"name"` + Status string `json:"status"` + ReplicationMin int `json:"replication_min"` + ReplicationMax int `json:"replication_max"` + ReplicationFactor int `json:"replication_factor"` + Peers []string `json:"peers"` + Error string `json:"error,omitempty"` +} + +// storageUploadHandler handles POST /v1/storage/upload +func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) { + if g.ipfsClient == nil { + writeError(w, http.StatusServiceUnavailable, "IPFS storage not available") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + // Get namespace from context + namespace := g.getNamespaceFromContext(r.Context()) + if namespace == "" { + writeError(w, http.StatusUnauthorized, "namespace required") + return + } + + // Get replication factor from config (default: 3) + replicationFactor := g.cfg.IPFSReplicationFactor + if replicationFactor == 0 { + replicationFactor = 3 + } + + // Check if it's multipart/form-data or JSON + contentType := r.Header.Get("Content-Type") + var reader io.Reader + var name string + + if strings.HasPrefix(contentType, "multipart/form-data") { + // Handle multipart upload + if err := r.ParseMultipartForm(32 << 20); err != nil { // 32MB max + writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to parse multipart form: %v", err)) + return + } + + file, header, err := r.FormFile("file") + if err != nil { + writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to get file: %v", err)) + return + } + defer file.Close() + + reader = file + name = header.Filename + } else { + // Handle JSON request with base64 data + var req StorageUploadRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode request: %v", err)) + return + } + + if req.Data == "" { + writeError(w, http.StatusBadRequest, "data field required") + return + } + + // Decode base64 data + data, err := base64Decode(req.Data) + if err != nil { + writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode base64 data: %v", err)) + return + } + + reader = bytes.NewReader(data) + name = req.Name + } + + // Add to IPFS + ctx := r.Context() + addResp, err := g.ipfsClient.Add(ctx, reader, name) + if err != nil { + g.logger.ComponentError(logging.ComponentGeneral, "failed to add content to IPFS", zap.Error(err)) + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to add content: %v", err)) + return + } + + // Pin with replication factor + _, err = g.ipfsClient.Pin(ctx, addResp.Cid, name, replicationFactor) + if err != nil { + g.logger.ComponentWarn(logging.ComponentGeneral, "failed to pin content", zap.Error(err), zap.String("cid", addResp.Cid)) + // Still return success, but log the pin failure + } + + response := StorageUploadResponse{ + Cid: addResp.Cid, + Name: addResp.Name, + Size: addResp.Size, + } + + writeJSON(w, http.StatusOK, response) +} + +// 
storagePinHandler handles POST /v1/storage/pin +func (g *Gateway) storagePinHandler(w http.ResponseWriter, r *http.Request) { + if g.ipfsClient == nil { + writeError(w, http.StatusServiceUnavailable, "IPFS storage not available") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req StoragePinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode request: %v", err)) + return + } + + if req.Cid == "" { + writeError(w, http.StatusBadRequest, "cid required") + return + } + + // Get replication factor from config (default: 3) + replicationFactor := g.cfg.IPFSReplicationFactor + if replicationFactor == 0 { + replicationFactor = 3 + } + + ctx := r.Context() + pinResp, err := g.ipfsClient.Pin(ctx, req.Cid, req.Name, replicationFactor) + if err != nil { + g.logger.ComponentError(logging.ComponentGeneral, "failed to pin CID", zap.Error(err), zap.String("cid", req.Cid)) + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to pin: %v", err)) + return + } + + // Use name from request if response doesn't have it + name := pinResp.Name + if name == "" { + name = req.Name + } + + response := StoragePinResponse{ + Cid: pinResp.Cid, + Name: name, + } + + writeJSON(w, http.StatusOK, response) +} + +// storageStatusHandler handles GET /v1/storage/status/:cid +func (g *Gateway) storageStatusHandler(w http.ResponseWriter, r *http.Request) { + if g.ipfsClient == nil { + writeError(w, http.StatusServiceUnavailable, "IPFS storage not available") + return + } + + if r.Method != http.MethodGet { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + // Extract CID from path + path := strings.TrimPrefix(r.URL.Path, "/v1/storage/status/") + if path == "" { + writeError(w, http.StatusBadRequest, "cid required") + return + } + + ctx := r.Context() + status, err := g.ipfsClient.PinStatus(ctx, path) + if err != nil { + g.logger.ComponentError(logging.ComponentGeneral, "failed to get pin status", zap.Error(err), zap.String("cid", path)) + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get status: %v", err)) + return + } + + response := StorageStatusResponse{ + Cid: status.Cid, + Name: status.Name, + Status: status.Status, + ReplicationMin: status.ReplicationMin, + ReplicationMax: status.ReplicationMax, + ReplicationFactor: status.ReplicationFactor, + Peers: status.Peers, + Error: status.Error, + } + + writeJSON(w, http.StatusOK, response) +} + +// storageGetHandler handles GET /v1/storage/get/:cid +func (g *Gateway) storageGetHandler(w http.ResponseWriter, r *http.Request) { + if g.ipfsClient == nil { + writeError(w, http.StatusServiceUnavailable, "IPFS storage not available") + return + } + + if r.Method != http.MethodGet { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + // Extract CID from path + path := strings.TrimPrefix(r.URL.Path, "/v1/storage/get/") + if path == "" { + writeError(w, http.StatusBadRequest, "cid required") + return + } + + // Get namespace from context + namespace := g.getNamespaceFromContext(r.Context()) + if namespace == "" { + writeError(w, http.StatusUnauthorized, "namespace required") + return + } + + // Get IPFS API URL from config + ipfsAPIURL := g.cfg.IPFSAPIURL + if ipfsAPIURL == "" { + ipfsAPIURL = "http://localhost:5001" + } + + ctx := r.Context() + reader, err := g.ipfsClient.Get(ctx, path, ipfsAPIURL) + if err != nil 
{ + g.logger.ComponentError(logging.ComponentGeneral, "failed to get content from IPFS", zap.Error(err), zap.String("cid", path)) + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get content: %v", err)) + return + } + defer reader.Close() + + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", path)) + + if _, err := io.Copy(w, reader); err != nil { + g.logger.ComponentError(logging.ComponentGeneral, "failed to write content", zap.Error(err)) + } +} + +// storageUnpinHandler handles DELETE /v1/storage/unpin/:cid +func (g *Gateway) storageUnpinHandler(w http.ResponseWriter, r *http.Request) { + if g.ipfsClient == nil { + writeError(w, http.StatusServiceUnavailable, "IPFS storage not available") + return + } + + if r.Method != http.MethodDelete { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + // Extract CID from path + path := strings.TrimPrefix(r.URL.Path, "/v1/storage/unpin/") + if path == "" { + writeError(w, http.StatusBadRequest, "cid required") + return + } + + ctx := r.Context() + if err := g.ipfsClient.Unpin(ctx, path); err != nil { + g.logger.ComponentError(logging.ComponentGeneral, "failed to unpin CID", zap.Error(err), zap.String("cid", path)) + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to unpin: %v", err)) + return + } + + writeJSON(w, http.StatusOK, map[string]any{"status": "ok", "cid": path}) +} + +// base64Decode decodes base64 string to bytes +func base64Decode(s string) ([]byte, error) { + return base64.StdEncoding.DecodeString(s) +} + +// getNamespaceFromContext extracts namespace from request context +func (g *Gateway) getNamespaceFromContext(ctx context.Context) string { + if v := ctx.Value(ctxKeyNamespaceOverride); v != nil { + if s, ok := v.(string); ok && s != "" { + return s + } + } + return "" +} + +// Network HTTP handlers func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) { if g.client == nil { @@ -84,17 +409,3 @@ func (g *Gateway) networkDisconnectHandler(w http.ResponseWriter, r *http.Reques } writeJSON(w, http.StatusOK, map[string]any{"status": "ok"}) } - -func (g *Gateway) validateNamespaceParam(r *http.Request) bool { - qns := r.URL.Query().Get("namespace") - if qns == "" { - return true - } - if v := r.Context().Value(ctxKeyNamespaceOverride); v != nil { - if s, ok := v.(string); ok && s != "" { - return s == qns - } - } - // If no namespace in context, disallow explicit namespace param - return false -} diff --git a/pkg/gateway/storage_handlers_test.go b/pkg/gateway/storage_handlers_test.go new file mode 100644 index 0000000..30dd839 --- /dev/null +++ b/pkg/gateway/storage_handlers_test.go @@ -0,0 +1,554 @@ +package gateway + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "io" + "mime/multipart" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/DeBrosOfficial/network/pkg/ipfs" + "github.com/DeBrosOfficial/network/pkg/logging" +) + +// mockIPFSClient is a mock implementation of ipfs.IPFSClient for testing +type mockIPFSClient struct { + addFunc func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) + pinFunc func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error) + pinStatusFunc func(ctx context.Context, cid string) (*ipfs.PinStatus, error) + getFunc func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) + unpinFunc 
func(ctx context.Context, cid string) error +} + +func (m *mockIPFSClient) Add(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) { + if m.addFunc != nil { + return m.addFunc(ctx, reader, name) + } + return &ipfs.AddResponse{Cid: "QmTest123", Name: name, Size: 100}, nil +} + +func (m *mockIPFSClient) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error) { + if m.pinFunc != nil { + return m.pinFunc(ctx, cid, name, replicationFactor) + } + return &ipfs.PinResponse{Cid: cid, Name: name}, nil +} + +func (m *mockIPFSClient) PinStatus(ctx context.Context, cid string) (*ipfs.PinStatus, error) { + if m.pinStatusFunc != nil { + return m.pinStatusFunc(ctx, cid) + } + return &ipfs.PinStatus{ + Cid: cid, + Name: "test", + Status: "pinned", + ReplicationMin: 3, + ReplicationMax: 3, + ReplicationFactor: 3, + Peers: []string{"peer1", "peer2", "peer3"}, + }, nil +} + +func (m *mockIPFSClient) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) { + if m.getFunc != nil { + return m.getFunc(ctx, cid, ipfsAPIURL) + } + return io.NopCloser(strings.NewReader("test content")), nil +} + +func (m *mockIPFSClient) Unpin(ctx context.Context, cid string) error { + if m.unpinFunc != nil { + return m.unpinFunc(ctx, cid) + } + return nil +} + +func (m *mockIPFSClient) Health(ctx context.Context) error { + return nil +} + +func (m *mockIPFSClient) Close(ctx context.Context) error { + return nil +} + +func newTestGatewayWithIPFS(t *testing.T, ipfsClient ipfs.IPFSClient) *Gateway { + logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true) + if err != nil { + t.Fatalf("Failed to create logger: %v", err) + } + + cfg := &Config{ + ListenAddr: ":6001", + ClientNamespace: "test", + IPFSReplicationFactor: 3, + IPFSEnableEncryption: true, + IPFSAPIURL: "http://localhost:5001", + } + + gw := &Gateway{ + logger: logger, + cfg: cfg, + } + + if ipfsClient != nil { + gw.ipfsClient = ipfsClient + } + + return gw +} + +func TestStorageUploadHandler_MissingIPFSClient(t *testing.T) { + gw := newTestGatewayWithIPFS(t, nil) + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", nil) + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusServiceUnavailable { + t.Errorf("Expected status %d, got %d", http.StatusServiceUnavailable, w.Code) + } +} + +func TestStorageUploadHandler_MethodNotAllowed(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + req := httptest.NewRequest(http.MethodGet, "/v1/storage/upload", nil) + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusMethodNotAllowed { + t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, w.Code) + } +} + +func TestStorageUploadHandler_MissingNamespace(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", nil) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusUnauthorized { + t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, w.Code) + } +} + +func TestStorageUploadHandler_MultipartUpload(t *testing.T) { + expectedCID := "QmTest456" + expectedName := "test.txt" + expectedSize := int64(200) + + 
mockClient := &mockIPFSClient{ + addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) { + // Read and verify content + data, _ := io.ReadAll(reader) + if len(data) == 0 { + return nil, io.ErrUnexpectedEOF + } + return &ipfs.AddResponse{ + Cid: expectedCID, + Name: name, + Size: expectedSize, + }, nil + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + part, _ := writer.CreateFormFile("file", expectedName) + part.Write([]byte("test file content")) + writer.Close() + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", &buf) + req.Header.Set("Content-Type", writer.FormDataContentType()) + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var resp StorageUploadResponse + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if resp.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid) + } + if resp.Name != expectedName { + t.Errorf("Expected name %s, got %s", expectedName, resp.Name) + } + if resp.Size != expectedSize { + t.Errorf("Expected size %d, got %d", expectedSize, resp.Size) + } +} + +func TestStorageUploadHandler_JSONUpload(t *testing.T) { + expectedCID := "QmTest789" + expectedName := "test.json" + testData := []byte("test json data") + base64Data := base64.StdEncoding.EncodeToString(testData) + + mockClient := &mockIPFSClient{ + addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) { + data, _ := io.ReadAll(reader) + if string(data) != string(testData) { + return nil, io.ErrUnexpectedEOF + } + return &ipfs.AddResponse{ + Cid: expectedCID, + Name: name, + Size: int64(len(testData)), + }, nil + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + reqBody := StorageUploadRequest{ + Name: expectedName, + Data: base64Data, + } + bodyBytes, _ := json.Marshal(reqBody) + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", bytes.NewReader(bodyBytes)) + req.Header.Set("Content-Type", "application/json") + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var resp StorageUploadResponse + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if resp.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid) + } +} + +func TestStorageUploadHandler_InvalidBase64(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + reqBody := StorageUploadRequest{ + Name: "test.txt", + Data: "invalid base64!!!", + } + bodyBytes, _ := json.Marshal(reqBody) + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", bytes.NewReader(bodyBytes)) + req.Header.Set("Content-Type", "application/json") + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} 
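+
+// For context, a real caller reaches this handler through Routes() and the
+// auth middleware; a minimal client-side sketch (gatewayURL and apiKey are
+// hypothetical placeholders, and the middleware is assumed to resolve the
+// namespace from the API key):
+//
+//	var buf bytes.Buffer
+//	mw := multipart.NewWriter(&buf)
+//	fw, _ := mw.CreateFormFile("file", "hello.txt")
+//	fw.Write([]byte("hello"))
+//	mw.Close()
+//	req, _ := http.NewRequest(http.MethodPost, gatewayURL+"/v1/storage/upload", &buf)
+//	req.Header.Set("Content-Type", mw.FormDataContentType())
+//	req.Header.Set("Authorization", "Bearer "+apiKey)
+//	resp, _ := http.DefaultClient.Do(req)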
+ +func TestStorageUploadHandler_IPFSError(t *testing.T) { + mockClient := &mockIPFSClient{ + addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) { + return nil, io.ErrUnexpectedEOF + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + part, _ := writer.CreateFormFile("file", "test.txt") + part.Write([]byte("test")) + writer.Close() + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", &buf) + req.Header.Set("Content-Type", writer.FormDataContentType()) + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageUploadHandler(w, req) + + if w.Code != http.StatusInternalServerError { + t.Errorf("Expected status %d, got %d", http.StatusInternalServerError, w.Code) + } +} + +func TestStoragePinHandler_Success(t *testing.T) { + expectedCID := "QmPin123" + expectedName := "pinned-file" + + mockClient := &mockIPFSClient{ + pinFunc: func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error) { + if cid != expectedCID { + return nil, io.ErrUnexpectedEOF + } + if replicationFactor != 3 { + return nil, io.ErrUnexpectedEOF + } + return &ipfs.PinResponse{Cid: cid, Name: name}, nil + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + reqBody := StoragePinRequest{ + Cid: expectedCID, + Name: expectedName, + } + bodyBytes, _ := json.Marshal(reqBody) + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/pin", bytes.NewReader(bodyBytes)) + w := httptest.NewRecorder() + + gw.storagePinHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var resp StoragePinResponse + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if resp.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid) + } + if resp.Name != expectedName { + t.Errorf("Expected name %s, got %s", expectedName, resp.Name) + } +} + +func TestStoragePinHandler_MissingCID(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + reqBody := StoragePinRequest{} + bodyBytes, _ := json.Marshal(reqBody) + + req := httptest.NewRequest(http.MethodPost, "/v1/storage/pin", bytes.NewReader(bodyBytes)) + w := httptest.NewRecorder() + + gw.storagePinHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +func TestStorageStatusHandler_Success(t *testing.T) { + expectedCID := "QmStatus123" + mockClient := &mockIPFSClient{ + pinStatusFunc: func(ctx context.Context, cid string) (*ipfs.PinStatus, error) { + return &ipfs.PinStatus{ + Cid: cid, + Name: "test-file", + Status: "pinned", + ReplicationMin: 3, + ReplicationMax: 3, + ReplicationFactor: 3, + Peers: []string{"peer1", "peer2", "peer3"}, + }, nil + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + req := httptest.NewRequest(http.MethodGet, "/v1/storage/status/"+expectedCID, nil) + w := httptest.NewRecorder() + + gw.storageStatusHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var resp StorageStatusResponse + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if resp.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid) + 
} + if resp.Status != "pinned" { + t.Errorf("Expected status 'pinned', got %s", resp.Status) + } + if resp.ReplicationFactor != 3 { + t.Errorf("Expected replication factor 3, got %d", resp.ReplicationFactor) + } +} + +func TestStorageStatusHandler_MissingCID(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + req := httptest.NewRequest(http.MethodGet, "/v1/storage/status/", nil) + w := httptest.NewRecorder() + + gw.storageStatusHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +func TestStorageGetHandler_Success(t *testing.T) { + expectedCID := "QmGet123" + expectedContent := "test content from IPFS" + + mockClient := &mockIPFSClient{ + getFunc: func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) { + if cid != expectedCID { + return nil, io.ErrUnexpectedEOF + } + return io.NopCloser(strings.NewReader(expectedContent)), nil + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + req := httptest.NewRequest(http.MethodGet, "/v1/storage/get/"+expectedCID, nil) + ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns") + req = req.WithContext(ctx) + w := httptest.NewRecorder() + + gw.storageGetHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + if w.Body.String() != expectedContent { + t.Errorf("Expected content %s, got %s", expectedContent, w.Body.String()) + } + + if w.Header().Get("Content-Type") != "application/octet-stream" { + t.Errorf("Expected Content-Type 'application/octet-stream', got %s", w.Header().Get("Content-Type")) + } +} + +func TestStorageGetHandler_MissingNamespace(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + req := httptest.NewRequest(http.MethodGet, "/v1/storage/get/QmTest123", nil) + w := httptest.NewRecorder() + + gw.storageGetHandler(w, req) + + if w.Code != http.StatusUnauthorized { + t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, w.Code) + } +} + +func TestStorageUnpinHandler_Success(t *testing.T) { + expectedCID := "QmUnpin123" + + mockClient := &mockIPFSClient{ + unpinFunc: func(ctx context.Context, cid string) error { + if cid != expectedCID { + return io.ErrUnexpectedEOF + } + return nil + }, + } + + gw := newTestGatewayWithIPFS(t, mockClient) + + req := httptest.NewRequest(http.MethodDelete, "/v1/storage/unpin/"+expectedCID, nil) + w := httptest.NewRecorder() + + gw.storageUnpinHandler(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var resp map[string]any + if err := json.NewDecoder(w.Body).Decode(&resp); err != nil { + t.Fatalf("Failed to decode response: %v", err) + } + + if resp["cid"] != expectedCID { + t.Errorf("Expected CID %s, got %v", expectedCID, resp["cid"]) + } +} + +func TestStorageUnpinHandler_MissingCID(t *testing.T) { + gw := newTestGatewayWithIPFS(t, &mockIPFSClient{}) + + req := httptest.NewRequest(http.MethodDelete, "/v1/storage/unpin/", nil) + w := httptest.NewRecorder() + + gw.storageUnpinHandler(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +// Test helper functions + +func TestBase64Decode(t *testing.T) { + testData := []byte("test data") + encoded := base64.StdEncoding.EncodeToString(testData) + + decoded, err := base64Decode(encoded) + if err != nil { + t.Fatalf("Failed to decode: %v", err) + } + + if string(decoded) != 
string(testData) { + t.Errorf("Expected %s, got %s", string(testData), string(decoded)) + } + + // Test invalid base64 + _, err = base64Decode("invalid!!!") + if err == nil { + t.Error("Expected error for invalid base64") + } +} + +func TestGetNamespaceFromContext(t *testing.T) { + gw := newTestGatewayWithIPFS(t, nil) + + // Test with namespace in context + ctx := context.WithValue(context.Background(), ctxKeyNamespaceOverride, "test-ns") + ns := gw.getNamespaceFromContext(ctx) + if ns != "test-ns" { + t.Errorf("Expected 'test-ns', got %s", ns) + } + + // Test without namespace + ctx2 := context.Background() + ns2 := gw.getNamespaceFromContext(ctx2) + if ns2 != "" { + t.Errorf("Expected empty namespace, got %s", ns2) + } +} diff --git a/pkg/ipfs/client.go b/pkg/ipfs/client.go new file mode 100644 index 0000000..b415fd0 --- /dev/null +++ b/pkg/ipfs/client.go @@ -0,0 +1,345 @@ +package ipfs + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "time" + + "go.uber.org/zap" +) + +// IPFSClient defines the interface for IPFS operations +type IPFSClient interface { + Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) + Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error) + PinStatus(ctx context.Context, cid string) (*PinStatus, error) + Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) + Unpin(ctx context.Context, cid string) error + Health(ctx context.Context) error + Close(ctx context.Context) error +} + +// Client wraps an IPFS Cluster HTTP API client for storage operations +type Client struct { + apiURL string + httpClient *http.Client + logger *zap.Logger +} + +// Config holds configuration for the IPFS client +type Config struct { + // ClusterAPIURL is the base URL for IPFS Cluster HTTP API (e.g., "http://localhost:9094") + // If empty, defaults to "http://localhost:9094" + ClusterAPIURL string + + // Timeout is the timeout for client operations + // If zero, defaults to 60 seconds + Timeout time.Duration +} + +// PinStatus represents the status of a pinned CID +type PinStatus struct { + Cid string `json:"cid"` + Name string `json:"name"` + Status string `json:"status"` // "pinned", "pinning", "queued", "unpinned", "error" + ReplicationMin int `json:"replication_min"` + ReplicationMax int `json:"replication_max"` + ReplicationFactor int `json:"replication_factor"` + Peers []string `json:"peers"` + Error string `json:"error,omitempty"` +} + +// AddResponse represents the response from adding content to IPFS +type AddResponse struct { + Name string `json:"name"` + Cid string `json:"cid"` + Size int64 `json:"size"` +} + +// PinResponse represents the response from pinning a CID +type PinResponse struct { + Cid string `json:"cid"` + Name string `json:"name"` +} + +// NewClient creates a new IPFS Cluster client wrapper +func NewClient(cfg Config, logger *zap.Logger) (*Client, error) { + apiURL := cfg.ClusterAPIURL + if apiURL == "" { + apiURL = "http://localhost:9094" + } + + timeout := cfg.Timeout + if timeout == 0 { + timeout = 60 * time.Second + } + + httpClient := &http.Client{ + Timeout: timeout, + } + + return &Client{ + apiURL: apiURL, + httpClient: httpClient, + logger: logger, + }, nil +} + +// Health checks if the IPFS Cluster API is healthy +func (c *Client) Health(ctx context.Context) error { + req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/id", nil) + if err != nil { + return fmt.Errorf("failed to create health check 
request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("health check request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("health check failed with status: %d", resp.StatusCode) + } + + return nil +} + +// Add adds content to IPFS and returns the CID +func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) { + // Create multipart form request for IPFS Cluster API + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + // Create form file field + part, err := writer.CreateFormFile("file", name) + if err != nil { + return nil, fmt.Errorf("failed to create form file: %w", err) + } + + if _, err := io.Copy(part, reader); err != nil { + return nil, fmt.Errorf("failed to copy data: %w", err) + } + + if err := writer.Close(); err != nil { + return nil, fmt.Errorf("failed to close writer: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", c.apiURL+"/add", &buf) + if err != nil { + return nil, fmt.Errorf("failed to create add request: %w", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("add request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("add failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result AddResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode add response: %w", err) + } + + return &result, nil +} + +// Pin pins a CID with specified replication factor +func (c *Client) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error) { + reqBody := map[string]interface{}{ + "cid": cid, + "replication_factor_min": replicationFactor, + "replication_factor_max": replicationFactor, + } + if name != "" { + reqBody["name"] = name + } + + jsonBody, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("failed to marshal pin request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", c.apiURL+"/pins/"+cid, bytes.NewReader(jsonBody)) + if err != nil { + return nil, fmt.Errorf("failed to create pin request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("pin request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("pin failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result PinResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("failed to decode pin response: %w", err) + } + + // If IPFS Cluster doesn't return the name in the response, use the one from the request + if result.Name == "" && name != "" { + result.Name = name + } + // Ensure CID is set + if result.Cid == "" { + result.Cid = cid + } + + return &result, nil +} + +// PinStatus retrieves the status of a pinned CID +func (c *Client) PinStatus(ctx context.Context, cid string) (*PinStatus, error) { + req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/pins/"+cid, nil) + if err != nil { + return nil, fmt.Errorf("failed to create pin status request: %w", err) + } + + resp, err := 
c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("pin status request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("pin not found: %s", cid) + } + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("pin status failed with status %d: %s", resp.StatusCode, string(body)) + } + + // IPFS Cluster returns GlobalPinInfo, we need to map it to our PinStatus + var gpi struct { + Cid string `json:"cid"` + Name string `json:"name"` + PeerMap map[string]struct { + Status interface{} `json:"status"` // TrackerStatus can be string or int + Error string `json:"error,omitempty"` + } `json:"peer_map"` + } + if err := json.NewDecoder(resp.Body).Decode(&gpi); err != nil { + return nil, fmt.Errorf("failed to decode pin status response: %w", err) + } + + // Extract status from peer map (use first peer's status, or aggregate) + status := "unknown" + peers := make([]string, 0, len(gpi.PeerMap)) + var errorMsg string + for peerID, pinInfo := range gpi.PeerMap { + peers = append(peers, peerID) + if pinInfo.Status != nil { + // Convert status to string + if s, ok := pinInfo.Status.(string); ok { + if status == "unknown" || s != "" { + status = s + } + } else if status == "unknown" { + // If status is not a string, try to convert it + status = fmt.Sprintf("%v", pinInfo.Status) + } + } + if pinInfo.Error != "" { + errorMsg = pinInfo.Error + } + } + + // Normalize status string (common IPFS Cluster statuses) + if status == "" || status == "unknown" { + status = "pinned" // Default to pinned if we have peers + if len(peers) == 0 { + status = "unknown" + } + } + + result := &PinStatus{ + Cid: gpi.Cid, + Name: gpi.Name, + Status: status, + ReplicationMin: 0, // Not available in GlobalPinInfo + ReplicationMax: 0, // Not available in GlobalPinInfo + ReplicationFactor: len(peers), + Peers: peers, + Error: errorMsg, + } + + // Ensure CID is set + if result.Cid == "" { + result.Cid = cid + } + + return result, nil +} + +// Unpin removes a pin from a CID +func (c *Client) Unpin(ctx context.Context, cid string) error { + req, err := http.NewRequestWithContext(ctx, "DELETE", c.apiURL+"/pins/"+cid, nil) + if err != nil { + return fmt.Errorf("failed to create unpin request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("unpin request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unpin failed with status %d: %s", resp.StatusCode, string(body)) + } + + return nil +} + +// Get retrieves content from IPFS by CID +// Note: This uses the IPFS HTTP API (typically on port 5001), not the Cluster API +func (c *Client) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) { + if ipfsAPIURL == "" { + ipfsAPIURL = "http://localhost:5001" + } + + url := fmt.Sprintf("%s/api/v0/cat?arg=%s", ipfsAPIURL, cid) + req, err := http.NewRequestWithContext(ctx, "POST", url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create get request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("get request failed: %w", err) + } + + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("get failed with status %d", resp.StatusCode) + } + + return resp.Body, nil +} + +// Close closes the IPFS client connection +func (c *Client) 
Close(ctx context.Context) error { + // HTTP client doesn't need explicit closing + return nil +} diff --git a/pkg/ipfs/client_test.go b/pkg/ipfs/client_test.go new file mode 100644 index 0000000..344dad1 --- /dev/null +++ b/pkg/ipfs/client_test.go @@ -0,0 +1,483 @@ +package ipfs + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "go.uber.org/zap" +) + +func TestNewClient(t *testing.T) { + logger := zap.NewNop() + + t.Run("default_config", func(t *testing.T) { + cfg := Config{} + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + if client.apiURL != "http://localhost:9094" { + t.Errorf("Expected default API URL 'http://localhost:9094', got %s", client.apiURL) + } + + if client.httpClient.Timeout != 60*time.Second { + t.Errorf("Expected default timeout 60s, got %v", client.httpClient.Timeout) + } + }) + + t.Run("custom_config", func(t *testing.T) { + cfg := Config{ + ClusterAPIURL: "http://custom:9094", + Timeout: 30 * time.Second, + } + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + if client.apiURL != "http://custom:9094" { + t.Errorf("Expected API URL 'http://custom:9094', got %s", client.apiURL) + } + + if client.httpClient.Timeout != 30*time.Second { + t.Errorf("Expected timeout 30s, got %v", client.httpClient.Timeout) + } + }) +} + +func TestClient_Add(t *testing.T) { + logger := zap.NewNop() + + t.Run("success", func(t *testing.T) { + expectedCID := "QmTest123" + expectedName := "test.txt" + expectedSize := int64(100) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/add" { + t.Errorf("Expected path '/add', got %s", r.URL.Path) + } + if r.Method != "POST" { + t.Errorf("Expected method POST, got %s", r.Method) + } + + // Verify multipart form + if err := r.ParseMultipartForm(32 << 20); err != nil { + t.Errorf("Failed to parse multipart form: %v", err) + return + } + + file, header, err := r.FormFile("file") + if err != nil { + t.Errorf("Failed to get file: %v", err) + return + } + defer file.Close() + + if header.Filename != expectedName { + t.Errorf("Expected filename %s, got %s", expectedName, header.Filename) + } + + // Read file content + _, _ = io.ReadAll(file) + + response := AddResponse{ + Cid: expectedCID, + Name: expectedName, + Size: expectedSize, + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: server.URL} + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + reader := strings.NewReader("test content") + resp, err := client.Add(context.Background(), reader, expectedName) + if err != nil { + t.Fatalf("Failed to add content: %v", err) + } + + if resp.Cid != expectedCID { + t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid) + } + if resp.Name != expectedName { + t.Errorf("Expected name %s, got %s", expectedName, resp.Name) + } + if resp.Size != expectedSize { + t.Errorf("Expected size %d, got %d", expectedSize, resp.Size) + } + }) + + t.Run("server_error", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("internal error")) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: server.URL} + client, err := 
NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		reader := strings.NewReader("test")
+		_, err = client.Add(context.Background(), reader, "test.txt")
+		if err == nil {
+			t.Error("Expected error for server error")
+		}
+	})
+}
+
+func TestClient_Pin(t *testing.T) {
+	logger := zap.NewNop()
+
+	t.Run("success", func(t *testing.T) {
+		expectedCID := "QmPin123"
+		expectedName := "pinned-file"
+		expectedReplicationFactor := 3
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			if !strings.HasPrefix(r.URL.Path, "/pins/") {
+				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
+			}
+			if r.Method != "POST" {
+				t.Errorf("Expected method POST, got %s", r.Method)
+			}
+
+			var reqBody map[string]interface{}
+			if err := json.NewDecoder(r.Body).Decode(&reqBody); err != nil {
+				t.Errorf("Failed to decode request: %v", err)
+				return
+			}
+
+			if reqBody["cid"] != expectedCID {
+				t.Errorf("Expected CID %s, got %v", expectedCID, reqBody["cid"])
+			}
+
+			response := PinResponse{
+				Cid:  expectedCID,
+				Name: expectedName,
+			}
+			w.Header().Set("Content-Type", "application/json")
+			json.NewEncoder(w).Encode(response)
+		}))
+		defer server.Close()
+
+		cfg := Config{ClusterAPIURL: server.URL}
+		client, err := NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		resp, err := client.Pin(context.Background(), expectedCID, expectedName, expectedReplicationFactor)
+		if err != nil {
+			t.Fatalf("Failed to pin: %v", err)
+		}
+
+		if resp.Cid != expectedCID {
+			t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
+		}
+		if resp.Name != expectedName {
+			t.Errorf("Expected name %s, got %s", expectedName, resp.Name)
+		}
+	})
+
+	t.Run("accepted_status", func(t *testing.T) {
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusAccepted)
+			response := PinResponse{Cid: "QmTest", Name: "test"}
+			json.NewEncoder(w).Encode(response)
+		}))
+		defer server.Close()
+
+		cfg := Config{ClusterAPIURL: server.URL}
+		client, err := NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		_, err = client.Pin(context.Background(), "QmTest", "test", 3)
+		if err != nil {
+			t.Errorf("Expected success for Accepted status, got error: %v", err)
+		}
+	})
+}
+
+func TestClient_PinStatus(t *testing.T) {
+	logger := zap.NewNop()
+
+	t.Run("success", func(t *testing.T) {
+		expectedCID := "QmStatus123"
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			if !strings.HasPrefix(r.URL.Path, "/pins/") {
+				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
+			}
+			if r.Method != "GET" {
+				t.Errorf("Expected method GET, got %s", r.Method)
+			}
+
+			// Respond with the shape the client actually decodes: IPFS
+			// Cluster returns a GlobalPinInfo with a peer_map, not a flat
+			// PinStatus, so encoding PinStatus here would leave the decoded
+			// peer map empty and fail the assertions below.
+			response := map[string]interface{}{
+				"cid":  expectedCID,
+				"name": "test-file",
+				"peer_map": map[string]interface{}{
+					"peer1": map[string]interface{}{"status": "pinned"},
+					"peer2": map[string]interface{}{"status": "pinned"},
+					"peer3": map[string]interface{}{"status": "pinned"},
+				},
+			}
+			w.Header().Set("Content-Type", "application/json")
+			json.NewEncoder(w).Encode(response)
+		}))
+		defer server.Close()
+
+		cfg := Config{ClusterAPIURL: server.URL}
+		client, err := NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		status, err := client.PinStatus(context.Background(), expectedCID)
+		if err != nil {
+			t.Fatalf("Failed to get pin status: %v", err)
+		}
+
+		if status.Cid != expectedCID {
+			t.Errorf("Expected CID %s, got %s", expectedCID, status.Cid)
+		}
+		if status.Status != 
"pinned" { + t.Errorf("Expected status 'pinned', got %s", status.Status) + } + if len(status.Peers) != 3 { + t.Errorf("Expected 3 peers, got %d", len(status.Peers)) + } + }) + + t.Run("not_found", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: server.URL} + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + _, err = client.PinStatus(context.Background(), "QmNotFound") + if err == nil { + t.Error("Expected error for not found") + } + }) +} + +func TestClient_Unpin(t *testing.T) { + logger := zap.NewNop() + + t.Run("success", func(t *testing.T) { + expectedCID := "QmUnpin123" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/pins/") { + t.Errorf("Expected path '/pins/', got %s", r.URL.Path) + } + if r.Method != "DELETE" { + t.Errorf("Expected method DELETE, got %s", r.Method) + } + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: server.URL} + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + err = client.Unpin(context.Background(), expectedCID) + if err != nil { + t.Fatalf("Failed to unpin: %v", err) + } + }) + + t.Run("accepted_status", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: server.URL} + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + err = client.Unpin(context.Background(), "QmTest") + if err != nil { + t.Errorf("Expected success for Accepted status, got error: %v", err) + } + }) +} + +func TestClient_Get(t *testing.T) { + logger := zap.NewNop() + + t.Run("success", func(t *testing.T) { + expectedCID := "QmGet123" + expectedContent := "test content from IPFS" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.URL.Path, "/api/v0/cat") { + t.Errorf("Expected path containing '/api/v0/cat', got %s", r.URL.Path) + } + if r.Method != "POST" { + t.Errorf("Expected method POST, got %s", r.Method) + } + + // Verify CID parameter + if !strings.Contains(r.URL.RawQuery, expectedCID) { + t.Errorf("Expected CID %s in query, got %s", expectedCID, r.URL.RawQuery) + } + + w.Write([]byte(expectedContent)) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: "http://localhost:9094"} + client, err := NewClient(cfg, logger) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + reader, err := client.Get(context.Background(), expectedCID, server.URL) + if err != nil { + t.Fatalf("Failed to get content: %v", err) + } + defer reader.Close() + + data, err := io.ReadAll(reader) + if err != nil { + t.Fatalf("Failed to read content: %v", err) + } + + if string(data) != expectedContent { + t.Errorf("Expected content %s, got %s", expectedContent, string(data)) + } + }) + + t.Run("not_found", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer server.Close() + + cfg := Config{ClusterAPIURL: "http://localhost:9094"} + client, err := NewClient(cfg, logger) + if err != 
nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		_, err = client.Get(context.Background(), "QmNotFound", server.URL)
+		if err == nil {
+			t.Error("Expected error for not found")
+		}
+	})
+
+	t.Run("default_ipfs_api_url", func(t *testing.T) {
+		expectedCID := "QmDefault"
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Write([]byte("content"))
+		}))
+		defer server.Close()
+
+		cfg := Config{ClusterAPIURL: "http://localhost:9094"}
+		client, err := NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		// Pass an empty IPFS API URL so the client falls back to the default
+		// http://localhost:5001; nothing listens there in tests, so the call
+		// is expected to fail.
+		_, err = client.Get(context.Background(), expectedCID, "")
+		if err == nil {
+			t.Error("Expected error when using default localhost:5001")
+		}
+	})
+}
+
+func TestClient_Health(t *testing.T) {
+	logger := zap.NewNop()
+
+	t.Run("success", func(t *testing.T) {
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			if r.URL.Path != "/id" {
+				t.Errorf("Expected path '/id', got %s", r.URL.Path)
+			}
+			w.WriteHeader(http.StatusOK)
+			w.Write([]byte(`{"id": "test"}`))
+		}))
+		defer server.Close()
+
+		cfg := Config{ClusterAPIURL: server.URL}
+		client, err := NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		err = client.Health(context.Background())
+		if err != nil {
+			t.Fatalf("Failed health check: %v", err)
+		}
+	})
+
+	t.Run("unhealthy", func(t *testing.T) {
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusInternalServerError)
+		}))
+		defer server.Close()
+
+		cfg := Config{ClusterAPIURL: server.URL}
+		client, err := NewClient(cfg, logger)
+		if err != nil {
+			t.Fatalf("Failed to create client: %v", err)
+		}
+
+		err = client.Health(context.Background())
+		if err == nil {
+			t.Error("Expected error for unhealthy status")
+		}
+	})
+}
+
+func TestClient_Close(t *testing.T) {
+	logger := zap.NewNop()
+
+	cfg := Config{ClusterAPIURL: "http://localhost:9094"}
+	client, err := NewClient(cfg, logger)
+	if err != nil {
+		t.Fatalf("Failed to create client: %v", err)
+	}
+
+	// Close should not error
+	err = client.Close(context.Background())
+	if err != nil {
+		t.Errorf("Close should not error, got: %v", err)
+	}
+}

From d6009bb33f405cc4712fa08257822b7e38c41837 Mon Sep 17 00:00:00 2001
From: anonpenguin23
Date: Wed, 5 Nov 2025 09:01:55 +0200
Subject: [PATCH 03/57] feat: enhance IPFS and Cluster integration in setup

- Added automatic setup for IPFS and IPFS Cluster during the network setup process.
- Implemented initialization of IPFS repositories and Cluster configurations for each node.
- Enhanced Makefile to support starting IPFS and Cluster daemons with improved logging.
- Introduced a new documentation guide for IPFS Cluster setup, detailing configuration and verification steps.
- Updated changelog to reflect the new features and improvements.
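
A quick post-setup sanity check (illustrative sketch; assumes the default
localhost ports wired up here and reuses the pkg/ipfs client from the
previous patch; imports: context, log, pkg/ipfs, zap):

    c, err := ipfs.NewClient(ipfs.Config{ClusterAPIURL: "http://localhost:9094"}, zap.NewNop())
    if err == nil {
        err = c.Health(context.Background())
    }
    if err != nil {
        log.Fatalf("IPFS Cluster not reachable: %v", err)
    }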
--- .githooks/pre-commit | 9 + CHANGELOG.md | 56 ++-- Makefile | 241 ++++++++++++++---- docs/ipfs-cluster-setup.md | 171 +++++++++++++ pkg/cli/setup.go | 436 +++++++++++++++++++++++++++++++- pkg/gateway/gateway.go | 19 ++ pkg/gateway/storage_handlers.go | 7 +- pkg/ipfs/client.go | 65 +++-- scripts/update_changelog.sh | 9 + 9 files changed, 925 insertions(+), 88 deletions(-) create mode 100644 docs/ipfs-cluster-setup.md diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 74f323d..d9e2bad 100644 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -30,6 +30,15 @@ if [ -z "$OTHER_FILES" ]; then exit 0 fi +# Check for skip flag +# To skip changelog generation, set SKIP_CHANGELOG=1 before committing: +# SKIP_CHANGELOG=1 git commit -m "your message" +# SKIP_CHANGELOG=1 git commit +if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then + echo -e "${YELLOW}Skipping changelog update (SKIP_CHANGELOG is set)${NOCOLOR}" + exit 0 +fi + # Update changelog before commit if [ -f "$CHANGELOG_SCRIPT" ]; then echo -e "\n${CYAN}Updating changelog...${NOCOLOR}" diff --git a/CHANGELOG.md b/CHANGELOG.md index 20b5347..8502398 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,14 +13,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed + ## [0.56.0] - 2025-11-05 ### Added + - Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning. - Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints. - Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup. ### Changed + - Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor. - Refactored Olric configuration generation to use a simpler, local-environment focused setup. - Improved IPFS content retrieval (`Get`) to fall back to the IPFS Gateway (port 8080) if the IPFS API (port 5001) returns a 404. @@ -30,34 +33,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed -\n -## [0.55.0] - 2025-11-05 -### Added -- Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning. -- Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints. -- Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup. - -### Changed -- Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor. -- Refactored Olric configuration generation to use a simpler, local-environment focused setup. -- Improved `dev` environment logging to include logs from IPFS and Olric services when running. - -### Deprecated - -### Removed - -### Fixed -\n ## [0.54.0] - 2025-11-03 ### Added + - Integrated Olric distributed cache for high-speed key-value storage and caching. - Added new HTTP Gateway endpoints for cache operations (GET, PUT, DELETE, SCAN) via `/v1/cache/`. - Added `olric_servers` and `olric_timeout` configuration options to the Gateway. - Updated the automated installation script (`install-debros-network.sh`) to include Olric installation, configuration, and firewall rules (ports 3320, 3322). 
### Changed + - Refactored README for better clarity and organization, focusing on quick start and core features. ### Deprecated @@ -65,12 +52,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.18] - 2025-11-03 ### Added + \n + ### Changed + - Increased the connection timeout during peer discovery from 15 seconds to 20 seconds to improve connection reliability. - Removed unnecessary debug logging related to filtering out ephemeral port addresses during peer exchange. @@ -79,13 +71,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.17] - 2025-11-03 ### Added + - Added a new Git `pre-commit` hook to automatically update the changelog and version before committing, ensuring version consistency. ### Changed + - Refactored the `update_changelog.sh` script to support different execution contexts (pre-commit vs. pre-push), allowing it to analyze only staged changes during commit. - The Git `pre-push` hook was simplified by removing the changelog update logic, which is now handled by the `pre-commit` hook. @@ -94,12 +90,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.16] - 2025-11-03 ### Added + \n + ### Changed + - Improved the changelog generation script to prevent infinite loops when the only unpushed commit is a previous changelog update. ### Deprecated @@ -107,12 +108,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.15] - 2025-11-03 ### Added + \n + ### Changed + - Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation. - Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security. - Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility. @@ -124,12 +130,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.15] - 2025-11-03 ### Added + \n + ### Changed + - Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation. - Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security. - Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility. @@ -141,14 +152,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.14] - 2025-11-03 ### Added + - Added a new `install-hooks` target to the Makefile to easily set up git hooks. - Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`. ### Changed + - Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog. - Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration. - Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments. 
@@ -160,14 +175,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.14] - 2025-11-03 ### Added + - Added a new `install-hooks` target to the Makefile to easily set up git hooks. - Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`. ### Changed + - Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog. - Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration. - Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments. @@ -177,6 +196,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n ## [0.53.8] - 2025-10-31 diff --git a/Makefile b/Makefile index bc9bbb2..712948d 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ test-e2e: # Network - Distributed P2P Database System # Makefile for development and build tasks -.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks +.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill VERSION := 0.56.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) @@ -109,6 +109,102 @@ dev: build echo " ⚠️ systemctl not found - skipping Anon"; \ fi; \ fi + @echo "Initializing IPFS and Cluster for all nodes..." + @if command -v ipfs >/dev/null 2>&1 && command -v ipfs-cluster-service >/dev/null 2>&1; then \ + CLUSTER_SECRET=$$HOME/.debros/cluster-secret; \ + if [ ! -f $$CLUSTER_SECRET ]; then \ + echo " Generating shared cluster secret..."; \ + ipfs-cluster-service --version >/dev/null 2>&1 && openssl rand -hex 32 > $$CLUSTER_SECRET || echo "0000000000000000000000000000000000000000000000000000000000000000" > $$CLUSTER_SECRET; \ + fi; \ + SECRET=$$(cat $$CLUSTER_SECRET); \ + echo " Setting up bootstrap node (IPFS: 5001, Cluster: 9094)..."; \ + if [ ! 
-d $$HOME/.debros/bootstrap/ipfs/repo ]; then \ + echo " Initializing IPFS..."; \ + mkdir -p $$HOME/.debros/bootstrap/ipfs; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8080"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4001","/ip6/::/tcp/4001"]' 2>&1 | grep -v "generating" || true; \ + fi; \ + echo " Initializing IPFS Cluster..."; \ + mkdir -p $$HOME/.debros/bootstrap/ipfs-cluster; \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster ipfs-cluster-service init --force >/dev/null 2>&1 || true; \ + jq '.cluster.peername = "bootstrap" | .cluster.secret = "'$$SECRET'" | .cluster.listen_multiaddress = ["/ip4/0.0.0.0/tcp/9096"] | .consensus.crdt.cluster_name = "debros-cluster" | .consensus.crdt.trusted_peers = ["*"] | .api.restapi.http_listen_multiaddress = "/ip4/0.0.0.0/tcp/9094" | .api.ipfsproxy.listen_multiaddress = "/ip4/127.0.0.1/tcp/9095" | .api.pinsvcapi.http_listen_multiaddress = "/ip4/127.0.0.1/tcp/9097" | .ipfs_connector.ipfshttp.node_multiaddress = "/ip4/127.0.0.1/tcp/5001"' $$HOME/.debros/bootstrap/ipfs-cluster/service.json > $$HOME/.debros/bootstrap/ipfs-cluster/service.json.tmp && mv $$HOME/.debros/bootstrap/ipfs-cluster/service.json.tmp $$HOME/.debros/bootstrap/ipfs-cluster/service.json; \ + echo " Setting up node2 (IPFS: 5002, Cluster: 9104)..."; \ + if [ ! -d $$HOME/.debros/node2/ipfs/repo ]; then \ + echo " Initializing IPFS..."; \ + mkdir -p $$HOME/.debros/node2/ipfs; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5002"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8081"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4002","/ip6/::/tcp/4002"]' 2>&1 | grep -v "generating" || true; \ + fi; \ + echo " Initializing IPFS Cluster..."; \ + mkdir -p $$HOME/.debros/node2/ipfs-cluster; \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster ipfs-cluster-service init --force >/dev/null 2>&1 || true; \ + jq '.cluster.peername = "node2" | .cluster.secret = "'$$SECRET'" | .cluster.listen_multiaddress = ["/ip4/0.0.0.0/tcp/9106"] | .consensus.crdt.cluster_name = "debros-cluster" | .consensus.crdt.trusted_peers = ["*"] | .api.restapi.http_listen_multiaddress = "/ip4/0.0.0.0/tcp/9104" | .api.ipfsproxy.listen_multiaddress = "/ip4/127.0.0.1/tcp/9105" | .api.pinsvcapi.http_listen_multiaddress = "/ip4/127.0.0.1/tcp/9107" | .ipfs_connector.ipfshttp.node_multiaddress = "/ip4/127.0.0.1/tcp/5002"' $$HOME/.debros/node2/ipfs-cluster/service.json > $$HOME/.debros/node2/ipfs-cluster/service.json.tmp && mv $$HOME/.debros/node2/ipfs-cluster/service.json.tmp $$HOME/.debros/node2/ipfs-cluster/service.json; \ + echo " Setting up node3 (IPFS: 5003, Cluster: 9114)..."; \ + if [ ! 
-d $$HOME/.debros/node3/ipfs/repo ]; then \ + echo " Initializing IPFS..."; \ + mkdir -p $$HOME/.debros/node3/ipfs; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5003"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8082"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4003","/ip6/::/tcp/4003"]' 2>&1 | grep -v "generating" || true; \ + fi; \ + echo " Initializing IPFS Cluster..."; \ + mkdir -p $$HOME/.debros/node3/ipfs-cluster; \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster ipfs-cluster-service init --force >/dev/null 2>&1 || true; \ + jq '.cluster.peername = "node3" | .cluster.secret = "'$$SECRET'" | .cluster.listen_multiaddress = ["/ip4/0.0.0.0/tcp/9116"] | .consensus.crdt.cluster_name = "debros-cluster" | .consensus.crdt.trusted_peers = ["*"] | .api.restapi.http_listen_multiaddress = "/ip4/0.0.0.0/tcp/9114" | .api.ipfsproxy.listen_multiaddress = "/ip4/127.0.0.1/tcp/9115" | .api.pinsvcapi.http_listen_multiaddress = "/ip4/127.0.0.1/tcp/9117" | .ipfs_connector.ipfshttp.node_multiaddress = "/ip4/127.0.0.1/tcp/5003"' $$HOME/.debros/node3/ipfs-cluster/service.json > $$HOME/.debros/node3/ipfs-cluster/service.json.tmp && mv $$HOME/.debros/node3/ipfs-cluster/service.json.tmp $$HOME/.debros/node3/ipfs-cluster/service.json; \ + echo "Starting IPFS daemons..."; \ + if [ ! -f .dev/pids/ipfs-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null; then \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-bootstrap.pid; \ + echo " Bootstrap IPFS started (PID: $$(cat .dev/pids/ipfs-bootstrap.pid), API: 5001)"; \ + sleep 3; \ + else \ + echo " ✓ Bootstrap IPFS already running"; \ + fi; \ + if [ ! -f .dev/pids/ipfs-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null; then \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-node2.pid; \ + echo " Node2 IPFS started (PID: $$(cat .dev/pids/ipfs-node2.pid), API: 5002)"; \ + sleep 3; \ + else \ + echo " ✓ Node2 IPFS already running"; \ + fi; \ + if [ ! -f .dev/pids/ipfs-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null; then \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-node3.pid; \ + echo " Node3 IPFS started (PID: $$(cat .dev/pids/ipfs-node3.pid), API: 5003)"; \ + sleep 3; \ + else \ + echo " ✓ Node3 IPFS already running"; \ + fi; \ + \ + echo "Starting IPFS Cluster peers..."; \ + if [ ! -f .dev/pids/ipfs-cluster-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null; then \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-bootstrap.log 2>&1 & echo $$! 
> .dev/pids/ipfs-cluster-bootstrap.pid; \ + echo " Bootstrap Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-bootstrap.pid), API: 9094)"; \ + sleep 3; \ + else \ + echo " ✓ Bootstrap Cluster already running"; \ + fi; \ + if [ ! -f .dev/pids/ipfs-cluster-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null; then \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node2.pid; \ + echo " Node2 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node2.pid), API: 9104)"; \ + sleep 3; \ + else \ + echo " ✓ Node2 Cluster already running"; \ + fi; \ + if [ ! -f .dev/pids/ipfs-cluster-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null; then \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node3.pid; \ + echo " Node3 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node3.pid), API: 9114)"; \ + sleep 3; \ + else \ + echo " ✓ Node3 Cluster already running"; \ + fi; \ + else \ + echo " ⚠️ ipfs or ipfs-cluster-service not found - skipping IPFS setup"; \ + echo " Install with: https://docs.ipfs.tech/install/ and https://ipfscluster.io/documentation/guides/install/"; \ + fi @sleep 2 @echo "Starting bootstrap node..." @nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid @@ -119,40 +215,6 @@ dev: build @echo "Starting node3..." @nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid @sleep 1 - @echo "Starting IPFS daemon..." - @if command -v ipfs >/dev/null 2>&1; then \ - if [ ! -d $$HOME/.debros/ipfs ]; then \ - echo " Initializing IPFS repository..."; \ - IPFS_PATH=$$HOME/.debros/ipfs ipfs init 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - fi; \ - if ! pgrep -f "ipfs daemon" >/dev/null 2>&1; then \ - IPFS_PATH=$$HOME/.debros/ipfs nohup ipfs daemon > $$HOME/.debros/logs/ipfs.log 2>&1 & echo $$! > .dev/pids/ipfs.pid; \ - echo " IPFS daemon started (PID: $$(cat .dev/pids/ipfs.pid))"; \ - sleep 5; \ - else \ - echo " ✓ IPFS daemon already running"; \ - fi; \ - else \ - echo " ⚠️ ipfs command not found - skipping IPFS (storage endpoints will be disabled)"; \ - echo " Install with: https://docs.ipfs.tech/install/"; \ - fi - @echo "Starting IPFS Cluster daemon..." - @if command -v ipfs-cluster-service >/dev/null 2>&1; then \ - if [ ! -d $$HOME/.debros/ipfs-cluster ]; then \ - echo " Initializing IPFS Cluster..."; \ - CLUSTER_PATH=$$HOME/.debros/ipfs-cluster ipfs-cluster-service init --force 2>&1 | grep -v "peer identity" || true; \ - fi; \ - if ! pgrep -f "ipfs-cluster-service" >/dev/null 2>&1; then \ - CLUSTER_PATH=$$HOME/.debros/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster.pid; \ - echo " IPFS Cluster daemon started (PID: $$(cat .dev/pids/ipfs-cluster.pid))"; \ - sleep 5; \ - else \ - echo " ✓ IPFS Cluster daemon already running"; \ - fi; \ - else \ - echo " ⚠️ ipfs-cluster-service command not found - skipping IPFS Cluster (storage endpoints will be disabled)"; \ - echo " Install with: https://ipfscluster.io/documentation/guides/install/"; \ - fi @echo "Starting Olric cache server..." @if command -v olric-server >/dev/null 2>&1; then \ if [ ! 
-f $$HOME/.debros/olric-config.yaml ]; then \ @@ -182,11 +244,23 @@ dev: build @if [ -f .dev/pids/anon.pid ]; then \ echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \ fi - @if [ -f .dev/pids/ipfs.pid ]; then \ - echo " IPFS: PID=$$(cat .dev/pids/ipfs.pid) (API: 5001)"; \ + @if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ + echo " Bootstrap IPFS: PID=$$(cat .dev/pids/ipfs-bootstrap.pid) (API: 5001)"; \ fi - @if [ -f .dev/pids/ipfs-cluster.pid ]; then \ - echo " IPFS Cluster: PID=$$(cat .dev/pids/ipfs-cluster.pid) (API: 9094)"; \ + @if [ -f .dev/pids/ipfs-node2.pid ]; then \ + echo " Node2 IPFS: PID=$$(cat .dev/pids/ipfs-node2.pid) (API: 5002)"; \ + fi + @if [ -f .dev/pids/ipfs-node3.pid ]; then \ + echo " Node3 IPFS: PID=$$(cat .dev/pids/ipfs-node3.pid) (API: 5003)"; \ + fi + @if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \ + echo " Bootstrap Cluster: PID=$$(cat .dev/pids/ipfs-cluster-bootstrap.pid) (API: 9094)"; \ + fi + @if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \ + echo " Node2 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node2.pid) (API: 9104)"; \ + fi + @if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \ + echo " Node3 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node3.pid) (API: 9114)"; \ fi @if [ -f .dev/pids/olric.pid ]; then \ echo " Olric: PID=$$(cat .dev/pids/olric.pid) (API: 3320)"; \ @@ -198,9 +272,13 @@ dev: build @echo "" @echo "Ports:" @echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)" - @if [ -f .dev/pids/ipfs.pid ]; then \ - echo " IPFS API: 5001 (content retrieval)"; \ - echo " IPFS Cluster: 9094 (pin management)"; \ + @if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ + echo " Bootstrap IPFS API: 5001"; \ + echo " Node2 IPFS API: 5002"; \ + echo " Node3 IPFS API: 5003"; \ + echo " Bootstrap Cluster: 9094 (pin management)"; \ + echo " Node2 Cluster: 9104 (pin management)"; \ + echo " Node3 Cluster: 9114 (pin management)"; \ fi @if [ -f .dev/pids/olric.pid ]; then \ echo " Olric: 3320 (cache API)"; \ @@ -217,15 +295,85 @@ dev: build if [ -f .dev/pids/anon.pid ]; then \ LOGS="$$LOGS $$HOME/.debros/logs/anon.log"; \ fi; \ - if [ -f .dev/pids/ipfs.pid ]; then \ - LOGS="$$LOGS $$HOME/.debros/logs/ipfs.log"; \ + if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ + LOGS="$$LOGS $$HOME/.debros/logs/ipfs-bootstrap.log $$HOME/.debros/logs/ipfs-node2.log $$HOME/.debros/logs/ipfs-node3.log"; \ fi; \ - if [ -f .dev/pids/ipfs-cluster.pid ]; then \ - LOGS="$$LOGS $$HOME/.debros/logs/ipfs-cluster.log"; \ + if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \ + LOGS="$$LOGS $$HOME/.debros/logs/ipfs-cluster-bootstrap.log $$HOME/.debros/logs/ipfs-cluster-node2.log $$HOME/.debros/logs/ipfs-cluster-node3.log"; \ + fi; \ + if [ -f .dev/pids/olric.pid ]; then \ + LOGS="$$LOGS $$HOME/.debros/logs/olric.log"; \ fi; \ trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ tail -f $$LOGS +# Kill all processes +kill: + @echo "🛑 Stopping all DeBros network services..." + @echo "" + @echo "Stopping DeBros nodes and gateway..." 
+	@if [ -f .dev/pids/gateway.pid ]; then \
+		kill -TERM $$(cat .dev/pids/gateway.pid) 2>/dev/null && echo " ✓ Gateway stopped" || echo " ✗ Gateway not running"; \
+		rm -f .dev/pids/gateway.pid; \
+	fi
+	@if [ -f .dev/pids/bootstrap.pid ]; then \
+		kill -TERM $$(cat .dev/pids/bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap node stopped" || echo " ✗ Bootstrap not running"; \
+		rm -f .dev/pids/bootstrap.pid; \
+	fi
+	@if [ -f .dev/pids/node2.pid ]; then \
+		kill -TERM $$(cat .dev/pids/node2.pid) 2>/dev/null && echo " ✓ Node2 stopped" || echo " ✗ Node2 not running"; \
+		rm -f .dev/pids/node2.pid; \
+	fi
+	@if [ -f .dev/pids/node3.pid ]; then \
+		kill -TERM $$(cat .dev/pids/node3.pid) 2>/dev/null && echo " ✓ Node3 stopped" || echo " ✗ Node3 not running"; \
+		rm -f .dev/pids/node3.pid; \
+	fi
+	@echo ""
+	@echo "Stopping IPFS Cluster peers..."
+	@if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
+		kill -TERM $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap Cluster stopped" || echo " ✗ Bootstrap Cluster not running"; \
+		rm -f .dev/pids/ipfs-cluster-bootstrap.pid; \
+	fi
+	@if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \
+		kill -TERM $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null && echo " ✓ Node2 Cluster stopped" || echo " ✗ Node2 Cluster not running"; \
+		rm -f .dev/pids/ipfs-cluster-node2.pid; \
+	fi
+	@if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \
+		kill -TERM $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null && echo " ✓ Node3 Cluster stopped" || echo " ✗ Node3 Cluster not running"; \
+		rm -f .dev/pids/ipfs-cluster-node3.pid; \
+	fi
+	@echo ""
+	@echo "Stopping IPFS daemons..."
+	@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
+		kill -TERM $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap IPFS stopped" || echo " ✗ Bootstrap IPFS not running"; \
+		rm -f .dev/pids/ipfs-bootstrap.pid; \
+	fi
+	@if [ -f .dev/pids/ipfs-node2.pid ]; then \
+		kill -TERM $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null && echo " ✓ Node2 IPFS stopped" || echo " ✗ Node2 IPFS not running"; \
+		rm -f .dev/pids/ipfs-node2.pid; \
+	fi
+	@if [ -f .dev/pids/ipfs-node3.pid ]; then \
+		kill -TERM $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null && echo " ✓ Node3 IPFS stopped" || echo " ✗ Node3 IPFS not running"; \
+		rm -f .dev/pids/ipfs-node3.pid; \
+	fi
+	@echo ""
+	@echo "Stopping Olric cache..."
+	@if [ -f .dev/pids/olric.pid ]; then \
+		kill -TERM $$(cat .dev/pids/olric.pid) 2>/dev/null && echo " ✓ Olric stopped" || echo " ✗ Olric not running"; \
+		rm -f .dev/pids/olric.pid; \
+	fi
+	@echo ""
+	@echo "Stopping Anon proxy..."
+	@if [ -f .dev/pids/anon.pid ]; then \
+		kill -TERM $$(cat .dev/pids/anon.pid) 2>/dev/null && echo " ✓ Anon proxy stopped" || echo " ✗ Anon proxy not running"; \
+		rm -f .dev/pids/anon.pid; \
+	fi
+	@echo ""
+	@echo "Cleaning up any remaining processes on ports..."
+	@lsof -ti:7001,7002,7003,5001,5002,5003,6001,4001,4002,4003,9050,3320,3322,9094,9095,9096,9097,9104,9105,9106,9107,9114,9115,9116,9117,8080,8081,8082 2>/dev/null | xargs kill -9 2>/dev/null && echo " ✓ Cleaned up remaining port bindings" || echo " ✓ No lingering processes found"
+	@echo ""
+	@echo "✅ All services stopped!"
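The `kill` target above finishes with a force-kill sweep over every dev port via `lsof`. A quick read-only way to confirm the sweep actually worked is to rerun the same check without killing anything; a minimal sketch (the port list is a hand-picked subset of the target's full list, and the `.dev/pids` layout is the one `make dev` creates):

```bash
#!/usr/bin/env bash
# Report anything still listening on the core dev ports after `make kill`.
for port in 4001 5001 6001 9094 9096 3320 9050; do
  pids=$(lsof -ti:"$port" 2>/dev/null)
  [ -n "$pids" ] && echo "port $port still in use by PID(s): $pids"
done
# Stale PID files left behind would also show up here.
ls .dev/pids/*.pid 2>/dev/null || echo "no PID files remaining"
```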
+
 # Help
 help:
 	@echo "Available targets:"
@@ -277,6 +425,7 @@ help:
 	@echo " vet - Vet code"
 	@echo " lint - Lint code (fmt + vet)"
 	@echo " clear-ports - Clear common dev ports"
+	@echo " kill - Stop all running services (nodes, IPFS, cluster, gateway, olric)"
 	@echo " dev-setup - Setup development environment"
 	@echo " dev-cluster - Show cluster startup commands"
 	@echo " dev - Full development workflow"
diff --git a/docs/ipfs-cluster-setup.md b/docs/ipfs-cluster-setup.md
new file mode 100644
index 0000000..fa70343
--- /dev/null
+++ b/docs/ipfs-cluster-setup.md
@@ -0,0 +1,171 @@
+# IPFS Cluster Setup Guide
+
+This guide explains how IPFS Cluster is configured to run on every DeBros Network node.
+
+## Overview
+
+Each DeBros Network node runs its own IPFS Cluster peer, enabling distributed pinning and replication across the network. The cluster uses CRDT consensus for automatic peer discovery.
+
+## Architecture
+
+- **IPFS (Kubo)**: Runs on each node, handles content storage and retrieval
+- **IPFS Cluster**: Runs on each node, manages pinning and replication
+- **Cluster Consensus**: Uses CRDT (instead of Raft) for simpler multi-node setup
+
+## Automatic Setup
+
+When you run `network-cli setup`, the following happens automatically:
+
+1. IPFS (Kubo) and IPFS Cluster are installed
+2. IPFS repository is initialized for each node
+3. IPFS Cluster service.json config is generated
+4. Systemd services are created and started:
+   - `debros-ipfs` - IPFS daemon
+   - `debros-ipfs-cluster` - IPFS Cluster service
+   - `debros-node` - DeBros Network node (depends on cluster)
+   - `debros-gateway` - HTTP Gateway (depends on node)
+
+## Configuration
+
+### Node Configs
+
+Each node config (`~/.debros/bootstrap.yaml`, `~/.debros/node.yaml`, etc.) includes:
+
+```yaml
+database:
+  ipfs:
+    cluster_api_url: "http://localhost:9094" # Local cluster API
+    api_url: "http://localhost:5001" # Local IPFS API
+    replication_factor: 3 # Desired replication
+```
+
+### Cluster Service Config
+
+Cluster service configs are stored at:
+
+- Bootstrap: `~/.debros/bootstrap/ipfs-cluster/service.json`
+- Nodes: `~/.debros/node/ipfs-cluster/service.json`
+
+Key settings:
+
+- **Consensus**: CRDT (automatic peer discovery)
+- **API Listen**: `0.0.0.0:9094` (REST API)
+- **Cluster Listen**: `0.0.0.0:9096` (peer-to-peer)
+- **Secret**: Shared cluster secret stored at `~/.debros/cluster-secret`
+
+## Verification
+
+### Check Cluster Peers
+
+From any node, verify all cluster peers are connected:
+
+```bash
+sudo -u debros ipfs-cluster-ctl --host http://localhost:9094 peers ls
+```
+
+You should see all cluster peers listed (bootstrap, node1, node2, etc.).
+
+### Check IPFS Daemon
+
+Verify the IPFS daemon is responding (kubo selects its repository via the `IPFS_PATH` environment variable):
+
+```bash
+sudo -u debros env IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo ipfs id
+# Or for regular nodes:
+sudo -u debros env IPFS_PATH=/home/debros/.debros/node/ipfs/repo ipfs id
+```
+
+### Check Service Status
+
+```bash
+network-cli service status all
+```
+
+Should show:
+
+- `debros-ipfs` - running
+- `debros-ipfs-cluster` - running
+- `debros-node` - running
+- `debros-gateway` - running
+
+## Troubleshooting
+
+### Cluster Peers Not Connecting
+
+If peers aren't discovering each other:
+
+1. **Check firewall**: Ensure ports 9096 (cluster swarm) and 9094 (cluster API) are open
+2. **Verify secret**: All nodes must use the same cluster secret from `~/.debros/cluster-secret`
+3. **Check logs**: `journalctl -u debros-ipfs-cluster -f`
+
+### Not Enough Peers Error
+
+If you see "not enough peers to allocate CID" errors:
+
+- The cluster needs at least `replication_factor` peers running
+- Check that all nodes have `debros-ipfs-cluster` service running
+- Verify with `ipfs-cluster-ctl peers ls`
+
+### IPFS Not Starting
+
+If IPFS daemon fails to start:
+
+1. Check IPFS repo exists: `ls -la ~/.debros/bootstrap/ipfs/repo/`
+2. Check permissions: `chown -R debros:debros ~/.debros/bootstrap/ipfs/`
+3. Check logs: `journalctl -u debros-ipfs -f`
+
+## Manual Setup (If Needed)
+
+If automatic setup didn't work, you can manually initialize:
+
+### 1. Initialize IPFS
+
+```bash
+sudo -u debros ipfs init --profile=server --repo-dir=~/.debros/bootstrap/ipfs/repo
+sudo -u debros ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' --repo-dir=~/.debros/bootstrap/ipfs/repo
+```
+
+### 2. Initialize Cluster
+
+```bash
+# Read the shared cluster secret
+CLUSTER_SECRET=$(cat ~/.debros/cluster-secret)
+
+# Initialize cluster (will create service.json); the secret and data folder are passed via environment variables
+sudo -u debros env CLUSTER_SECRET=$CLUSTER_SECRET IPFS_CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster ipfs-cluster-service init --consensus crdt
+```
+
+### 3. Start Services
+
+```bash
+systemctl start debros-ipfs
+systemctl start debros-ipfs-cluster
+systemctl start debros-node
+systemctl start debros-gateway
+```
+
+## Ports
+
+- **4001**: IPFS swarm (LibP2P)
+- **5001**: IPFS HTTP API
+- **8080**: IPFS Gateway (optional)
+- **9094**: IPFS Cluster REST API
+- **9096**: IPFS Cluster swarm (LibP2P)
+
+## Replication Factor
+
+The default replication factor is 3, meaning content is pinned to 3 cluster peers. This requires at least 3 nodes running cluster peers.
+
+To change replication factor, edit node configs:
+
+```yaml
+database:
+  ipfs:
+    replication_factor: 1 # For single-node development
+```
+
+## Security Notes
+
+- Cluster secret is stored at `~/.debros/cluster-secret` (mode 0600)
+- Cluster API (port 9094) should be firewalled in production
+- IPFS API (port 5001) should only be accessible locally
diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go
index c681554..f9e8634 100644
--- a/pkg/cli/setup.go
+++ b/pkg/cli/setup.go
@@ -2,6 +2,9 @@ package cli
 
 import (
 	"bufio"
+	"crypto/rand"
+	"encoding/hex"
+	"encoding/json"
 	"fmt"
 	"net"
 	"os"
@@ -63,11 +66,12 @@ func HandleSetupCommand(args []string) {
 	fmt.Printf(" 4. Install RQLite database\n")
 	fmt.Printf(" 5. Install Anyone Relay (Anon) for anonymous networking\n")
 	fmt.Printf(" 6. Install Olric cache server\n")
-	fmt.Printf(" 7. Create directories (/home/debros/bin, /home/debros/src)\n")
-	fmt.Printf(" 8. Clone and build DeBros Network\n")
-	fmt.Printf(" 9. Generate configuration files\n")
-	fmt.Printf(" 10. Create systemd services (debros-node, debros-gateway, debros-olric)\n")
-	fmt.Printf(" 11. Start and enable services\n")
+	fmt.Printf(" 7. Install IPFS (Kubo) and IPFS Cluster\n")
+	fmt.Printf(" 8. Create directories (/home/debros/bin, /home/debros/src)\n")
+	fmt.Printf(" 9. Clone and build DeBros Network\n")
+	fmt.Printf(" 10. Generate configuration files\n")
+	fmt.Printf(" 11. Create systemd services (debros-ipfs, debros-ipfs-cluster, debros-node, debros-gateway, debros-olric)\n")
+	fmt.Printf(" 12. Start and enable services\n")
 	fmt.Printf(strings.Repeat("=", 70) + "\n\n")
 
 	fmt.Printf("Ready to begin setup? 
(yes/no): ") @@ -96,6 +100,9 @@ func HandleSetupCommand(args []string) { // Step 4.6: Install Olric cache server installOlric() + // Step 4.7: Install IPFS and IPFS Cluster + installIPFS() + // Step 5: Setup directories setupDirectories() @@ -123,6 +130,14 @@ func HandleSetupCommand(args []string) { fmt.Printf("🆔 Node Peer ID: %s\n\n", peerID) } + // Display IPFS Cluster information + fmt.Printf("IPFS Cluster Setup:\n") + fmt.Printf(" Each node runs its own IPFS Cluster peer\n") + fmt.Printf(" Cluster peers use CRDT consensus for automatic discovery\n") + fmt.Printf(" To verify cluster is working:\n") + fmt.Printf(" sudo -u debros ipfs-cluster-ctl --host http://localhost:9094 peers ls\n") + fmt.Printf(" You should see all cluster peers listed\n\n") + fmt.Printf("Service Management:\n") fmt.Printf(" network-cli service status all\n") fmt.Printf(" network-cli service logs node --follow\n") @@ -1156,6 +1171,92 @@ func configureFirewallForOlric() { fmt.Printf(" No active firewall detected for Olric\n") } +func installIPFS() { + fmt.Printf("🌐 Installing IPFS (Kubo) and IPFS Cluster...\n") + + // Check if IPFS is already installed + if _, err := exec.LookPath("ipfs"); err == nil { + fmt.Printf(" ✓ IPFS (Kubo) already installed\n") + } else { + fmt.Printf(" Installing IPFS (Kubo)...\n") + // Install IPFS via official installation script + cmd := exec.Command("bash", "-c", "curl -fsSL https://dist.ipfs.tech/kubo/v0.27.0/install.sh | bash") + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to install IPFS: %v\n", err) + fmt.Fprintf(os.Stderr, " You may need to install IPFS manually: https://docs.ipfs.tech/install/command-line/\n") + return + } + // Make sure ipfs is in PATH + exec.Command("ln", "-sf", "/usr/local/bin/ipfs", "/usr/bin/ipfs").Run() + fmt.Printf(" ✓ IPFS (Kubo) installed\n") + } + + // Check if IPFS Cluster is already installed + if _, err := exec.LookPath("ipfs-cluster-service"); err == nil { + fmt.Printf(" ✓ IPFS Cluster already installed\n") + } else { + fmt.Printf(" Installing IPFS Cluster...\n") + // Install IPFS Cluster via go install + if _, err := exec.LookPath("go"); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Go not found - cannot install IPFS Cluster. 
Please install Go first.\n") + return + } + cmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest") + cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin") + if output, err := cmd.CombinedOutput(); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to install IPFS Cluster: %v\n", err) + if len(output) > 0 { + fmt.Fprintf(os.Stderr, " Output: %s\n", string(output)) + } + fmt.Fprintf(os.Stderr, " You can manually install with: go install github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest\n") + return + } + // Also install ipfs-cluster-ctl for management + exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-ctl@latest").Run() + fmt.Printf(" ✓ IPFS Cluster installed\n") + } + + // Configure firewall for IPFS and Cluster + configureFirewallForIPFS() + + fmt.Printf(" ✓ IPFS and IPFS Cluster setup complete\n") +} + +func configureFirewallForIPFS() { + fmt.Printf(" Checking firewall configuration for IPFS...\n") + + // Check for UFW + if _, err := exec.LookPath("ufw"); err == nil { + output, _ := exec.Command("ufw", "status").CombinedOutput() + if strings.Contains(string(output), "Status: active") { + fmt.Printf(" Adding UFW rules for IPFS and Cluster...\n") + exec.Command("ufw", "allow", "4001/tcp", "comment", "IPFS Swarm").Run() + exec.Command("ufw", "allow", "5001/tcp", "comment", "IPFS API").Run() + exec.Command("ufw", "allow", "9094/tcp", "comment", "IPFS Cluster API").Run() + exec.Command("ufw", "allow", "9096/tcp", "comment", "IPFS Cluster Swarm").Run() + fmt.Printf(" ✓ UFW rules added for IPFS\n") + return + } + } + + // Check for firewalld + if _, err := exec.LookPath("firewall-cmd"); err == nil { + output, _ := exec.Command("firewall-cmd", "--state").CombinedOutput() + if strings.Contains(string(output), "running") { + fmt.Printf(" Adding firewalld rules for IPFS...\n") + exec.Command("firewall-cmd", "--permanent", "--add-port=4001/tcp").Run() + exec.Command("firewall-cmd", "--permanent", "--add-port=5001/tcp").Run() + exec.Command("firewall-cmd", "--permanent", "--add-port=9094/tcp").Run() + exec.Command("firewall-cmd", "--permanent", "--add-port=9096/tcp").Run() + exec.Command("firewall-cmd", "--reload").Run() + fmt.Printf(" ✓ firewalld rules added for IPFS\n") + return + } + } + + fmt.Printf(" No active firewall detected for IPFS\n") +} + func setupDirectories() { fmt.Printf("📁 Creating directories...\n") @@ -1405,6 +1506,18 @@ func generateConfigsInteractive(force bool) { exec.Command("chown", "debros:debros", nodeConfigPath).Run() fmt.Printf(" ✓ Node config created: %s\n", nodeConfigPath) + // Initialize IPFS and Cluster for this node + var nodeID string + if isBootstrap { + nodeID = "bootstrap" + } else { + nodeID = "node" + } + if err := initializeIPFSForNode(nodeID, vpsIP, isBootstrap); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to initialize IPFS/Cluster: %v\n", err) + fmt.Fprintf(os.Stderr, " You may need to initialize IPFS and Cluster manually\n") + } + // Generate Olric config file for this node (uses multicast discovery) var olricConfigPath string if isBootstrap { @@ -1730,14 +1843,309 @@ func generateOlricConfig(configPath, bindIP string, httpPort, memberlistPort int return nil } +// getOrGenerateClusterSecret gets or generates a shared cluster secret +func getOrGenerateClusterSecret() (string, error) { + secretPath := "/home/debros/.debros/cluster-secret" + + // Try to read existing secret + if data, err := os.ReadFile(secretPath); err == nil { + secret := 
strings.TrimSpace(string(data)) + if len(secret) == 64 { + return secret, nil + } + } + + // Generate new secret (64 hex characters = 32 bytes) + bytes := make([]byte, 32) + if _, err := rand.Read(bytes); err != nil { + return "", fmt.Errorf("failed to generate cluster secret: %w", err) + } + secret := hex.EncodeToString(bytes) + + // Save secret + if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil { + return "", fmt.Errorf("failed to save cluster secret: %w", err) + } + exec.Command("chown", "debros:debros", secretPath).Run() + + return secret, nil +} + +// initializeIPFSForNode initializes IPFS and IPFS Cluster for a node +func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { + fmt.Printf(" Initializing IPFS and Cluster for node %s...\n", nodeID) + + // Get or generate cluster secret + secret, err := getOrGenerateClusterSecret() + if err != nil { + return fmt.Errorf("failed to get cluster secret: %w", err) + } + + // Determine data directories + var ipfsDataDir, clusterDataDir string + if nodeID == "bootstrap" { + ipfsDataDir = "/home/debros/.debros/bootstrap/ipfs" + clusterDataDir = "/home/debros/.debros/bootstrap/ipfs-cluster" + } else { + ipfsDataDir = "/home/debros/.debros/node/ipfs" + clusterDataDir = "/home/debros/.debros/node/ipfs-cluster" + } + + // Create directories + os.MkdirAll(ipfsDataDir, 0755) + os.MkdirAll(clusterDataDir, 0755) + exec.Command("chown", "-R", "debros:debros", ipfsDataDir).Run() + exec.Command("chown", "-R", "debros:debros", clusterDataDir).Run() + + // Initialize IPFS if not already initialized + ipfsRepoPath := filepath.Join(ipfsDataDir, "repo") + if _, err := os.Stat(filepath.Join(ipfsRepoPath, "config")); os.IsNotExist(err) { + fmt.Printf(" Initializing IPFS repository...\n") + cmd := exec.Command("sudo", "-u", "debros", "ipfs", "init", "--profile=server", "--repo-dir="+ipfsRepoPath) + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to initialize IPFS: %v\n%s", err, string(output)) + } + + // Configure IPFS API and Gateway addresses + exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.API", `["/ip4/127.0.0.1/tcp/5001"]`, "--repo-dir="+ipfsRepoPath).Run() + exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Gateway", `["/ip4/127.0.0.1/tcp/8080"]`, "--repo-dir="+ipfsRepoPath).Run() + exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Swarm", `["/ip4/0.0.0.0/tcp/4001","/ip6/::/tcp/4001"]`, "--repo-dir="+ipfsRepoPath).Run() + fmt.Printf(" ✓ IPFS initialized\n") + } + + // Initialize IPFS Cluster if not already initialized + clusterConfigPath := filepath.Join(clusterDataDir, "service.json") + if _, err := os.Stat(clusterConfigPath); os.IsNotExist(err) { + fmt.Printf(" Initializing IPFS Cluster...\n") + + // Generate cluster config + clusterConfig := generateClusterServiceConfig(nodeID, vpsIP, secret, isBootstrap) + + // Write config + configJSON, err := json.MarshalIndent(clusterConfig, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal cluster config: %w", err) + } + + if err := os.WriteFile(clusterConfigPath, configJSON, 0644); err != nil { + return fmt.Errorf("failed to write cluster config: %w", err) + } + exec.Command("chown", "debros:debros", clusterConfigPath).Run() + + fmt.Printf(" ✓ IPFS Cluster initialized\n") + } + + return nil +} + +// getClusterPeerID gets the cluster peer ID from a running cluster service +func getClusterPeerID(clusterAPIURL string) (string, error) { + cmd := 
exec.Command("ipfs-cluster-ctl", "--host", clusterAPIURL, "id") + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get cluster peer ID: %v\n%s", err, string(output)) + } + + // Parse output to extract peer ID + // Output format: "12D3KooW..." + lines := strings.Split(string(output), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "12D3Koo") { + return line, nil + } + } + + return "", fmt.Errorf("could not parse cluster peer ID from output: %s", string(output)) +} + +// getClusterPeerMultiaddr constructs the cluster peer multiaddr +func getClusterPeerMultiaddr(vpsIP, peerID string) string { + return fmt.Sprintf("/ip4/%s/tcp/9096/p2p/%s", vpsIP, peerID) +} + +// clusterServiceConfig represents IPFS Cluster service.json structure +type clusterServiceConfig struct { + Cluster clusterConfig `json:"cluster"` + Consensus consensusConfig `json:"consensus"` + API apiConfig `json:"api"` + IPFSConnector ipfsConnectorConfig `json:"ipfs_connector"` + Datastore datastoreConfig `json:"datastore"` +} + +type clusterConfig struct { + ID string `json:"id"` + PrivateKey string `json:"private_key"` + Secret string `json:"secret"` + Peername string `json:"peername"` + Bootstrap []string `json:"bootstrap"` + LeaveOnShutdown bool `json:"leave_on_shutdown"` + ListenMultiaddr string `json:"listen_multiaddress"` + ConnectionManager connectionManagerConfig `json:"connection_manager"` +} + +type connectionManagerConfig struct { + LowWater int `json:"low_water"` + HighWater int `json:"high_water"` + GracePeriod string `json:"grace_period"` +} + +type consensusConfig struct { + CRDT crdtConfig `json:"crdt"` +} + +type crdtConfig struct { + ClusterName string `json:"cluster_name"` + TrustedPeers []string `json:"trusted_peers"` +} + +type apiConfig struct { + RestAPI restAPIConfig `json:"restapi"` +} + +type restAPIConfig struct { + HTTPListenMultiaddress string `json:"http_listen_multiaddress"` + ID string `json:"id"` + BasicAuthCredentials interface{} `json:"basic_auth_credentials"` +} + +type ipfsConnectorConfig struct { + IPFSHTTP ipfsHTTPConfig `json:"ipfshttp"` +} + +type ipfsHTTPConfig struct { + NodeMultiaddress string `json:"node_multiaddress"` +} + +type datastoreConfig struct { + Type string `json:"type"` + Path string `json:"path"` +} + +// generateClusterServiceConfig generates IPFS Cluster service.json config +func generateClusterServiceConfig(nodeID, vpsIP, secret string, isBootstrap bool) clusterServiceConfig { + clusterListenAddr := "/ip4/0.0.0.0/tcp/9096" + restAPIListenAddr := "/ip4/0.0.0.0/tcp/9094" + + // For bootstrap node, use empty bootstrap list + // For other nodes, bootstrap list will be set when starting the service + bootstrap := []string{} + + return clusterServiceConfig{ + Cluster: clusterConfig{ + Peername: nodeID, + Secret: secret, + Bootstrap: bootstrap, + LeaveOnShutdown: false, + ListenMultiaddr: clusterListenAddr, + ConnectionManager: connectionManagerConfig{ + LowWater: 50, + HighWater: 200, + GracePeriod: "20s", + }, + }, + Consensus: consensusConfig{ + CRDT: crdtConfig{ + ClusterName: "debros-cluster", + TrustedPeers: []string{"*"}, // Trust all peers + }, + }, + API: apiConfig{ + RestAPI: restAPIConfig{ + HTTPListenMultiaddress: restAPIListenAddr, + ID: "", + BasicAuthCredentials: nil, + }, + }, + IPFSConnector: ipfsConnectorConfig{ + IPFSHTTP: ipfsHTTPConfig{ + NodeMultiaddress: "/ip4/127.0.0.1/tcp/5001", + }, + }, + Datastore: datastoreConfig{ + Type: "badger", + Path: 
fmt.Sprintf("/home/debros/.debros/%s/ipfs-cluster/badger", nodeID), + }, + } +} + func createSystemdServices() { fmt.Printf("🔧 Creating systemd services...\n") + // IPFS service (runs on all nodes) + ipfsService := `[Unit] +Description=IPFS Daemon +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=debros +Group=debros +Environment=HOME=/home/debros +ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export IPFS_PATH=/home/debros/.debros/node/ipfs/repo; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; else export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; fi' +ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=${IPFS_PATH} +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=ipfs + +NoNewPrivileges=yes +PrivateTmp=yes +ProtectSystem=strict +ReadWritePaths=/home/debros + +[Install] +WantedBy=multi-user.target +` + + if err := os.WriteFile("/etc/systemd/system/debros-ipfs.service", []byte(ipfsService), 0644); err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to create IPFS service: %v\n", err) + os.Exit(1) + } + + // IPFS Cluster service (runs on all nodes) + clusterService := `[Unit] +Description=IPFS Cluster Service +After=debros-ipfs.service +Wants=debros-ipfs.service +Requires=debros-ipfs.service + +[Service] +Type=simple +User=debros +Group=debros +WorkingDirectory=/home/debros +Environment=HOME=/home/debros +ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export CLUSTER_PATH=/home/debros/.debros/node/ipfs-cluster; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster; else export CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster; fi' +ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config ${CLUSTER_PATH}/service.json +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=ipfs-cluster + +NoNewPrivileges=yes +PrivateTmp=yes +ProtectSystem=strict +ReadWritePaths=/home/debros + +[Install] +WantedBy=multi-user.target +` + + if err := os.WriteFile("/etc/systemd/system/debros-ipfs-cluster.service", []byte(clusterService), 0644); err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to create IPFS Cluster service: %v\n", err) + os.Exit(1) + } + // Node service nodeService := `[Unit] Description=DeBros Network Node -After=network-online.target -Wants=network-online.target +After=network-online.target debros-ipfs-cluster.service +Wants=network-online.target debros-ipfs-cluster.service +Requires=debros-ipfs-cluster.service [Service] Type=simple @@ -1807,6 +2215,8 @@ WantedBy=multi-user.target // Reload systemd exec.Command("systemctl", "daemon-reload").Run() + exec.Command("systemctl", "enable", "debros-ipfs").Run() + exec.Command("systemctl", "enable", "debros-ipfs-cluster").Run() exec.Command("systemctl", "enable", "debros-node").Run() exec.Command("systemctl", "enable", "debros-gateway").Run() @@ -1841,6 +2251,18 @@ func startServices() { } } + // Start IPFS first (required by Cluster) + startOrRestartService("debros-ipfs") + + // Wait a bit for IPFS to start + time.Sleep(2 * time.Second) + + // Start IPFS Cluster (required by Node) + startOrRestartService("debros-ipfs-cluster") + + // Wait a bit for Cluster to start + time.Sleep(2 * time.Second) + // Start or restart node service startOrRestartService("debros-node") diff --git a/pkg/gateway/gateway.go 
b/pkg/gateway/gateway.go index fc2dce1..d1d1545 100644 --- a/pkg/gateway/gateway.go +++ b/pkg/gateway/gateway.go @@ -254,6 +254,25 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) { logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize IPFS Cluster client; storage endpoints disabled", zap.Error(ipfsErr)) } else { gw.ipfsClient = ipfsClient + + // Check peer count and warn if insufficient (use background context to avoid blocking) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if peerCount, err := ipfsClient.GetPeerCount(ctx); err == nil { + if peerCount < ipfsReplicationFactor { + logger.ComponentWarn(logging.ComponentGeneral, "insufficient cluster peers for replication factor", + zap.Int("peer_count", peerCount), + zap.Int("replication_factor", ipfsReplicationFactor), + zap.String("message", "Some pin operations may fail until more peers join the cluster")) + } else { + logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster peer count sufficient", + zap.Int("peer_count", peerCount), + zap.Int("replication_factor", ipfsReplicationFactor)) + } + } else { + logger.ComponentWarn(logging.ComponentGeneral, "failed to get cluster peer count", zap.Error(err)) + } + logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster client ready", zap.String("cluster_api_url", ipfsCfg.ClusterAPIURL), zap.String("ipfs_api_url", ipfsAPIURL), diff --git a/pkg/gateway/storage_handlers.go b/pkg/gateway/storage_handlers.go index 13269e1..16706b3 100644 --- a/pkg/gateway/storage_handlers.go +++ b/pkg/gateway/storage_handlers.go @@ -275,7 +275,12 @@ func (g *Gateway) storageGetHandler(w http.ResponseWriter, r *http.Request) { reader, err := g.ipfsClient.Get(ctx, path, ipfsAPIURL) if err != nil { g.logger.ComponentError(logging.ComponentGeneral, "failed to get content from IPFS", zap.Error(err), zap.String("cid", path)) - writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get content: %v", err)) + // Check if error indicates content not found (404) + if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "status 404") { + writeError(w, http.StatusNotFound, fmt.Sprintf("content not found: %s", path)) + } else { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get content: %v", err)) + } return } defer reader.Close() diff --git a/pkg/ipfs/client.go b/pkg/ipfs/client.go index b415fd0..83dbb5d 100644 --- a/pkg/ipfs/client.go +++ b/pkg/ipfs/client.go @@ -8,6 +8,7 @@ import ( "io" "mime/multipart" "net/http" + "net/url" "time" "go.uber.org/zap" @@ -21,6 +22,7 @@ type IPFSClient interface { Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) Unpin(ctx context.Context, cid string) error Health(ctx context.Context) error + GetPeerCount(ctx context.Context) (int, error) Close(ctx context.Context) error } @@ -110,6 +112,33 @@ func (c *Client) Health(ctx context.Context) error { return nil } +// GetPeerCount returns the number of cluster peers +func (c *Client) GetPeerCount(ctx context.Context) (int, error) { + req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/peers", nil) + if err != nil { + return 0, fmt.Errorf("failed to create peers request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, fmt.Errorf("peers request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("peers request failed with status: %d", resp.StatusCode) + } + + var 
peers []struct { + ID string `json:"id"` + } + if err := json.NewDecoder(resp.Body).Decode(&peers); err != nil { + return 0, fmt.Errorf("failed to decode peers response: %w", err) + } + + return len(peers), nil +} + // Add adds content to IPFS and returns the CID func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) { // Create multipart form request for IPFS Cluster API @@ -157,28 +186,25 @@ func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddRe } // Pin pins a CID with specified replication factor +// IPFS Cluster expects pin options (including name) as query parameters, not in JSON body func (c *Client) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error) { - reqBody := map[string]interface{}{ - "cid": cid, - "replication_factor_min": replicationFactor, - "replication_factor_max": replicationFactor, - } + // Build URL with query parameters + reqURL := c.apiURL + "/pins/" + cid + values := url.Values{} + values.Set("replication-min", fmt.Sprintf("%d", replicationFactor)) + values.Set("replication-max", fmt.Sprintf("%d", replicationFactor)) if name != "" { - reqBody["name"] = name + values.Set("name", name) + } + if len(values) > 0 { + reqURL += "?" + values.Encode() } - jsonBody, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("failed to marshal pin request: %w", err) - } - - req, err := http.NewRequestWithContext(ctx, "POST", c.apiURL+"/pins/"+cid, bytes.NewReader(jsonBody)) + req, err := http.NewRequestWithContext(ctx, "POST", reqURL, nil) if err != nil { return nil, fmt.Errorf("failed to create pin request: %w", err) } - req.Header.Set("Content-Type", "application/json") - resp, err := c.httpClient.Do(req) if err != nil { return nil, fmt.Errorf("pin request failed: %w", err) @@ -242,6 +268,9 @@ func (c *Client) PinStatus(ctx context.Context, cid string) (*PinStatus, error) return nil, fmt.Errorf("failed to decode pin status response: %w", err) } + // Use name from GlobalPinInfo + name := gpi.Name + // Extract status from peer map (use first peer's status, or aggregate) status := "unknown" peers := make([]string, 0, len(gpi.PeerMap)) @@ -274,7 +303,7 @@ func (c *Client) PinStatus(ctx context.Context, cid string) (*PinStatus, error) result := &PinStatus{ Cid: gpi.Cid, - Name: gpi.Name, + Name: name, Status: status, ReplicationMin: 0, // Not available in GlobalPinInfo ReplicationMax: 0, // Not available in GlobalPinInfo @@ -331,8 +360,12 @@ func (c *Client) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.Rea } if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) resp.Body.Close() - return nil, fmt.Errorf("get failed with status %d", resp.StatusCode) + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("content not found (CID: %s). The content may not be available on the IPFS node, or the IPFS API may not be accessible at %s", cid, ipfsAPIURL) + } + return nil, fmt.Errorf("get failed with status %d: %s", resp.StatusCode, string(body)) } return resp.Body, nil diff --git a/scripts/update_changelog.sh b/scripts/update_changelog.sh index 1f10e1d..72f70c2 100755 --- a/scripts/update_changelog.sh +++ b/scripts/update_changelog.sh @@ -67,6 +67,15 @@ if ! 
command -v curl > /dev/null 2>&1; then exit 1 fi +# Check for skip flag +# To skip changelog generation, set SKIP_CHANGELOG=1 before committing: +# SKIP_CHANGELOG=1 git commit -m "your message" +# SKIP_CHANGELOG=1 git commit +if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then + log "Skipping changelog update (SKIP_CHANGELOG is set)" + exit 0 +fi + # Check if we're in a git repo if ! git rev-parse --git-dir > /dev/null 2>&1; then error "Not in a git repository" From 69d7ccf4c7947ea0e3d30b0d5cb8a7f346ebea32 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Wed, 5 Nov 2025 10:52:40 +0200 Subject: [PATCH 04/57] feat: enhance IPFS and Cluster integration in setup - Added automatic setup for IPFS and IPFS Cluster during the network setup process. - Implemented initialization of IPFS repositories and Cluster configurations for each node. - Enhanced Makefile to support starting IPFS and Cluster daemons with improved logging. - Introduced a new documentation guide for IPFS Cluster setup, detailing configuration and verification steps. - Updated changelog to reflect the new features and improvements. --- .zed/debug.json | 4 +-- Makefile | 26 ++++++++++---------- README.md | 4 +-- cmd/node/main.go | 4 +-- docs/ipfs-cluster-setup.md | 2 +- e2e/gateway_e2e_test.go | 2 +- pkg/anyoneproxy/socks.go | 6 ++--- pkg/cli/config_commands.go | 10 ++++---- pkg/cli/setup.go | 10 ++++---- pkg/client/defaults_test.go | 6 ++--- pkg/config/validate_test.go | 20 +++++++-------- pkg/gateway/anon_proxy_handler.go | 2 +- pkg/gateway/anon_proxy_handler_test.go | 4 +-- pkg/gateway/gateway.go | 4 +-- pkg/gateway/storage_handlers_test.go | 18 ++++++++++---- pkg/ipfs/client_test.go | 34 +++++++++++++++----------- pkg/node/node.go | 2 +- pkg/node/node_test.go | 10 ++++---- 18 files changed, 91 insertions(+), 77 deletions(-) diff --git a/.zed/debug.json b/.zed/debug.json index 6418b00..4119f7a 100644 --- a/.zed/debug.json +++ b/.zed/debug.json @@ -11,7 +11,7 @@ "program": "./cmd/gateway", "env": { "GATEWAY_ADDR": ":6001", - "GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", + "GATEWAY_BOOTSTRAP_PEERS": "/ip4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", "GATEWAY_NAMESPACE": "default", "GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default" } @@ -36,7 +36,7 @@ "program": "./cmd/gateway", "env": { "GATEWAY_ADDR": ":6001", - "GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", + "GATEWAY_BOOTSTRAP_PEERS": "/ip4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", "GATEWAY_NAMESPACE": "default", "GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default" } diff --git a/Makefile b/Makefile index 712948d..355cce4 100644 --- a/Makefile +++ b/Makefile @@ -7,12 +7,12 @@ test: # Gateway-focused E2E tests assume gateway and nodes are already running # Configure via env: -# GATEWAY_BASE_URL (default http://127.0.0.1:6001) +# GATEWAY_BASE_URL (default http://localhost:6001) # GATEWAY_API_KEY (required for auth-protected routes) .PHONY: test-e2e test-e2e: @echo "Running gateway E2E tests (HTTP/WS only)..." 
- @echo "Base URL: $${GATEWAY_BASE_URL:-http://127.0.0.1:6001}" + @echo "Base URL: $${GATEWAY_BASE_URL:-http://localhost:6001}" @test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1) go test -v -tags e2e ./e2e @@ -57,7 +57,7 @@ run-node: go run ./cmd/node --config node.yaml # Run second node (regular) - requires join address of bootstrap node -# Usage: make run-node2 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002 +# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002 run-node2: @echo "Starting regular node (node.yaml)..." @echo "Config: ~/.debros/node.yaml" @@ -65,7 +65,7 @@ run-node2: go run ./cmd/node --config node2.yaml # Run third node (regular) - requires join address of bootstrap node -# Usage: make run-node3 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003 +# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003 run-node3: @echo "Starting regular node (node2.yaml)..." @echo "Config: ~/.debros/node2.yaml" @@ -122,9 +122,9 @@ dev: build echo " Initializing IPFS..."; \ mkdir -p $$HOME/.debros/bootstrap/ipfs; \ IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8080"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4001","/ip6/::/tcp/4001"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/localhost/tcp/5001"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/localhost/tcp/8080"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4101","/ip6/::/tcp/4101"]' 2>&1 | grep -v "generating" || true; \ fi; \ echo " Initializing IPFS Cluster..."; \ mkdir -p $$HOME/.debros/bootstrap/ipfs-cluster; \ @@ -135,9 +135,9 @@ dev: build echo " Initializing IPFS..."; \ mkdir -p $$HOME/.debros/node2/ipfs; \ IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5002"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8081"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4002","/ip6/::/tcp/4002"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/localhost/tcp/5002"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/localhost/tcp/8081"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4102","/ip6/::/tcp/4102"]' 2>&1 | grep -v "generating" || true; \ fi; \ echo " Initializing IPFS Cluster..."; \ mkdir -p 
$$HOME/.debros/node2/ipfs-cluster; \ @@ -148,9 +148,9 @@ dev: build echo " Initializing IPFS..."; \ mkdir -p $$HOME/.debros/node3/ipfs; \ IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5003"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8082"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4003","/ip6/::/tcp/4003"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/localhost/tcp/5003"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/localhost/tcp/8082"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4103","/ip6/::/tcp/4103"]' 2>&1 | grep -v "generating" || true; \ fi; \ echo " Initializing IPFS Cluster..."; \ mkdir -p $$HOME/.debros/node3/ipfs-cluster; \ diff --git a/README.md b/README.md index b325374..dd2e561 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Use `make dev` for the complete stack or run binaries individually with `go run All runtime configuration lives in `~/.debros/`. - `bootstrap.yaml`: `type: bootstrap`, blank `database.rqlite_join_address` -- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. `127.0.0.1:7001`) and include the bootstrap `discovery.bootstrap_peers` +- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. 
`localhost:7001`) and include the bootstrap `discovery.bootstrap_peers`
- `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags

Validation reminders:

@@ -127,7 +127,7 @@ Environment overrides:

```bash
export GATEWAY_ADDR="0.0.0.0:6001"
export GATEWAY_NAMESPACE="my-app"
-export GATEWAY_BOOTSTRAP_PEERS="/ip4/127.0.0.1/tcp/4001/p2p/<peer-id>"
+export GATEWAY_BOOTSTRAP_PEERS="/ip4/localhost/tcp/4001/p2p/<peer-id>"
export GATEWAY_REQUIRE_AUTH=true
export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2"
```
diff --git a/cmd/node/main.go b/cmd/node/main.go
index 5d469b1..949ecd3 100644
--- a/cmd/node/main.go
+++ b/cmd/node/main.go
@@ -255,10 +255,10 @@ func main() {

	// Set default advertised addresses if empty
	if cfg.Discovery.HttpAdvAddress == "" {
-		cfg.Discovery.HttpAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLitePort)
+		cfg.Discovery.HttpAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLitePort)
	}
	if cfg.Discovery.RaftAdvAddress == "" {
-		cfg.Discovery.RaftAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLiteRaftPort)
+		cfg.Discovery.RaftAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLiteRaftPort)
	}

	// Validate configuration
diff --git a/docs/ipfs-cluster-setup.md b/docs/ipfs-cluster-setup.md
index fa70343..65e606f 100644
--- a/docs/ipfs-cluster-setup.md
+++ b/docs/ipfs-cluster-setup.md
@@ -122,7 +122,7 @@ If automatic setup didn't work, you can manually initialize:

```bash
sudo -u debros ipfs init --profile=server --repo-dir=~/.debros/bootstrap/ipfs/repo
-sudo -u debros ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' --repo-dir=~/.debros/bootstrap/ipfs/repo
+sudo -u debros ipfs config --json Addresses.API '["/ip4/localhost/tcp/5001"]' --repo-dir=~/.debros/bootstrap/ipfs/repo
```

### 2. Initialize Cluster
diff --git a/e2e/gateway_e2e_test.go b/e2e/gateway_e2e_test.go
index 8c6cb27..036d9b2 100644
--- a/e2e/gateway_e2e_test.go
+++ b/e2e/gateway_e2e_test.go
@@ -37,7 +37,7 @@ func requireAPIKey(t *testing.T) string {
}

func gatewayBaseURL() string {
-	return getEnv("GATEWAY_BASE_URL", "http://127.0.0.1:6001")
+	return getEnv("GATEWAY_BASE_URL", "http://localhost:6001")
}

func httpClient() *http.Client {
diff --git a/pkg/anyoneproxy/socks.go b/pkg/anyoneproxy/socks.go
index a4c4ce2..df4a2eb 100644
--- a/pkg/anyoneproxy/socks.go
+++ b/pkg/anyoneproxy/socks.go
@@ -19,7 +19,7 @@ var disabled bool
func SetDisabled(v bool) { disabled = v }

// Enabled reports whether Anyone proxy routing is active.
-// Defaults to true, using SOCKS5 at 127.0.0.1:9050, unless explicitly disabled
+// Defaults to true, using SOCKS5 at localhost:9050, unless explicitly disabled
// via SetDisabled(true) or environment variable ANYONE_DISABLE=1.
// ANYONE_SOCKS5 may override the proxy address.
func Enabled() bool {
@@ -31,7 +31,7 @@ func Enabled() bool {

// socksAddr returns the SOCKS5 address to use for proxying (host:port).
func socksAddr() string {
-	return "127.0.0.1:9050"
+	return "localhost:9050"
}

// socksContextDialer implements tcp.ContextDialer over a SOCKS5 proxy.
@@ -57,7 +57,7 @@ func (d *socksContextDialer) DialContext(ctx context.Context, network, address s

// DialerForAddr returns a tcp.DialerForAddr that routes through the Anyone SOCKS5 proxy.
// It automatically BYPASSES the proxy for loopback, private, and link-local addresses
-// to allow local/dev networking (e.g. 127.0.0.1, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10).
+// to allow local/dev networking (e.g. localhost, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10).
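+// For example, a dial to /ip4/10.0.0.5/tcp/4001 goes direct, while a public
+// target such as /ip4/203.0.113.7/tcp/4001 is routed through the SOCKS5 proxy.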
func DialerForAddr() tcp.DialerForAddr { return func(raddr ma.Multiaddr) (tcp.ContextDialer, error) { // Prefer direct dialing for local/private targets diff --git a/pkg/cli/config_commands.go b/pkg/cli/config_commands.go index 84f267e..208aac7 100644 --- a/pkg/cli/config_commands.go +++ b/pkg/cli/config_commands.go @@ -286,7 +286,7 @@ func initFullStack(force bool) { fmt.Printf("✅ Generated bootstrap identity: %s (Peer ID: %s)\n", bootstrapIdentityPath, bootstrapInfo.PeerID.String()) // Construct bootstrap multiaddr - bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String()) + bootstrapMultiaddr := fmt.Sprintf("/ip4/localhost/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String()) fmt.Printf(" Bootstrap multiaddr: %s\n", bootstrapMultiaddr) // Generate configs for all nodes... @@ -430,8 +430,8 @@ discovery: %s discovery_interval: "15s" bootstrap_port: %d - http_adv_address: "127.0.0.1:%d" - raft_adv_address: "127.0.0.1:%d" + http_adv_address: "localhost:%d" + raft_adv_address: "localhost:%d" node_namespace: "default" security: @@ -477,8 +477,8 @@ discovery: bootstrap_peers: [] discovery_interval: "15s" bootstrap_port: %d - http_adv_address: "127.0.0.1:%d" - raft_adv_address: "127.0.0.1:%d" + http_adv_address: "localhost:%d" + raft_adv_address: "localhost:%d" node_namespace: "default" security: diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index f9e8634..18dd3b5 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -1102,12 +1102,12 @@ func installOlric() { configPath := olricConfigDir + "/config.yaml" if _, err := os.Stat(configPath); os.IsNotExist(err) { configContent := `server: - bindAddr: "127.0.0.1" + bindAddr: "localhost" bindPort: 3320 memberlist: environment: local - bindAddr: "127.0.0.1" + bindAddr: "localhost" bindPort: 3322 ` @@ -1907,8 +1907,8 @@ func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { } // Configure IPFS API and Gateway addresses - exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.API", `["/ip4/127.0.0.1/tcp/5001"]`, "--repo-dir="+ipfsRepoPath).Run() - exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Gateway", `["/ip4/127.0.0.1/tcp/8080"]`, "--repo-dir="+ipfsRepoPath).Run() + exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.API", `["/ip4/localhost/tcp/5001"]`, "--repo-dir="+ipfsRepoPath).Run() + exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Gateway", `["/ip4/localhost/tcp/8080"]`, "--repo-dir="+ipfsRepoPath).Run() exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Swarm", `["/ip4/0.0.0.0/tcp/4001","/ip6/::/tcp/4001"]`, "--repo-dir="+ipfsRepoPath).Run() fmt.Printf(" ✓ IPFS initialized\n") } @@ -2059,7 +2059,7 @@ func generateClusterServiceConfig(nodeID, vpsIP, secret string, isBootstrap bool }, IPFSConnector: ipfsConnectorConfig{ IPFSHTTP: ipfsHTTPConfig{ - NodeMultiaddress: "/ip4/127.0.0.1/tcp/5001", + NodeMultiaddress: "/ip4/localhost/tcp/5001", }, }, Datastore: datastoreConfig{ diff --git a/pkg/client/defaults_test.go b/pkg/client/defaults_test.go index eca0d4e..a686094 100644 --- a/pkg/client/defaults_test.go +++ b/pkg/client/defaults_test.go @@ -11,7 +11,7 @@ func TestDefaultBootstrapPeersNonEmpty(t *testing.T) { old := os.Getenv("DEBROS_BOOTSTRAP_PEERS") t.Cleanup(func() { os.Setenv("DEBROS_BOOTSTRAP_PEERS", old) }) // Set a valid bootstrap peer - validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" + validPeer := 
"/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" _ = os.Setenv("DEBROS_BOOTSTRAP_PEERS", validPeer) peers := DefaultBootstrapPeers() if len(peers) == 0 { @@ -50,8 +50,8 @@ func TestNormalizeEndpoints(t *testing.T) { } func TestEndpointFromMultiaddr(t *testing.T) { - ma, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001") - if ep := endpointFromMultiaddr(ma, 5001); ep != "http://127.0.0.1:5001" { + ma, _ := multiaddr.NewMultiaddr("/ip4/localhost/tcp/4001") + if ep := endpointFromMultiaddr(ma, 5001); ep != "http://localhost:5001" { t.Fatalf("unexpected endpoint: %s", ep) } } diff --git a/pkg/config/validate_test.go b/pkg/config/validate_test.go index 2122e6f..f351e9d 100644 --- a/pkg/config/validate_test.go +++ b/pkg/config/validate_test.go @@ -7,7 +7,7 @@ import ( // validConfigForType returns a valid config for the given node type func validConfigForType(nodeType string) *Config { - validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" + validPeer := "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" cfg := &Config{ Node: NodeConfig{ Type: nodeType, @@ -30,8 +30,8 @@ func validConfigForType(nodeType string) *Config { BootstrapPeers: []string{validPeer}, DiscoveryInterval: 15 * time.Second, BootstrapPort: 4001, - HttpAdvAddress: "127.0.0.1:5001", - RaftAdvAddress: "127.0.0.1:7001", + HttpAdvAddress: "localhost:5001", + RaftAdvAddress: "localhost:7001", NodeNamespace: "default", }, Logging: LoggingConfig{ @@ -205,7 +205,7 @@ func TestValidateRQLiteJoinAddress(t *testing.T) { } func TestValidateBootstrapPeers(t *testing.T) { - validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" + validPeer := "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" tests := []struct { name string nodeType string @@ -217,9 +217,9 @@ func TestValidateBootstrapPeers(t *testing.T) { {"bootstrap with peer", "bootstrap", []string{validPeer}, false}, {"bootstrap without peer", "bootstrap", []string{}, false}, {"invalid multiaddr", "node", []string{"invalid"}, true}, - {"missing p2p", "node", []string{"/ip4/127.0.0.1/tcp/4001"}, true}, + {"missing p2p", "node", []string{"/ip4/localhost/tcp/4001"}, true}, {"duplicate peer", "node", []string{validPeer, validPeer}, true}, - {"invalid port", "node", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true}, + {"invalid port", "node", []string{"/ip4/localhost/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true}, } for _, tt := range tests { @@ -392,17 +392,17 @@ func TestValidateCompleteConfig(t *testing.T) { BackupInterval: 24 * time.Hour, RQLitePort: 5002, RQLiteRaftPort: 7002, - RQLiteJoinAddress: "127.0.0.1:7001", + RQLiteJoinAddress: "localhost:7001", MinClusterSize: 1, }, Discovery: DiscoveryConfig{ BootstrapPeers: []string{ - "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj", + "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj", }, DiscoveryInterval: 15 * time.Second, BootstrapPort: 4001, - HttpAdvAddress: "127.0.0.1:5001", - RaftAdvAddress: "127.0.0.1:7001", + HttpAdvAddress: "localhost:5001", + RaftAdvAddress: "localhost:7001", NodeNamespace: "default", }, Security: SecurityConfig{ diff --git a/pkg/gateway/anon_proxy_handler.go b/pkg/gateway/anon_proxy_handler.go index e8aa925..7b0cd2d 100644 --- a/pkg/gateway/anon_proxy_handler.go +++ 
b/pkg/gateway/anon_proxy_handler.go
@@ -234,7 +234,7 @@ func isPrivateOrLocalHost(host string) bool {
	}

	// Check for localhost variants
-	if host == "localhost" || host == "127.0.0.1" || host == "::1" {
+	if host == "localhost" || host == "localhost" || host == "::1" {
		return true
	}

diff --git a/pkg/gateway/anon_proxy_handler_test.go b/pkg/gateway/anon_proxy_handler_test.go
index 005e124..d72c2e5 100644
--- a/pkg/gateway/anon_proxy_handler_test.go
+++ b/pkg/gateway/anon_proxy_handler_test.go
@@ -92,7 +92,7 @@ func TestAnonProxyHandler_PrivateAddressBlocking(t *testing.T) {
		url  string
	}{
		{"localhost", "http://localhost/test"},
-		{"127.0.0.1", "http://127.0.0.1/test"},
+		{"127.0.0.1", "http://127.0.0.1/test"},
		{"private 10.x", "http://10.0.0.1/test"},
		{"private 192.168.x", "http://192.168.1.1/test"},
		{"private 172.16.x", "http://172.16.0.1/test"},
@@ -166,7 +166,7 @@ func TestIsPrivateOrLocalHost(t *testing.T) {
		expected bool
	}{
		{"localhost", true},
-		{"127.0.0.1", true},
+		{"127.0.0.1", true},
		{"::1", true},
		{"10.0.0.1", true},
		{"192.168.1.1", true},
diff --git a/pkg/gateway/gateway.go b/pkg/gateway/gateway.go
index d1d1545..e14a043 100644
--- a/pkg/gateway/gateway.go
+++ b/pkg/gateway/gateway.go
@@ -371,7 +371,7 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
	}

	// Skip localhost loopback addresses (we'll use localhost:3320 as fallback)
-	if ip == "127.0.0.1" || ip == "::1" || ip == "localhost" {
+	if ip == "localhost" || ip == "::1" || ip == "localhost" {
		continue
	}

@@ -402,7 +402,7 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
	}

	// Skip localhost
-	if ip == "127.0.0.1" || ip == "::1" || ip == "localhost" {
+	if ip == "localhost" || ip == "::1" || ip == "localhost" {
		continue
	}

diff --git a/pkg/gateway/storage_handlers_test.go b/pkg/gateway/storage_handlers_test.go
index 30dd839..e539aec 100644
--- a/pkg/gateway/storage_handlers_test.go
+++ b/pkg/gateway/storage_handlers_test.go
@@ -18,11 +18,12 @@ import (

// mockIPFSClient is a mock implementation of ipfs.IPFSClient for testing
type mockIPFSClient struct {
-	addFunc       func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error)
-	pinFunc       func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error)
-	pinStatusFunc func(ctx context.Context, cid string) (*ipfs.PinStatus, error)
-	getFunc       func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error)
-	unpinFunc     func(ctx context.Context, cid string) error
+	addFunc          func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error)
+	pinFunc          func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error)
+	pinStatusFunc    func(ctx context.Context, cid string) (*ipfs.PinStatus, error)
+	getFunc          func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error)
+	unpinFunc        func(ctx context.Context, cid string) error
+	getPeerCountFunc func(ctx context.Context) (int, error)
}

func (m *mockIPFSClient) Add(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
@@ -72,6 +73,13 @@ func (m *mockIPFSClient) Health(ctx context.Context) error {
	return nil
}

+func (m *mockIPFSClient) GetPeerCount(ctx context.Context) (int, error) {
+	if m.getPeerCountFunc != nil {
+		return m.getPeerCountFunc(ctx)
+	}
+	return 3, nil
+}
+
func (m *mockIPFSClient) Close(ctx context.Context) error {
	return nil
}

diff --git a/pkg/ipfs/client_test.go b/pkg/ipfs/client_test.go
index 344dad1..77445eb 100644
--- a/pkg/ipfs/client_test.go
+++ b/pkg/ipfs/client_test.go
@@ -6,6 +6,7 @@ import (
	"io"
	"net/http"
	"net/http/httptest"
+	"strconv"
	"strings"
	"testing"
	"time"
@@ -158,14 +159,19 @@ func TestClient_Pin(t *testing.T) {
			t.Errorf("Expected method POST, got %s", r.Method)
		}

-		var reqBody map[string]interface{}
-		if err := json.NewDecoder(r.Body).Decode(&reqBody); err != nil {
-			t.Errorf("Failed to decode request: %v", err)
-			return
+		if cid := strings.TrimPrefix(r.URL.Path, "/pins/"); cid != expectedCID {
+			t.Errorf("Expected CID %s in path, got %s", expectedCID, cid)
		}

-		if reqBody["cid"] != expectedCID {
-			t.Errorf("Expected CID %s, got %v", expectedCID, reqBody["cid"])
+		query := r.URL.Query()
+		if got := query.Get("replication-min"); got != strconv.Itoa(expectedReplicationFactor) {
+			t.Errorf("Expected replication-min %d, got %s", expectedReplicationFactor, got)
+		}
+		if got := query.Get("replication-max"); got != strconv.Itoa(expectedReplicationFactor) {
+			t.Errorf("Expected replication-max %d, got %s", expectedReplicationFactor, got)
+		}
+		if got := query.Get("name"); got != expectedName {
+			t.Errorf("Expected name %s, got %s", expectedName, got)
		}

		response := PinResponse{
@@ -231,14 +237,14 @@ func TestClient_PinStatus(t *testing.T) {
			t.Errorf("Expected method GET, got %s", r.Method)
		}

-		response := PinStatus{
-			Cid:               expectedCID,
-			Name:              "test-file",
-			Status:            "pinned",
-			ReplicationMin:    3,
-			ReplicationMax:    3,
-			ReplicationFactor: 3,
-			Peers:             []string{"peer1", "peer2", "peer3"},
+		response := map[string]interface{}{
+			"cid":  expectedCID,
+			"name": "test-file",
+			"peer_map": map[string]interface{}{
+				"peer1": map[string]interface{}{"status": "pinned"},
+				"peer2": map[string]interface{}{"status": "pinned"},
+				"peer3": map[string]interface{}{"status": "pinned"},
+			},
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(response)
diff --git a/pkg/node/node.go b/pkg/node/node.go
index af840e8..1687b73 100644
--- a/pkg/node/node.go
+++ b/pkg/node/node.go
@@ -321,7 +321,7 @@ func (n *Node) startLibP2P() error {
	// For localhost/development, disable NAT services
	// For production, these would be enabled
	isLocalhost := len(n.config.Node.ListenAddresses) > 0 &&
-		(strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1") ||
+		(strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1") ||
			strings.Contains(n.config.Node.ListenAddresses[0], "localhost"))

	if isLocalhost {
diff --git a/pkg/node/node_test.go b/pkg/node/node_test.go
index 8ee0ab4..b07bb05 100644
--- a/pkg/node/node_test.go
+++ b/pkg/node/node_test.go
@@ -177,13 +177,13 @@ func TestHashBootstrapConnections(t *testing.T) {
	}

	// Create two hosts (A and B) listening on localhost TCP
-	hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+	hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0"))
	if err != nil {
		t.Fatalf("libp2p.New (A): %v", err)
	}
	defer hA.Close()

-	hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+	hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0"))
	if err != nil {
		t.Fatalf("libp2p.New (B): %v", err)
	}
@@ -244,19 +244,19 @@ func TestHashBootstrapConnections(t *testing.T) {
	}

	// Create three hosts (A, B, C) listening on localhost TCP
-	hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+	hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0"))
	if err != nil {
		t.Fatalf("libp2p.New (A): %v", err)
	}
	defer hA.Close()

-	hB, err := 
libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) + hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) if err != nil { t.Fatalf("libp2p.New (B): %v", err) } defer hB.Close() - hC, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) + hC, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) if err != nil { t.Fatalf("libp2p.New (C): %v", err) } From d00290d27828846a626da9d8c34f45c01cdcbdc1 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Wed, 5 Nov 2025 17:30:25 +0200 Subject: [PATCH 05/57] feat: enhance IPFS and Cluster integration in setup - Added automatic setup for IPFS and IPFS Cluster during the network setup process. - Implemented initialization of IPFS repositories and Cluster configurations for each node. - Enhanced Makefile to support starting IPFS and Cluster daemons with improved logging. - Introduced a new documentation guide for IPFS Cluster setup, detailing configuration and verification steps. - Updated changelog to reflect the new features and improvements. --- Makefile | 119 +++++--- pkg/cli/setup.go | 74 ++++- pkg/ipfs/cluster.go | 717 ++++++++++++++++++++++++++++++++++++++++++++ pkg/node/node.go | 50 +++ 4 files changed, 919 insertions(+), 41 deletions(-) create mode 100644 pkg/ipfs/cluster.go diff --git a/Makefile b/Makefile index 355cce4..27e3b85 100644 --- a/Makefile +++ b/Makefile @@ -117,45 +117,61 @@ dev: build ipfs-cluster-service --version >/dev/null 2>&1 && openssl rand -hex 32 > $$CLUSTER_SECRET || echo "0000000000000000000000000000000000000000000000000000000000000000" > $$CLUSTER_SECRET; \ fi; \ SECRET=$$(cat $$CLUSTER_SECRET); \ + SWARM_KEY=$$HOME/.debros/swarm.key; \ + if [ ! -f $$SWARM_KEY ]; then \ + echo " Generating private swarm key..."; \ + KEY_HEX=$$(openssl rand -hex 32 | tr '[:lower:]' '[:upper:]'); \ + printf "/key/swarm/psk/1.0.0/\n/base16/\n%s\n" "$$KEY_HEX" > $$SWARM_KEY; \ + chmod 600 $$SWARM_KEY; \ + fi; \ echo " Setting up bootstrap node (IPFS: 5001, Cluster: 9094)..."; \ if [ ! -d $$HOME/.debros/bootstrap/ipfs/repo ]; then \ echo " Initializing IPFS..."; \ mkdir -p $$HOME/.debros/bootstrap/ipfs; \ IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/localhost/tcp/5001"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/localhost/tcp/8080"]' 2>&1 | grep -v "generating" || true; \ + cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8080"]' 2>&1 | grep -v "generating" || true; \ IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4101","/ip6/::/tcp/4101"]' 2>&1 | grep -v "generating" || true; \ + else \ + if [ ! 
-f $$HOME/.debros/bootstrap/ipfs/repo/swarm.key ]; then \ + cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \ + fi; \ fi; \ - echo " Initializing IPFS Cluster..."; \ + echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \ mkdir -p $$HOME/.debros/bootstrap/ipfs-cluster; \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster ipfs-cluster-service init --force >/dev/null 2>&1 || true; \ - jq '.cluster.peername = "bootstrap" | .cluster.secret = "'$$SECRET'" | .cluster.listen_multiaddress = ["/ip4/0.0.0.0/tcp/9096"] | .consensus.crdt.cluster_name = "debros-cluster" | .consensus.crdt.trusted_peers = ["*"] | .api.restapi.http_listen_multiaddress = "/ip4/0.0.0.0/tcp/9094" | .api.ipfsproxy.listen_multiaddress = "/ip4/127.0.0.1/tcp/9095" | .api.pinsvcapi.http_listen_multiaddress = "/ip4/127.0.0.1/tcp/9097" | .ipfs_connector.ipfshttp.node_multiaddress = "/ip4/127.0.0.1/tcp/5001"' $$HOME/.debros/bootstrap/ipfs-cluster/service.json > $$HOME/.debros/bootstrap/ipfs-cluster/service.json.tmp && mv $$HOME/.debros/bootstrap/ipfs-cluster/service.json.tmp $$HOME/.debros/bootstrap/ipfs-cluster/service.json; \ echo " Setting up node2 (IPFS: 5002, Cluster: 9104)..."; \ if [ ! -d $$HOME/.debros/node2/ipfs/repo ]; then \ echo " Initializing IPFS..."; \ mkdir -p $$HOME/.debros/node2/ipfs; \ IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/localhost/tcp/5002"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/localhost/tcp/8081"]' 2>&1 | grep -v "generating" || true; \ + cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5002"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8081"]' 2>&1 | grep -v "generating" || true; \ IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4102","/ip6/::/tcp/4102"]' 2>&1 | grep -v "generating" || true; \ + else \ + if [ ! -f $$HOME/.debros/node2/ipfs/repo/swarm.key ]; then \ + cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \ + fi; \ fi; \ - echo " Initializing IPFS Cluster..."; \ + echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \ mkdir -p $$HOME/.debros/node2/ipfs-cluster; \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster ipfs-cluster-service init --force >/dev/null 2>&1 || true; \ - jq '.cluster.peername = "node2" | .cluster.secret = "'$$SECRET'" | .cluster.listen_multiaddress = ["/ip4/0.0.0.0/tcp/9106"] | .consensus.crdt.cluster_name = "debros-cluster" | .consensus.crdt.trusted_peers = ["*"] | .api.restapi.http_listen_multiaddress = "/ip4/0.0.0.0/tcp/9104" | .api.ipfsproxy.listen_multiaddress = "/ip4/127.0.0.1/tcp/9105" | .api.pinsvcapi.http_listen_multiaddress = "/ip4/127.0.0.1/tcp/9107" | .ipfs_connector.ipfshttp.node_multiaddress = "/ip4/127.0.0.1/tcp/5002"' $$HOME/.debros/node2/ipfs-cluster/service.json > $$HOME/.debros/node2/ipfs-cluster/service.json.tmp && mv $$HOME/.debros/node2/ipfs-cluster/service.json.tmp $$HOME/.debros/node2/ipfs-cluster/service.json; \ echo " Setting up node3 (IPFS: 5003, Cluster: 9114)..."; \ if [ ! 
-d $$HOME/.debros/node3/ipfs/repo ]; then \ echo " Initializing IPFS..."; \ mkdir -p $$HOME/.debros/node3/ipfs; \ IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/localhost/tcp/5003"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/localhost/tcp/8082"]' 2>&1 | grep -v "generating" || true; \ + cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5003"]' 2>&1 | grep -v "generating" || true; \ + IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8082"]' 2>&1 | grep -v "generating" || true; \ IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4103","/ip6/::/tcp/4103"]' 2>&1 | grep -v "generating" || true; \ + else \ + if [ ! -f $$HOME/.debros/node3/ipfs/repo/swarm.key ]; then \ + cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \ + fi; \ fi; \ - echo " Initializing IPFS Cluster..."; \ + echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \ mkdir -p $$HOME/.debros/node3/ipfs-cluster; \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster ipfs-cluster-service init --force >/dev/null 2>&1 || true; \ - jq '.cluster.peername = "node3" | .cluster.secret = "'$$SECRET'" | .cluster.listen_multiaddress = ["/ip4/0.0.0.0/tcp/9116"] | .consensus.crdt.cluster_name = "debros-cluster" | .consensus.crdt.trusted_peers = ["*"] | .api.restapi.http_listen_multiaddress = "/ip4/0.0.0.0/tcp/9114" | .api.ipfsproxy.listen_multiaddress = "/ip4/127.0.0.1/tcp/9115" | .api.pinsvcapi.http_listen_multiaddress = "/ip4/127.0.0.1/tcp/9117" | .ipfs_connector.ipfshttp.node_multiaddress = "/ip4/127.0.0.1/tcp/5003"' $$HOME/.debros/node3/ipfs-cluster/service.json > $$HOME/.debros/node3/ipfs-cluster/service.json.tmp && mv $$HOME/.debros/node3/ipfs-cluster/service.json.tmp $$HOME/.debros/node3/ipfs-cluster/service.json; \ echo "Starting IPFS daemons..."; \ if [ ! -f .dev/pids/ipfs-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null; then \ IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-bootstrap.pid; \ @@ -178,29 +194,6 @@ dev: build else \ echo " ✓ Node3 IPFS already running"; \ fi; \ - \ - echo "Starting IPFS Cluster peers..."; \ - if [ ! -f .dev/pids/ipfs-cluster-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null; then \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-bootstrap.pid; \ - echo " Bootstrap Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-bootstrap.pid), API: 9094)"; \ - sleep 3; \ - else \ - echo " ✓ Bootstrap Cluster already running"; \ - fi; \ - if [ ! -f .dev/pids/ipfs-cluster-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null; then \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node2.log 2>&1 & echo $$! 
> .dev/pids/ipfs-cluster-node2.pid; \ - echo " Node2 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node2.pid), API: 9104)"; \ - sleep 3; \ - else \ - echo " ✓ Node2 Cluster already running"; \ - fi; \ - if [ ! -f .dev/pids/ipfs-cluster-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null; then \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node3.pid; \ - echo " Node3 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node3.pid), API: 9114)"; \ - sleep 3; \ - else \ - echo " ✓ Node3 Cluster already running"; \ - fi; \ else \ echo " ⚠️ ipfs or ipfs-cluster-service not found - skipping IPFS setup"; \ echo " Install with: https://docs.ipfs.tech/install/ and https://ipfscluster.io/documentation/guides/install/"; \ @@ -208,12 +201,58 @@ dev: build @sleep 2 @echo "Starting bootstrap node..." @nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid - @sleep 2 + @sleep 3 @echo "Starting node2..." @nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid - @sleep 1 + @sleep 2 @echo "Starting node3..." @nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid + @sleep 3 + @echo "Starting IPFS Cluster daemons (after Go nodes have configured them)..." + @if command -v ipfs-cluster-service >/dev/null 2>&1; then \ + if [ ! -f .dev/pids/ipfs-cluster-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null; then \ + if [ -f $$HOME/.debros/bootstrap/ipfs-cluster/service.json ]; then \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-bootstrap.pid; \ + echo " Bootstrap Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-bootstrap.pid), API: 9094)"; \ + echo " Waiting for bootstrap cluster to be ready..."; \ + for i in $$(seq 1 30); do \ + if curl -s http://localhost:9094/peers >/dev/null 2>&1; then \ + break; \ + fi; \ + sleep 1; \ + done; \ + sleep 2; \ + else \ + echo " ⚠️ Bootstrap cluster config not ready yet"; \ + fi; \ + else \ + echo " ✓ Bootstrap Cluster already running"; \ + fi; \ + if [ ! -f .dev/pids/ipfs-cluster-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null; then \ + if [ -f $$HOME/.debros/node2/ipfs-cluster/service.json ]; then \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node2.pid; \ + echo " Node2 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node2.pid), API: 9104)"; \ + sleep 3; \ + else \ + echo " ⚠️ Node2 cluster config not ready yet"; \ + fi; \ + else \ + echo " ✓ Node2 Cluster already running"; \ + fi; \ + if [ ! -f .dev/pids/ipfs-cluster-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null; then \ + if [ -f $$HOME/.debros/node3/ipfs-cluster/service.json ]; then \ + env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node3.log 2>&1 & echo $$! 
> .dev/pids/ipfs-cluster-node3.pid; \ + echo " Node3 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node3.pid), API: 9114)"; \ + sleep 3; \ + else \ + echo " ⚠️ Node3 cluster config not ready yet"; \ + fi; \ + else \ + echo " ✓ Node3 Cluster already running"; \ + fi; \ + else \ + echo " ⚠️ ipfs-cluster-service not found - skipping cluster daemon startup"; \ + fi @sleep 1 @echo "Starting Olric cache server..." @if command -v olric-server >/dev/null 2>&1; then \ diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index 18dd3b5..a103b27 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -1871,6 +1871,60 @@ func getOrGenerateClusterSecret() (string, error) { return secret, nil } +// getOrGenerateSwarmKey gets or generates a shared IPFS swarm key +// Returns the swarm key content as bytes (formatted for IPFS) +func getOrGenerateSwarmKey() ([]byte, error) { + secretPath := "/home/debros/.debros/swarm.key" + + // Try to read existing key + if data, err := os.ReadFile(secretPath); err == nil { + // Validate it's a proper swarm key format + content := string(data) + if strings.Contains(content, "/key/swarm/psk/1.0.0/") { + return data, nil + } + } + + // Generate new key (32 bytes) + keyBytes := make([]byte, 32) + if _, err := rand.Read(keyBytes); err != nil { + return nil, fmt.Errorf("failed to generate swarm key: %w", err) + } + + // Format as IPFS swarm key file + keyHex := strings.ToUpper(hex.EncodeToString(keyBytes)) + content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex) + + // Save key + if err := os.WriteFile(secretPath, []byte(content), 0600); err != nil { + return nil, fmt.Errorf("failed to save swarm key: %w", err) + } + exec.Command("chown", "debros:debros", secretPath).Run() + + fmt.Printf(" ✓ Generated private swarm key\n") + return []byte(content), nil +} + +// ensureSwarmKey ensures the swarm key exists in the IPFS repo +func ensureSwarmKey(repoPath string, swarmKey []byte) error { + swarmKeyPath := filepath.Join(repoPath, "swarm.key") + + // Check if swarm key already exists + if _, err := os.Stat(swarmKeyPath); err == nil { + // Verify it matches (optional: could compare content) + return nil + } + + // Create swarm key file in repo + if err := os.WriteFile(swarmKeyPath, swarmKey, 0600); err != nil { + return fmt.Errorf("failed to write swarm key to repo: %w", err) + } + + // Fix ownership + exec.Command("chown", "debros:debros", swarmKeyPath).Run() + return nil +} + // initializeIPFSForNode initializes IPFS and IPFS Cluster for a node func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { fmt.Printf(" Initializing IPFS and Cluster for node %s...\n", nodeID) @@ -1881,6 +1935,12 @@ func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { return fmt.Errorf("failed to get cluster secret: %w", err) } + // Get or generate swarm key for private network + swarmKey, err := getOrGenerateSwarmKey() + if err != nil { + return fmt.Errorf("failed to get swarm key: %w", err) + } + // Determine data directories var ipfsDataDir, clusterDataDir string if nodeID == "bootstrap" { @@ -1906,11 +1966,22 @@ func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { return fmt.Errorf("failed to initialize IPFS: %v\n%s", err, string(output)) } + // Ensure swarm key is in place (creates private network) + if err := ensureSwarmKey(ipfsRepoPath, swarmKey); err != nil { + return fmt.Errorf("failed to set swarm key: %w", err) + } + // Configure IPFS API and Gateway addresses exec.Command("sudo", "-u", "debros", "ipfs", "config", 
"--json", "Addresses.API", `["/ip4/localhost/tcp/5001"]`, "--repo-dir="+ipfsRepoPath).Run() exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Gateway", `["/ip4/localhost/tcp/8080"]`, "--repo-dir="+ipfsRepoPath).Run() exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Swarm", `["/ip4/0.0.0.0/tcp/4001","/ip6/::/tcp/4001"]`, "--repo-dir="+ipfsRepoPath).Run() - fmt.Printf(" ✓ IPFS initialized\n") + fmt.Printf(" ✓ IPFS initialized with private swarm key\n") + } else { + // Repo exists, but ensure swarm key is present + if err := ensureSwarmKey(ipfsRepoPath, swarmKey); err != nil { + return fmt.Errorf("failed to set swarm key: %w", err) + } + fmt.Printf(" ✓ IPFS repository already exists, swarm key ensured\n") } // Initialize IPFS Cluster if not already initialized @@ -2084,6 +2155,7 @@ User=debros Group=debros Environment=HOME=/home/debros ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export IPFS_PATH=/home/debros/.debros/node/ipfs/repo; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; else export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; fi' +ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/swarm.key ] && [ ! -f ${IPFS_PATH}/swarm.key ]; then cp /home/debros/.debros/swarm.key ${IPFS_PATH}/swarm.key && chmod 600 ${IPFS_PATH}/swarm.key; fi' ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=${IPFS_PATH} Restart=always RestartSec=5 diff --git a/pkg/ipfs/cluster.go b/pkg/ipfs/cluster.go new file mode 100644 index 0000000..0ab5e58 --- /dev/null +++ b/pkg/ipfs/cluster.go @@ -0,0 +1,717 @@ +package ipfs + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/DeBrosOfficial/network/pkg/config" +) + +// ClusterConfigManager manages IPFS Cluster configuration files +type ClusterConfigManager struct { + cfg *config.Config + logger *zap.Logger + clusterPath string + secret string +} + +// ClusterServiceConfig represents the structure of service.json +type ClusterServiceConfig struct { + Cluster struct { + Peername string `json:"peername"` + Secret string `json:"secret"` + LeaveOnShutdown bool `json:"leave_on_shutdown"` + ListenMultiaddress []string `json:"listen_multiaddress"` + PeerAddresses []string `json:"peer_addresses"` + // ... 
other fields kept from template + } `json:"cluster"` + Consensus struct { + CRDT struct { + ClusterName string `json:"cluster_name"` + TrustedPeers []string `json:"trusted_peers"` + Batching struct { + MaxBatchSize int `json:"max_batch_size"` + MaxBatchAge string `json:"max_batch_age"` + } `json:"batching"` + RepairInterval string `json:"repair_interval"` + } `json:"crdt"` + } `json:"consensus"` + API struct { + IPFSProxy struct { + ListenMultiaddress string `json:"listen_multiaddress"` + NodeMultiaddress string `json:"node_multiaddress"` + } `json:"ipfsproxy"` + PinSvcAPI struct { + HTTPListenMultiaddress string `json:"http_listen_multiaddress"` + } `json:"pinsvcapi"` + RestAPI struct { + HTTPListenMultiaddress string `json:"http_listen_multiaddress"` + } `json:"restapi"` + } `json:"api"` + IPFSConnector struct { + IPFSHTTP struct { + NodeMultiaddress string `json:"node_multiaddress"` + } `json:"ipfshttp"` + } `json:"ipfs_connector"` + // Keep rest of fields as raw JSON to preserve structure + Raw map[string]interface{} `json:"-"` +} + +// NewClusterConfigManager creates a new IPFS Cluster config manager +func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterConfigManager, error) { + // Expand data directory path + dataDir := cfg.Node.DataDir + if strings.HasPrefix(dataDir, "~") { + home, err := os.UserHomeDir() + if err != nil { + return nil, fmt.Errorf("failed to determine home directory: %w", err) + } + dataDir = filepath.Join(home, dataDir[1:]) + } + + // Determine cluster path based on data directory structure + // Check if dataDir contains specific node names (e.g., ~/.debros/bootstrap, ~/.debros/node2) + clusterPath := filepath.Join(dataDir, "ipfs-cluster") + if strings.Contains(dataDir, "bootstrap") { + // Check if bootstrap is a direct child + if filepath.Base(filepath.Dir(dataDir)) == "bootstrap" || filepath.Base(dataDir) == "bootstrap" { + clusterPath = filepath.Join(dataDir, "ipfs-cluster") + } else { + clusterPath = filepath.Join(dataDir, "bootstrap", "ipfs-cluster") + } + } else if strings.Contains(dataDir, "node2") { + if filepath.Base(filepath.Dir(dataDir)) == "node2" || filepath.Base(dataDir) == "node2" { + clusterPath = filepath.Join(dataDir, "ipfs-cluster") + } else { + clusterPath = filepath.Join(dataDir, "node2", "ipfs-cluster") + } + } else if strings.Contains(dataDir, "node3") { + if filepath.Base(filepath.Dir(dataDir)) == "node3" || filepath.Base(dataDir) == "node3" { + clusterPath = filepath.Join(dataDir, "ipfs-cluster") + } else { + clusterPath = filepath.Join(dataDir, "node3", "ipfs-cluster") + } + } + + // Load or generate cluster secret + secretPath := filepath.Join(dataDir, "..", "cluster-secret") + if strings.Contains(dataDir, ".debros") { + // Try to find cluster-secret in ~/.debros + home, err := os.UserHomeDir() + if err == nil { + secretPath = filepath.Join(home, ".debros", "cluster-secret") + } + } + + secret, err := loadOrGenerateClusterSecret(secretPath) + if err != nil { + return nil, fmt.Errorf("failed to load/generate cluster secret: %w", err) + } + + return &ClusterConfigManager{ + cfg: cfg, + logger: logger, + clusterPath: clusterPath, + secret: secret, + }, nil +} + +// EnsureConfig ensures the IPFS Cluster service.json exists and is properly configured +func (cm *ClusterConfigManager) EnsureConfig() error { + if cm.cfg.Database.IPFS.ClusterAPIURL == "" { + cm.logger.Debug("IPFS Cluster API URL not configured, skipping cluster config") + return nil + } + + serviceJSONPath := filepath.Join(cm.clusterPath, "service.json") + 
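+	// EnsureConfig is safe to re-run on every node start: saveConfig deep-merges
+	// into any existing service.json, so fields outside the managed set survive.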
+
+	// Parse ports from URLs
+	clusterPort, restAPIPort, err := parseClusterPorts(cm.cfg.Database.IPFS.ClusterAPIURL)
+	if err != nil {
+		return fmt.Errorf("failed to parse cluster API URL: %w", err)
+	}
+
+	ipfsPort, err := parseIPFSPort(cm.cfg.Database.IPFS.APIURL)
+	if err != nil {
+		return fmt.Errorf("failed to parse IPFS API URL: %w", err)
+	}
+
+	// Determine node name
+	nodeName := cm.cfg.Node.Type
+	if nodeName == "node" {
+		// Try to extract from data dir or ID
+		if strings.Contains(cm.cfg.Node.DataDir, "node2") || strings.Contains(cm.cfg.Node.ID, "node2") {
+			nodeName = "node2"
+		} else if strings.Contains(cm.cfg.Node.DataDir, "node3") || strings.Contains(cm.cfg.Node.ID, "node3") {
+			nodeName = "node3"
+		} else {
+			nodeName = "node"
+		}
+	}
+
+	// Derive sibling ports from the cluster swarm port (IPFS proxy = swarm-1,
+	// pinning service = swarm+1, REST API = swarm-2).
+	proxyPort := clusterPort - 1
+	pinSvcPort := clusterPort + 1
+	// parseClusterPorts already returns the swarm listen port (REST API port + 2),
+	// so use it directly rather than adding 2 again.
+	clusterListenPort := clusterPort
+
+	// If config doesn't exist, initialize it with ipfs-cluster-service init
+	// This ensures we have all required sections (datastore, informer, etc.)
+	if _, err := os.Stat(serviceJSONPath); os.IsNotExist(err) {
+		cm.logger.Info("Initializing cluster config with ipfs-cluster-service init")
+		initCmd := exec.Command("ipfs-cluster-service", "init", "--force")
+		initCmd.Env = append(os.Environ(), "IPFS_CLUSTER_PATH="+cm.clusterPath)
+		if err := initCmd.Run(); err != nil {
+			cm.logger.Warn("Failed to initialize cluster config with ipfs-cluster-service init, will create minimal template", zap.Error(err))
+		}
+	}
+
+	// Load existing config or create new
+	cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
+	if err != nil {
+		return fmt.Errorf("failed to load/create config: %w", err)
+	}
+
+	// Update configuration
+	cfg.Cluster.Peername = nodeName
+	cfg.Cluster.Secret = cm.secret
+	cfg.Cluster.ListenMultiaddress = []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", clusterListenPort)}
+	cfg.Consensus.CRDT.ClusterName = "debros-cluster"
+	cfg.Consensus.CRDT.TrustedPeers = []string{"*"}
+
+	// API endpoints
+	cfg.API.RestAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", restAPIPort)
+	cfg.API.IPFSProxy.ListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", proxyPort)
+	cfg.API.IPFSProxy.NodeMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort) // must target the IPFS API, mirroring ipfs_connector below
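+	// Example with the dev defaults (cluster API http://localhost:9094, IPFS API :5001):
+	// REST API 9094, IPFS proxy 9095, cluster swarm 9096, pinning service 9097.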
+ cfg.API.PinSvcAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", pinSvcPort) + + // IPFS connector (also needs to be set) + cfg.IPFSConnector.IPFSHTTP.NodeMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort) + + // Save configuration + if err := cm.saveConfig(serviceJSONPath, cfg); err != nil { + return fmt.Errorf("failed to save config: %w", err) + } + + cm.logger.Info("IPFS Cluster configuration ensured", + zap.String("path", serviceJSONPath), + zap.String("node_name", nodeName), + zap.Int("ipfs_port", ipfsPort), + zap.Int("cluster_port", clusterPort), + zap.Int("rest_api_port", restAPIPort)) + + return nil +} + +// UpdateBootstrapPeers updates peer_addresses and peerstore with bootstrap peer information +func (cm *ClusterConfigManager) UpdateBootstrapPeers(bootstrapAPIURL string) error { + if cm.cfg.Database.IPFS.ClusterAPIURL == "" { + return nil // IPFS not configured + } + + // Skip if this is the bootstrap node itself + if cm.cfg.Node.Type == "bootstrap" { + return nil + } + + // Query bootstrap cluster API to get peer ID + peerID, err := getBootstrapPeerID(bootstrapAPIURL) + if err != nil { + return fmt.Errorf("failed to get bootstrap peer ID: %w", err) + } + + if peerID == "" { + cm.logger.Warn("Bootstrap peer ID not available yet") + return nil + } + + // Extract bootstrap cluster port from URL + _, clusterPort, err := parseClusterPorts(bootstrapAPIURL) + if err != nil { + return fmt.Errorf("failed to parse bootstrap cluster API URL: %w", err) + } + + // Bootstrap listens on clusterPort + 2 (same pattern) + bootstrapClusterPort := clusterPort + 2 + bootstrapPeerAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", bootstrapClusterPort, peerID) + + // Load current config + serviceJSONPath := filepath.Join(cm.clusterPath, "service.json") + cfg, err := cm.loadOrCreateConfig(serviceJSONPath) + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + // Update peer_addresses + cfg.Cluster.PeerAddresses = []string{bootstrapPeerAddr} + + // Save config + if err := cm.saveConfig(serviceJSONPath, cfg); err != nil { + return fmt.Errorf("failed to save config: %w", err) + } + + // Write to peerstore file + peerstorePath := filepath.Join(cm.clusterPath, "peerstore") + if err := os.WriteFile(peerstorePath, []byte(bootstrapPeerAddr+"\n"), 0644); err != nil { + return fmt.Errorf("failed to write peerstore: %w", err) + } + + cm.logger.Info("Updated bootstrap peer configuration", + zap.String("bootstrap_peer_addr", bootstrapPeerAddr), + zap.String("peerstore_path", peerstorePath)) + + return nil +} + +// loadOrCreateConfig loads existing service.json or creates a template +func (cm *ClusterConfigManager) loadOrCreateConfig(path string) (*ClusterServiceConfig, error) { + // Try to load existing config + if data, err := os.ReadFile(path); err == nil { + var cfg ClusterServiceConfig + if err := json.Unmarshal(data, &cfg); err == nil { + // Also unmarshal into raw map to preserve all fields + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err == nil { + cfg.Raw = raw + } + return &cfg, nil + } + } + + // Create new config from template + return cm.createTemplateConfig(), nil +} + +// createTemplateConfig creates a template configuration matching the structure +func (cm *ClusterConfigManager) createTemplateConfig() *ClusterServiceConfig { + cfg := &ClusterServiceConfig{} + cfg.Cluster.LeaveOnShutdown = false + cfg.Cluster.PeerAddresses = []string{} + cfg.Consensus.CRDT.TrustedPeers = []string{"*"} + 
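+	// "*" trusts every peer that joined with the shared cluster secret; the
+	// secret itself is what gates cluster membership.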
cfg.Consensus.CRDT.Batching.MaxBatchSize = 0 + cfg.Consensus.CRDT.Batching.MaxBatchAge = "0s" + cfg.Consensus.CRDT.RepairInterval = "1h0m0s" + cfg.Raw = make(map[string]interface{}) + return cfg +} + +// saveConfig saves the configuration, preserving all existing fields +func (cm *ClusterConfigManager) saveConfig(path string, cfg *ClusterServiceConfig) error { + // Create directory if needed + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return fmt.Errorf("failed to create cluster directory: %w", err) + } + + // Load existing config if it exists to preserve all fields + var final map[string]interface{} + if data, err := os.ReadFile(path); err == nil { + if err := json.Unmarshal(data, &final); err != nil { + // If parsing fails, start fresh + final = make(map[string]interface{}) + } + } else { + final = make(map[string]interface{}) + } + + // Deep merge: update nested structures while preserving other fields + updateNestedMap(final, "cluster", map[string]interface{}{ + "peername": cfg.Cluster.Peername, + "secret": cfg.Cluster.Secret, + "leave_on_shutdown": cfg.Cluster.LeaveOnShutdown, + "listen_multiaddress": cfg.Cluster.ListenMultiaddress, + "peer_addresses": cfg.Cluster.PeerAddresses, + }) + + updateNestedMap(final, "consensus", map[string]interface{}{ + "crdt": map[string]interface{}{ + "cluster_name": cfg.Consensus.CRDT.ClusterName, + "trusted_peers": cfg.Consensus.CRDT.TrustedPeers, + "batching": map[string]interface{}{ + "max_batch_size": cfg.Consensus.CRDT.Batching.MaxBatchSize, + "max_batch_age": cfg.Consensus.CRDT.Batching.MaxBatchAge, + }, + "repair_interval": cfg.Consensus.CRDT.RepairInterval, + }, + }) + + // Update API section, preserving other fields + updateNestedMap(final, "api", map[string]interface{}{ + "ipfsproxy": map[string]interface{}{ + "listen_multiaddress": cfg.API.IPFSProxy.ListenMultiaddress, + "node_multiaddress": cfg.API.IPFSProxy.NodeMultiaddress, // FIX: Correct path! 
+ }, + "pinsvcapi": map[string]interface{}{ + "http_listen_multiaddress": cfg.API.PinSvcAPI.HTTPListenMultiaddress, + }, + "restapi": map[string]interface{}{ + "http_listen_multiaddress": cfg.API.RestAPI.HTTPListenMultiaddress, + }, + }) + + // Update IPFS connector section + updateNestedMap(final, "ipfs_connector", map[string]interface{}{ + "ipfshttp": map[string]interface{}{ + "node_multiaddress": cfg.IPFSConnector.IPFSHTTP.NodeMultiaddress, + "connect_swarms_delay": "30s", + "ipfs_request_timeout": "5m0s", + "pin_timeout": "2m0s", + "unpin_timeout": "3h0m0s", + "repogc_timeout": "24h0m0s", + "informer_trigger_interval": 0, + }, + }) + + // Ensure all required sections exist with defaults if missing + ensureRequiredSection(final, "datastore", map[string]interface{}{ + "pebble": map[string]interface{}{ + "pebble_options": map[string]interface{}{ + "cache_size_bytes": 1073741824, + "bytes_per_sync": 1048576, + "disable_wal": false, + }, + }, + }) + + ensureRequiredSection(final, "informer", map[string]interface{}{ + "disk": map[string]interface{}{ + "metric_ttl": "30s", + "metric_type": "freespace", + }, + "pinqueue": map[string]interface{}{ + "metric_ttl": "30s", + "weight_bucket_size": 100000, + }, + "tags": map[string]interface{}{ + "metric_ttl": "30s", + "tags": map[string]interface{}{ + "group": "default", + }, + }, + }) + + ensureRequiredSection(final, "monitor", map[string]interface{}{ + "pubsubmon": map[string]interface{}{ + "check_interval": "15s", + }, + }) + + ensureRequiredSection(final, "pin_tracker", map[string]interface{}{ + "stateless": map[string]interface{}{ + "concurrent_pins": 10, + "priority_pin_max_age": "24h0m0s", + "priority_pin_max_retries": 5, + }, + }) + + ensureRequiredSection(final, "allocator", map[string]interface{}{ + "balanced": map[string]interface{}{ + "allocate_by": []interface{}{"tag:group", "freespace"}, + }, + }) + + // Write JSON + data, err := json.MarshalIndent(final, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal config: %w", err) + } + + if err := os.WriteFile(path, data, 0644); err != nil { + return fmt.Errorf("failed to write config: %w", err) + } + + return nil +} + +// updateNestedMap updates a nested map structure, merging values +func updateNestedMap(parent map[string]interface{}, key string, updates map[string]interface{}) { + existing, ok := parent[key].(map[string]interface{}) + if !ok { + parent[key] = updates + return + } + + // Merge updates into existing + for k, v := range updates { + if vm, ok := v.(map[string]interface{}); ok { + // Recursively merge nested maps + if _, ok := existing[k].(map[string]interface{}); !ok { + existing[k] = vm + } else { + updateNestedMap(existing, k, vm) + } + } else { + existing[k] = v + } + } + parent[key] = existing +} + +// ensureRequiredSection ensures a section exists in the config, creating it with defaults if missing +func ensureRequiredSection(parent map[string]interface{}, key string, defaults map[string]interface{}) { + if _, exists := parent[key]; !exists { + parent[key] = defaults + return + } + // If section exists, merge defaults to ensure all required subsections exist + existing, ok := parent[key].(map[string]interface{}) + if ok { + updateNestedMap(parent, key, defaults) + parent[key] = existing + } +} + +// parseClusterPorts extracts cluster port and REST API port from ClusterAPIURL +func parseClusterPorts(clusterAPIURL string) (clusterPort, restAPIPort int, err error) { + u, err := url.Parse(clusterAPIURL) + if err != nil { + return 0, 0, err + } + + portStr := 
u.Port() + if portStr == "" { + // Default port based on scheme + if u.Scheme == "http" { + portStr = "9094" + } else if u.Scheme == "https" { + portStr = "443" + } else { + return 0, 0, fmt.Errorf("unknown scheme: %s", u.Scheme) + } + } + + _, err = fmt.Sscanf(portStr, "%d", &restAPIPort) + if err != nil { + return 0, 0, fmt.Errorf("invalid port: %s", portStr) + } + + // Cluster listen port is typically REST API port + 2 + clusterPort = restAPIPort + 2 + + return clusterPort, restAPIPort, nil +} + +// parseIPFSPort extracts IPFS API port from APIURL +func parseIPFSPort(apiURL string) (int, error) { + if apiURL == "" { + return 5001, nil // Default + } + + u, err := url.Parse(apiURL) + if err != nil { + return 0, err + } + + portStr := u.Port() + if portStr == "" { + if u.Scheme == "http" { + return 5001, nil // Default HTTP port + } + return 0, fmt.Errorf("unknown scheme: %s", u.Scheme) + } + + var port int + _, err = fmt.Sscanf(portStr, "%d", &port) + if err != nil { + return 0, fmt.Errorf("invalid port: %s", portStr) + } + + return port, nil +} + +// getBootstrapPeerID queries the bootstrap cluster API to get the peer ID +func getBootstrapPeerID(apiURL string) (string, error) { + // Simple HTTP client to query /peers endpoint + client := &standardHTTPClient{} + peersResp, err := client.Get(fmt.Sprintf("%s/peers", apiURL)) + if err != nil { + return "", err + } + + var peersData struct { + ID string `json:"id"` + } + if err := json.Unmarshal(peersResp, &peersData); err != nil { + return "", err + } + + return peersData.ID, nil +} + +// loadOrGenerateClusterSecret loads cluster secret or generates a new one +func loadOrGenerateClusterSecret(path string) (string, error) { + // Try to load existing secret + if data, err := os.ReadFile(path); err == nil { + return strings.TrimSpace(string(data)), nil + } + + // Generate new secret (32 bytes hex = 64 hex chars) + secret := generateRandomSecret(64) + + // Save secret + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return "", err + } + if err := os.WriteFile(path, []byte(secret), 0600); err != nil { + return "", err + } + + return secret, nil +} + +// generateRandomSecret generates a random hex string +func generateRandomSecret(length int) string { + bytes := make([]byte, length/2) + if _, err := rand.Read(bytes); err != nil { + // Fallback to simple generation if crypto/rand fails + for i := range bytes { + bytes[i] = byte(os.Getpid() + i) + } + } + return hex.EncodeToString(bytes) +} + +// standardHTTPClient implements HTTP client using net/http +type standardHTTPClient struct{} + +func (c *standardHTTPClient) Get(url string) ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, resp.Status) + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return data, nil +} + +// FixIPFSConfigAddresses fixes localhost addresses in IPFS config to use 127.0.0.1 +// This is necessary because IPFS doesn't accept "localhost" as a valid IP address in multiaddrs +// This function always ensures the config is correct, regardless of current state +func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error { + if cm.cfg.Database.IPFS.APIURL == "" { + return nil // IPFS not configured + } + + // Determine IPFS repo path from config + dataDir := cm.cfg.Node.DataDir + if strings.HasPrefix(dataDir, "~") { + home, err := os.UserHomeDir() + if err != nil { + 
return fmt.Errorf("failed to determine home directory: %w", err) + } + dataDir = filepath.Join(home, dataDir[1:]) + } + + // Try to find IPFS repo path + // Check common locations: dataDir/ipfs/repo, or dataDir/bootstrap/ipfs/repo, etc. + possiblePaths := []string{ + filepath.Join(dataDir, "ipfs", "repo"), + filepath.Join(dataDir, "bootstrap", "ipfs", "repo"), + filepath.Join(dataDir, "node2", "ipfs", "repo"), + filepath.Join(dataDir, "node3", "ipfs", "repo"), + filepath.Join(filepath.Dir(dataDir), "bootstrap", "ipfs", "repo"), + filepath.Join(filepath.Dir(dataDir), "node2", "ipfs", "repo"), + filepath.Join(filepath.Dir(dataDir), "node3", "ipfs", "repo"), + } + + var ipfsRepoPath string + for _, path := range possiblePaths { + if _, err := os.Stat(filepath.Join(path, "config")); err == nil { + ipfsRepoPath = path + break + } + } + + if ipfsRepoPath == "" { + cm.logger.Debug("IPFS repo not found, skipping config fix") + return nil // Not an error if repo doesn't exist yet + } + + // Parse IPFS API port from config + ipfsPort, err := parseIPFSPort(cm.cfg.Database.IPFS.APIURL) + if err != nil { + return fmt.Errorf("failed to parse IPFS API URL: %w", err) + } + + // Determine gateway port (typically API port + 3079, or 8080 for bootstrap, 8081 for node2, etc.) + gatewayPort := 8080 + if strings.Contains(dataDir, "node2") { + gatewayPort = 8081 + } else if strings.Contains(dataDir, "node3") { + gatewayPort = 8082 + } else if ipfsPort == 5002 { + gatewayPort = 8081 + } else if ipfsPort == 5003 { + gatewayPort = 8082 + } + + // Always ensure API address is correct (don't just check, always set it) + correctAPIAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, ipfsPort) + cm.logger.Info("Ensuring IPFS API address is correct", + zap.String("repo", ipfsRepoPath), + zap.Int("port", ipfsPort), + zap.String("correct_address", correctAPIAddr)) + + fixCmd := exec.Command("ipfs", "config", "--json", "Addresses.API", correctAPIAddr) + fixCmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath) + if err := fixCmd.Run(); err != nil { + cm.logger.Warn("Failed to fix IPFS API address", zap.Error(err)) + return fmt.Errorf("failed to set IPFS API address: %w", err) + } + + // Always ensure Gateway address is correct + correctGatewayAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, gatewayPort) + cm.logger.Info("Ensuring IPFS Gateway address is correct", + zap.String("repo", ipfsRepoPath), + zap.Int("port", gatewayPort), + zap.String("correct_address", correctGatewayAddr)) + + fixCmd = exec.Command("ipfs", "config", "--json", "Addresses.Gateway", correctGatewayAddr) + fixCmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath) + if err := fixCmd.Run(); err != nil { + cm.logger.Warn("Failed to fix IPFS Gateway address", zap.Error(err)) + return fmt.Errorf("failed to set IPFS Gateway address: %w", err) + } + + // Check if IPFS daemon is running - if so, it may need to be restarted for changes to take effect + // We can't restart it from here (it's managed by Makefile/systemd), but we can warn + if cm.isIPFSRunning(ipfsPort) { + cm.logger.Warn("IPFS daemon appears to be running - it may need to be restarted for config changes to take effect", + zap.Int("port", ipfsPort), + zap.String("repo", ipfsRepoPath)) + } + + return nil +} + +// isIPFSRunning checks if IPFS daemon is running by attempting to connect to the API +func (cm *ClusterConfigManager) isIPFSRunning(port int) bool { + client := &http.Client{ + Timeout: 1 * time.Second, + } + resp, err := client.Get(fmt.Sprintf("http://127.0.0.1:%d/api/v0/id", port)) + if 
err != nil { + return false + } + resp.Body.Close() + return resp.StatusCode == 200 +} diff --git a/pkg/node/node.go b/pkg/node/node.go index 1687b73..5a4ec9b 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -22,6 +22,7 @@ import ( "github.com/DeBrosOfficial/network/pkg/config" "github.com/DeBrosOfficial/network/pkg/discovery" "github.com/DeBrosOfficial/network/pkg/encryption" + "github.com/DeBrosOfficial/network/pkg/ipfs" "github.com/DeBrosOfficial/network/pkg/logging" "github.com/DeBrosOfficial/network/pkg/pubsub" database "github.com/DeBrosOfficial/network/pkg/rqlite" @@ -45,6 +46,9 @@ type Node struct { // Discovery discoveryManager *discovery.Manager + + // IPFS Cluster config manager + clusterConfigManager *ipfs.ClusterConfigManager } // NewNode creates a new network node @@ -631,6 +635,14 @@ func (n *Node) Start(ctx context.Context) error { return fmt.Errorf("failed to start LibP2P: %w", err) } + // Initialize IPFS Cluster configuration if enabled + if n.config.Database.IPFS.ClusterAPIURL != "" { + if err := n.startIPFSClusterConfig(); err != nil { + n.logger.ComponentWarn(logging.ComponentNode, "Failed to initialize IPFS Cluster config", zap.Error(err)) + // Don't fail node startup if cluster config fails + } + } + // Start RQLite with cluster discovery if err := n.startRQLite(ctx); err != nil { return fmt.Errorf("failed to start RQLite: %w", err) @@ -651,3 +663,41 @@ func (n *Node) Start(ctx context.Context) error { return nil } + +// startIPFSClusterConfig initializes and ensures IPFS Cluster configuration +func (n *Node) startIPFSClusterConfig() error { + n.logger.ComponentInfo(logging.ComponentNode, "Initializing IPFS Cluster configuration") + + // Create config manager + cm, err := ipfs.NewClusterConfigManager(n.config, n.logger.Logger) + if err != nil { + return fmt.Errorf("failed to create cluster config manager: %w", err) + } + n.clusterConfigManager = cm + + // Fix IPFS config addresses (localhost -> 127.0.0.1) before ensuring cluster config + if err := cm.FixIPFSConfigAddresses(); err != nil { + n.logger.ComponentWarn(logging.ComponentNode, "Failed to fix IPFS config addresses", zap.Error(err)) + // Don't fail startup if config fix fails - cluster config will handle it + } + + // Ensure configuration exists and is correct + if err := cm.EnsureConfig(); err != nil { + return fmt.Errorf("failed to ensure cluster config: %w", err) + } + + // If this is not the bootstrap node, try to update bootstrap peer info + if n.config.Node.Type != "bootstrap" && len(n.config.Discovery.BootstrapPeers) > 0 { + // Try to find bootstrap cluster API URL from config + // For now, we'll discover it from the first bootstrap peer + // In a real scenario, you might want to configure this explicitly + bootstrapClusterAPI := "http://localhost:9094" // Default bootstrap cluster API + if err := cm.UpdateBootstrapPeers(bootstrapClusterAPI); err != nil { + n.logger.ComponentWarn(logging.ComponentNode, "Failed to update bootstrap peers, will retry later", zap.Error(err)) + // Don't fail - peers can connect later via mDNS or manual config + } + } + + n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized") + return nil +} From fbdfa23c7761ad20c5cd3966fa5937eeed1e89fd Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Wed, 5 Nov 2025 17:32:18 +0200 Subject: [PATCH 06/57] feat: enhance IPFS and Cluster integration in setup - Added automatic setup for IPFS and IPFS Cluster during the network setup process. 
- Implemented initialization of IPFS repositories and Cluster configurations for each node. - Enhanced Makefile to support starting IPFS and Cluster daemons with improved logging. - Introduced a new documentation guide for IPFS Cluster setup, detailing configuration and verification steps. - Updated changelog to reflect the new features and improvements. --- pkg/cli/config_commands.go | 2 +- pkg/client/defaults_test.go | 9 ++++++--- pkg/config/validate_test.go | 10 +++++----- pkg/gateway/anon_proxy_handler.go | 2 +- pkg/gateway/gateway.go | 4 ++-- pkg/node/node_test.go | 10 +++++----- 6 files changed, 20 insertions(+), 17 deletions(-) diff --git a/pkg/cli/config_commands.go b/pkg/cli/config_commands.go index 208aac7..a7043af 100644 --- a/pkg/cli/config_commands.go +++ b/pkg/cli/config_commands.go @@ -286,7 +286,7 @@ func initFullStack(force bool) { fmt.Printf("✅ Generated bootstrap identity: %s (Peer ID: %s)\n", bootstrapIdentityPath, bootstrapInfo.PeerID.String()) // Construct bootstrap multiaddr - bootstrapMultiaddr := fmt.Sprintf("/ip4/localhost/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String()) + bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String()) fmt.Printf(" Bootstrap multiaddr: %s\n", bootstrapMultiaddr) // Generate configs for all nodes... diff --git a/pkg/client/defaults_test.go b/pkg/client/defaults_test.go index a686094..82cdbc2 100644 --- a/pkg/client/defaults_test.go +++ b/pkg/client/defaults_test.go @@ -11,7 +11,7 @@ func TestDefaultBootstrapPeersNonEmpty(t *testing.T) { old := os.Getenv("DEBROS_BOOTSTRAP_PEERS") t.Cleanup(func() { os.Setenv("DEBROS_BOOTSTRAP_PEERS", old) }) // Set a valid bootstrap peer - validPeer := "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" + validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" _ = os.Setenv("DEBROS_BOOTSTRAP_PEERS", validPeer) peers := DefaultBootstrapPeers() if len(peers) == 0 { @@ -50,8 +50,11 @@ func TestNormalizeEndpoints(t *testing.T) { } func TestEndpointFromMultiaddr(t *testing.T) { - ma, _ := multiaddr.NewMultiaddr("/ip4/localhost/tcp/4001") - if ep := endpointFromMultiaddr(ma, 5001); ep != "http://localhost:5001" { + ma, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001") + if err != nil { + t.Fatalf("failed to create multiaddr: %v", err) + } + if ep := endpointFromMultiaddr(ma, 5001); ep != "http://127.0.0.1:5001" { t.Fatalf("unexpected endpoint: %s", ep) } } diff --git a/pkg/config/validate_test.go b/pkg/config/validate_test.go index f351e9d..79a829f 100644 --- a/pkg/config/validate_test.go +++ b/pkg/config/validate_test.go @@ -7,7 +7,7 @@ import ( // validConfigForType returns a valid config for the given node type func validConfigForType(nodeType string) *Config { - validPeer := "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" + validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" cfg := &Config{ Node: NodeConfig{ Type: nodeType, @@ -205,7 +205,7 @@ func TestValidateRQLiteJoinAddress(t *testing.T) { } func TestValidateBootstrapPeers(t *testing.T) { - validPeer := "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" + validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj" tests := []struct { name string nodeType string @@ -217,9 +217,9 @@ func TestValidateBootstrapPeers(t *testing.T) { {"bootstrap with peer", "bootstrap", []string{validPeer}, false}, 
{"bootstrap without peer", "bootstrap", []string{}, false}, {"invalid multiaddr", "node", []string{"invalid"}, true}, - {"missing p2p", "node", []string{"/ip4/localhost/tcp/4001"}, true}, + {"missing p2p", "node", []string{"/ip4/127.0.0.1/tcp/4001"}, true}, {"duplicate peer", "node", []string{validPeer, validPeer}, true}, - {"invalid port", "node", []string{"/ip4/localhost/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true}, + {"invalid port", "node", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true}, } for _, tt := range tests { @@ -397,7 +397,7 @@ func TestValidateCompleteConfig(t *testing.T) { }, Discovery: DiscoveryConfig{ BootstrapPeers: []string{ - "/ip4/localhost/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj", + "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj", }, DiscoveryInterval: 15 * time.Second, BootstrapPort: 4001, diff --git a/pkg/gateway/anon_proxy_handler.go b/pkg/gateway/anon_proxy_handler.go index 7b0cd2d..692434d 100644 --- a/pkg/gateway/anon_proxy_handler.go +++ b/pkg/gateway/anon_proxy_handler.go @@ -234,7 +234,7 @@ func isPrivateOrLocalHost(host string) bool { } // Check for localhost variants - if host == "localhost" || host == "localhost" || host == "::1" { + if host == "localhost" || host == "::1" { return true } diff --git a/pkg/gateway/gateway.go b/pkg/gateway/gateway.go index e14a043..546293d 100644 --- a/pkg/gateway/gateway.go +++ b/pkg/gateway/gateway.go @@ -371,7 +371,7 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger } // Skip localhost loopback addresses (we'll use localhost:3320 as fallback) - if ip == "localhost" || ip == "::1" || ip == "localhost" { + if ip == "localhost" || ip == "::1" { continue } @@ -402,7 +402,7 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger } // Skip localhost - if ip == "localhost" || ip == "::1" || ip == "localhost" { + if ip == "localhost" || ip == "::1" { continue } diff --git a/pkg/node/node_test.go b/pkg/node/node_test.go index b07bb05..8ee0ab4 100644 --- a/pkg/node/node_test.go +++ b/pkg/node/node_test.go @@ -177,13 +177,13 @@ func TestHashBootstrapConnections(t *testing.T) { } // Create two hosts (A and B) listening on localhost TCP - hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) + hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) if err != nil { t.Fatalf("libp2p.New (A): %v", err) } defer hA.Close() - hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) + hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) if err != nil { t.Fatalf("libp2p.New (B): %v", err) } @@ -244,19 +244,19 @@ func TestHashBootstrapConnections(t *testing.T) { } // Create three hosts (A, B, C) listening on localhost TCP - hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) + hA, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) if err != nil { t.Fatalf("libp2p.New (A): %v", err) } defer hA.Close() - hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) + hB, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) if err != nil { t.Fatalf("libp2p.New (B): %v", err) } defer hB.Close() - hC, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/localhost/tcp/0")) + hC, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0")) if err != nil { t.Fatalf("libp2p.New (C): %v", err) } From 
a7d21d421744d4704e8ec91e6f107c6f2b6bc5d1 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Thu, 6 Nov 2025 06:25:41 +0200 Subject: [PATCH 07/57] remove docs files --- docs/ipfs-cluster-setup.md | 171 ------------------------------------- 1 file changed, 171 deletions(-) delete mode 100644 docs/ipfs-cluster-setup.md diff --git a/docs/ipfs-cluster-setup.md b/docs/ipfs-cluster-setup.md deleted file mode 100644 index 65e606f..0000000 --- a/docs/ipfs-cluster-setup.md +++ /dev/null @@ -1,171 +0,0 @@ -# IPFS Cluster Setup Guide - -This guide explains how IPFS Cluster is configured to run on every DeBros Network node. - -## Overview - -Each DeBros Network node runs its own IPFS Cluster peer, enabling distributed pinning and replication across the network. The cluster uses CRDT consensus for automatic peer discovery. - -## Architecture - -- **IPFS (Kubo)**: Runs on each node, handles content storage and retrieval -- **IPFS Cluster**: Runs on each node, manages pinning and replication -- **Cluster Consensus**: Uses CRDT (instead of Raft) for simpler multi-node setup - -## Automatic Setup - -When you run `network-cli setup`, the following happens automatically: - -1. IPFS (Kubo) and IPFS Cluster are installed -2. IPFS repository is initialized for each node -3. IPFS Cluster service.json config is generated -4. Systemd services are created and started: - - `debros-ipfs` - IPFS daemon - - `debros-ipfs-cluster` - IPFS Cluster service - - `debros-node` - DeBros Network node (depends on cluster) - - `debros-gateway` - HTTP Gateway (depends on node) - -## Configuration - -### Node Configs - -Each node config (`~/.debros/bootstrap.yaml`, `~/.debros/node.yaml`, etc.) includes: - -```yaml -database: - ipfs: - cluster_api_url: "http://localhost:9094" # Local cluster API - api_url: "http://localhost:5001" # Local IPFS API - replication_factor: 3 # Desired replication -``` - -### Cluster Service Config - -Cluster service configs are stored at: - -- Bootstrap: `~/.debros/bootstrap/ipfs-cluster/service.json` -- Nodes: `~/.debros/node/ipfs-cluster/service.json` - -Key settings: - -- **Consensus**: CRDT (automatic peer discovery) -- **API Listen**: `0.0.0.0:9094` (REST API) -- **Cluster Listen**: `0.0.0.0:9096` (peer-to-peer) -- **Secret**: Shared cluster secret stored at `~/.debros/cluster-secret` - -## Verification - -### Check Cluster Peers - -From any node, verify all cluster peers are connected: - -```bash -sudo -u debros ipfs-cluster-ctl --host http://localhost:9094 peers ls -``` - -You should see all cluster peers listed (bootstrap, node1, node2, etc.). - -### Check IPFS Daemon - -Verify IPFS is running: - -```bash -sudo -u debros ipfs daemon --repo-dir=~/.debros/bootstrap/ipfs/repo -# Or for regular nodes: -sudo -u debros ipfs daemon --repo-dir=~/.debros/node/ipfs/repo -``` - -### Check Service Status - -```bash -network-cli service status all -``` - -Should show: - -- `debros-ipfs` - running -- `debros-ipfs-cluster` - running -- `debros-node` - running -- `debros-gateway` - running - -## Troubleshooting - -### Cluster Peers Not Connecting - -If peers aren't discovering each other: - -1. **Check firewall**: Ensure ports 9096 (cluster swarm) and 9094 (cluster API) are open -2. **Verify secret**: All nodes must use the same cluster secret from `~/.debros/cluster-secret` -3. 
**Check logs**: `journalctl -u debros-ipfs-cluster -f` - -### Not Enough Peers Error - -If you see "not enough peers to allocate CID" errors: - -- The cluster needs at least `replication_factor` peers running -- Check that all nodes have `debros-ipfs-cluster` service running -- Verify with `ipfs-cluster-ctl peers ls` - -### IPFS Not Starting - -If IPFS daemon fails to start: - -1. Check IPFS repo exists: `ls -la ~/.debros/bootstrap/ipfs/repo/` -2. Check permissions: `chown -R debros:debros ~/.debros/bootstrap/ipfs/` -3. Check logs: `journalctl -u debros-ipfs -f` - -## Manual Setup (If Needed) - -If automatic setup didn't work, you can manually initialize: - -### 1. Initialize IPFS - -```bash -sudo -u debros ipfs init --profile=server --repo-dir=~/.debros/bootstrap/ipfs/repo -sudo -u debros ipfs config --json Addresses.API '["/ip4/localhost/tcp/5001"]' --repo-dir=~/.debros/bootstrap/ipfs/repo -``` - -### 2. Initialize Cluster - -```bash -# Generate or get cluster secret -CLUSTER_SECRET=$(cat ~/.debros/cluster-secret) - -# Initialize cluster (will create service.json) -sudo -u debros ipfs-cluster-service init --consensus crdt -``` - -### 3. Start Services - -```bash -systemctl start debros-ipfs -systemctl start debros-ipfs-cluster -systemctl start debros-node -systemctl start debros-gateway -``` - -## Ports - -- **4001**: IPFS swarm (LibP2P) -- **5001**: IPFS HTTP API -- **8080**: IPFS Gateway (optional) -- **9094**: IPFS Cluster REST API -- **9096**: IPFS Cluster swarm (LibP2P) - -## Replication Factor - -The default replication factor is 3, meaning content is pinned to 3 cluster peers. This requires at least 3 nodes running cluster peers. - -To change replication factor, edit node configs: - -```yaml -database: - ipfs: - replication_factor: 1 # For single-node development -``` - -## Security Notes - -- Cluster secret is stored at `~/.debros/cluster-secret` (mode 0600) -- Cluster API (port 9094) should be firewalled in production -- IPFS API (port 5001) should only be accessible locally From 5b21774e0487788fdc2d6e3a3146be7f69c98b59 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Fri, 7 Nov 2025 10:33:25 +0200 Subject: [PATCH 08/57] feat: add cache multi-get handler and improve API key extraction - Implemented a new cacheMultiGetHandler to retrieve multiple keys from the Olric cache in a single request. - Enhanced the extractAPIKey function to prioritize the X-API-Key header and improve handling of non-JWT Bearer tokens. - Updated routes to include the new multi-get endpoint for cache operations. --- CHANGELOG.md | 15 ++++++ Makefile | 2 +- pkg/gateway/cache_handlers.go | 99 ++++++++++++++++++++++++++++++++--- pkg/gateway/middleware.go | 47 +++++++++++------ pkg/gateway/routes.go | 1 + 5 files changed, 140 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8502398..6fe5b33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.57.0] - 2025-11-07 + +### Added +- Added a new endpoint `/v1/cache/mget` to retrieve multiple keys from the distributed cache in a single request. + +### Changed +- Improved API key extraction logic to prioritize the `X-API-Key` header and better handle different authorization schemes (Bearer, ApiKey) while avoiding confusion with JWTs. +- Refactored cache retrieval logic to use a dedicated function for decoding values from the distributed cache. 
+ +### Deprecated + +### Removed + +### Fixed +\n ## [0.56.0] - 2025-11-05 diff --git a/Makefile b/Makefile index 27e3b85..ca4347b 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.56.0 +VERSION := 0.57.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/pkg/gateway/cache_handlers.go b/pkg/gateway/cache_handlers.go index 1796b7e..58c2beb 100644 --- a/pkg/gateway/cache_handlers.go +++ b/pkg/gateway/cache_handlers.go @@ -80,9 +80,22 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) { return } - // Try to decode the value from Olric - // Values stored as JSON bytes need to be deserialized, while basic types - // (strings, numbers, bools) can be retrieved directly + value, err := decodeValueFromOlric(gr) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to decode value: %v", err)) + return + } + + writeJSON(w, http.StatusOK, map[string]any{ + "key": req.Key, + "value": value, + "dmap": req.DMap, + }) +} + +// decodeValueFromOlric decodes a value from Olric GetResponse +// Handles JSON-serialized complex types and basic types (string, number, bool) +func decodeValueFromOlric(gr *olriclib.GetResponse) (any, error) { var value any // First, try to get as bytes (for JSON-serialized complex types) @@ -113,10 +126,84 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) { } } + return value, nil +} + +func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) { + if g.olricClient == nil { + writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized") + return + } + + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + DMap string `json:"dmap"` // Distributed map name + Keys []string `json:"keys"` // Keys to retrieve + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid json body") + return + } + + if strings.TrimSpace(req.DMap) == "" { + writeError(w, http.StatusBadRequest, "dmap is required") + return + } + + if len(req.Keys) == 0 { + writeError(w, http.StatusBadRequest, "keys array is required and cannot be empty") + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second) + defer cancel() + + client := g.olricClient.GetClient() + dm, err := client.NewDMap(req.DMap) + if err != nil { + writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err)) + return + } + + // Get all keys and collect results + var results []map[string]any + for _, key := range req.Keys { + if strings.TrimSpace(key) == "" { + continue // Skip empty keys + } + + gr, err := dm.Get(ctx, key) + if err != nil { + // Skip keys that are not found - don't include them in results + // This matches the SDK's expectation that only found keys are returned + if err == olriclib.ErrKeyNotFound { + continue + } + // For other errors, log but continue with other keys + // We don't want one bad key to fail the entire request + continue + } + + value, err := decodeValueFromOlric(gr) + if err != nil { + // If we can't decode, skip this key + continue + } + + results = append(results, map[string]any{ + "key": 
key,
+			"value": value,
+		})
+	}
+
 	writeJSON(w, http.StatusOK, map[string]any{
-		"key":   req.Key,
-		"value": value,
-		"dmap":  req.DMap,
+		"results": results,
+		"dmap":    req.DMap,
 	})
 }
 
diff --git a/pkg/gateway/middleware.go b/pkg/gateway/middleware.go
index 0924488..d92f9ac 100644
--- a/pkg/gateway/middleware.go
+++ b/pkg/gateway/middleware.go
@@ -131,27 +131,40 @@ func (g *Gateway) authMiddleware(next http.Handler) http.Handler {
 }
 
 // extractAPIKey extracts API key from Authorization, X-API-Key header, or query parameters
+// Note: Bearer tokens that look like JWTs (two dots) are skipped; those are handled by the JWT auth path
+// X-API-Key header is preferred when both Authorization and X-API-Key are present
 func extractAPIKey(r *http.Request) string {
-	// Prefer Authorization header
-	auth := r.Header.Get("Authorization")
-	if auth != "" {
-		// Support "Bearer " and "ApiKey "
-		lower := strings.ToLower(auth)
-		if strings.HasPrefix(lower, "bearer ") {
-			return strings.TrimSpace(auth[len("Bearer "):])
-		}
-		if strings.HasPrefix(lower, "apikey ") {
-			return strings.TrimSpace(auth[len("ApiKey "):])
-		}
-		// If header has no scheme, treat the whole value as token (lenient for dev)
-		if !strings.Contains(auth, " ") {
-			return strings.TrimSpace(auth)
-		}
-	}
-	// Fallback to X-API-Key header
+	// Prefer X-API-Key header (most explicit) - check this first
 	if v := strings.TrimSpace(r.Header.Get("X-API-Key")); v != "" {
 		return v
 	}
+
+	// Check Authorization header for ApiKey scheme or non-JWT Bearer tokens
+	auth := r.Header.Get("Authorization")
+	if auth != "" {
+		lower := strings.ToLower(auth)
+		if strings.HasPrefix(lower, "bearer ") {
+			tok := strings.TrimSpace(auth[len("Bearer "):])
+			// Bearer tokens with two dots look like JWTs and belong to the
+			// JWT auth path; plain Bearer tokens are accepted as API keys.
+			if strings.Count(tok, ".") != 2 {
+				// Not a JWT; treat it as an API key (backward compatibility)
+				return tok
+			}
+			// A JWT-looking token falls through to the remaining
+			// extraction methods below.
+		} else if strings.HasPrefix(lower, "apikey ") {
+			return strings.TrimSpace(auth[len("ApiKey "):])
+		} else if !strings.Contains(auth, " ") {
+			// If header has no scheme, treat the whole value as token (lenient for dev)
+			// But skip if it looks like a JWT (has 2 dots)
+			tok := strings.TrimSpace(auth)
+			if strings.Count(tok, ".") != 2 {
+				return tok
+			}
+		}
+	}
+
 	// Fallback to query parameter (for WebSocket support)
 	if v := strings.TrimSpace(r.URL.Query().Get("api_key")); v != "" {
 		return v
diff --git a/pkg/gateway/routes.go b/pkg/gateway/routes.go
index 7ab103a..531bf24 100644
--- a/pkg/gateway/routes.go
+++ b/pkg/gateway/routes.go
@@ -50,6 +50,7 @@ func (g *Gateway) Routes() http.Handler {
 	// cache endpoints (Olric)
 	mux.HandleFunc("/v1/cache/health", g.cacheHealthHandler)
 	mux.HandleFunc("/v1/cache/get", g.cacheGetHandler)
+	mux.HandleFunc("/v1/cache/mget", g.cacheMultiGetHandler)
 	mux.HandleFunc("/v1/cache/put", g.cachePutHandler)
 	mux.HandleFunc("/v1/cache/delete", g.cacheDeleteHandler)
 	mux.HandleFunc("/v1/cache/scan", g.cacheScanHandler)

From 50f7abf37604fe781ea16fe3c8f49830aa43ed46 Mon Sep 17 00:00:00 2001
From: anonpenguin23
Date: Fri, 7 Nov 2025 16:51:08 +0200
Subject: [PATCH 09/57] feat: enhance IPFS configuration and logging in CLI

- Added IPFS cluster API and HTTP API configuration options to node and bootstrap configurations.
- Improved the generation of IPFS-related URLs and parameters for better integration.
- Enhanced error logging in cache handlers to provide more context on failures during cache operations. --- CHANGELOG.md | 15 ++++++++++++++ Makefile | 2 +- pkg/cli/config_commands.go | 37 +++++++++++++++++++++++++++++++++-- pkg/cli/setup.go | 12 +++++------- pkg/gateway/cache_handlers.go | 10 ++++++++++ 5 files changed, 66 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fe5b33..3c68f26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.58.0] - 2025-11-07 + +### Added +- Added default configuration for IPFS Cluster and IPFS API settings in node and gateway configurations. +- Added `ipfs` configuration section to node configuration, including settings for cluster API URL, replication factor, and encryption. + +### Changed +- Improved error logging for cache operations in the Gateway. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.57.0] - 2025-11-07 ### Added diff --git a/Makefile b/Makefile index ca4347b..4262b76 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.57.0 +VERSION := 0.58.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/pkg/cli/config_commands.go b/pkg/cli/config_commands.go index a7043af..ebd6326 100644 --- a/pkg/cli/config_commands.go +++ b/pkg/cli/config_commands.go @@ -405,6 +405,10 @@ func GenerateNodeConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftP joinAddr = "localhost:5001" } + // Calculate IPFS cluster API port (9094 for bootstrap, 9104+ for nodes) + // Pattern: Bootstrap (5001) -> 9094, Node2 (5002) -> 9104, Node3 (5003) -> 9114 + clusterAPIPort := 9094 + (rqliteHTTPPort-5001)*10 + return fmt.Sprintf(`node: id: "%s" type: "node" @@ -425,6 +429,17 @@ database: cluster_sync_interval: "30s" peer_inactivity_limit: "24h" min_cluster_size: 1 + ipfs: + # IPFS Cluster API endpoint for pin management (leave empty to disable) + cluster_api_url: "http://localhost:%d" + # IPFS HTTP API endpoint for content retrieval + api_url: "http://localhost:%d" + # Timeout for IPFS operations + timeout: "60s" + # Replication factor for pinned content + replication_factor: 3 + # Enable client-side encryption before upload + enable_encryption: true discovery: %s @@ -440,7 +455,7 @@ security: logging: level: "info" format: "console" -`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, joinAddr, peersYAML.String(), 4001, rqliteHTTPPort, rqliteRaftPort) +`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, joinAddr, clusterAPIPort, rqliteHTTPPort, peersYAML.String(), 4001, rqliteHTTPPort, rqliteRaftPort) } // GenerateBootstrapConfig generates a bootstrap configuration @@ -472,6 +487,17 @@ database: cluster_sync_interval: "30s" peer_inactivity_limit: "24h" min_cluster_size: 1 + ipfs: + # IPFS Cluster API endpoint for pin management (leave empty to disable) + cluster_api_url: "http://localhost:9094" + # IPFS HTTP API endpoint for content retrieval + api_url: "http://localhost:%d" + # Timeout for IPFS operations + timeout: "60s" + # Replication factor for pinned content + replication_factor: 3 + # Enable client-side encryption before upload + enable_encryption: true 
discovery: bootstrap_peers: [] @@ -487,7 +513,7 @@ security: logging: level: "info" format: "console" -`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, 4001, rqliteHTTPPort, rqliteRaftPort) +`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, rqliteHTTPPort, 4001, rqliteHTTPPort, rqliteRaftPort) } // GenerateGatewayConfig generates a gateway configuration @@ -515,5 +541,12 @@ func GenerateGatewayConfig(bootstrapPeers string) string { client_namespace: "default" rqlite_dsn: "" %s +olric_servers: + - "127.0.0.1:3320" +olric_timeout: "10s" +ipfs_cluster_api_url: "http://localhost:9094" +ipfs_api_url: "http://localhost:9105" +ipfs_timeout: "60s" +ipfs_replication_factor: 3 `, peersYAML.String()) } diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index a103b27..782877a 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -1794,13 +1794,11 @@ func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain olricYAML.WriteString(" - \"localhost:3320\"\n") } - // IPFS Cluster configuration (defaults - can be customized later) - ipfsYAML := `# IPFS Cluster configuration (optional) -# Uncomment and configure if you have IPFS Cluster running: -# ipfs_cluster_api_url: "http://localhost:9094" -# ipfs_api_url: "http://localhost:5001" -# ipfs_timeout: "60s" -# ipfs_replication_factor: 3 + // IPFS Cluster configuration + ipfsYAML := `ipfs_cluster_api_url: "http://localhost:9094" +ipfs_api_url: "http://localhost:9105" +ipfs_timeout: "60s" +ipfs_replication_factor: 3 ` return fmt.Sprintf(`listen_addr: ":6001" diff --git a/pkg/gateway/cache_handlers.go b/pkg/gateway/cache_handlers.go index 58c2beb..56ea931 100644 --- a/pkg/gateway/cache_handlers.go +++ b/pkg/gateway/cache_handlers.go @@ -8,7 +8,9 @@ import ( "strings" "time" + "github.com/DeBrosOfficial/network/pkg/logging" olriclib "github.com/olric-data/olric" + "go.uber.org/zap" ) // Cache HTTP handlers for Olric distributed cache @@ -76,12 +78,20 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) { writeError(w, http.StatusNotFound, "key not found") return } + g.logger.ComponentError(logging.ComponentGeneral, "failed to get key from cache", + zap.String("dmap", req.DMap), + zap.String("key", req.Key), + zap.Error(err)) writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get key: %v", err)) return } value, err := decodeValueFromOlric(gr) if err != nil { + g.logger.ComponentError(logging.ComponentGeneral, "failed to decode value from cache", + zap.String("dmap", req.DMap), + zap.String("key", req.Key), + zap.Error(err)) writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to decode value: %v", err)) return } From 93b25c42e45626995a1ec468c91a14237c4707c6 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Sat, 8 Nov 2025 11:59:38 +0200 Subject: [PATCH 10/57] feat: enhance IPFS configuration and logging in CLI - Added IPFS cluster API and HTTP API configuration options to node and bootstrap configurations. - Improved the generation of IPFS-related URLs and parameters for better integration. - Enhanced error logging in cache handlers to provide more context on failures during cache operations. 
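
As context for the async-pin change in this patch (see storage_handlers.go
below): uploads now return as soon as content is added, and pinning happens in
the background unless the client sends pin=false in the multipart form. A
minimal Go sketch of a client exercising the flag - the /v1/storage/upload
path, the "file" field name, the gateway address, and the API key are
illustrative assumptions; only the "pin" form field comes from this patch:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)

	// "file" as the form-file field name is an assumption for this sketch.
	fw, err := mw.CreateFormFile("file", "hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err := fw.Write([]byte("hello, ipfs")); err != nil {
		panic(err)
	}

	// The gateway defaults pin to true; sending "false" skips background pinning.
	if err := mw.WriteField("pin", "false"); err != nil {
		panic(err)
	}
	mw.Close()

	// Endpoint path and API key are placeholders for illustration.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:6001/v1/storage/upload", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	req.Header.Set("X-API-Key", "your-api-key")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status) // response body carries cid/name/size
}
```

Since pinning is now asynchronous, a 200 response no longer guarantees the CID
is pinned; callers that need the pin to have completed should verify it
separately.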
--- CHANGELOG.md | 15 ++++++++++ Makefile | 2 +- pkg/gateway/storage_handlers.go | 49 ++++++++++++++++++++++++++++----- pkg/ipfs/client.go | 32 ++++++++++++++++++--- 4 files changed, 86 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c68f26..55eedd5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.59.0] - 2025-11-08 + +### Added +- Added support for asynchronous pinning of uploaded files, improving upload speed. +- Added an optional `pin` flag to the storage upload endpoint to control whether content is pinned (defaults to true). + +### Changed +- Improved handling of IPFS Cluster responses during the Add operation to correctly process streaming NDJSON output. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.58.0] - 2025-11-07 ### Added diff --git a/Makefile b/Makefile index 4262b76..215f1c7 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.58.0 +VERSION := 0.59.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/pkg/gateway/storage_handlers.go b/pkg/gateway/storage_handlers.go index 16706b3..03e3f91 100644 --- a/pkg/gateway/storage_handlers.go +++ b/pkg/gateway/storage_handlers.go @@ -9,6 +9,7 @@ import ( "io" "net/http" "strings" + "time" "github.com/DeBrosOfficial/network/pkg/client" "github.com/DeBrosOfficial/network/pkg/logging" @@ -81,6 +82,7 @@ func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") var reader io.Reader var name string + var shouldPin bool = true // Default to true if strings.HasPrefix(contentType, "multipart/form-data") { // Handle multipart upload @@ -98,6 +100,11 @@ func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) { reader = file name = header.Filename + + // Parse pin flag from form (default: true) + if pinValue := r.FormValue("pin"); pinValue != "" { + shouldPin = strings.ToLower(pinValue) == "true" + } } else { // Handle JSON request with base64 data var req StorageUploadRequest @@ -120,6 +127,7 @@ func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) { reader = bytes.NewReader(data) name = req.Name + // For JSON requests, pin defaults to true (can be extended if needed) } // Add to IPFS @@ -131,19 +139,18 @@ func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) { return } - // Pin with replication factor - _, err = g.ipfsClient.Pin(ctx, addResp.Cid, name, replicationFactor) - if err != nil { - g.logger.ComponentWarn(logging.ComponentGeneral, "failed to pin content", zap.Error(err), zap.String("cid", addResp.Cid)) - // Still return success, but log the pin failure - } - + // Return response immediately - don't block on pinning response := StorageUploadResponse{ Cid: addResp.Cid, Name: addResp.Name, Size: addResp.Size, } + // Pin asynchronously in background if requested + if shouldPin { + go g.pinAsync(addResp.Cid, name, replicationFactor) + } + writeJSON(w, http.StatusOK, response) } @@ -322,6 +329,34 @@ func (g *Gateway) storageUnpinHandler(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, map[string]any{"status": 
"ok", "cid": path}) } +// pinAsync pins a CID asynchronously in the background with retry logic +// Retries once if the first attempt fails, then gives up +func (g *Gateway) pinAsync(cid, name string, replicationFactor int) { + ctx := context.Background() + + // First attempt + _, err := g.ipfsClient.Pin(ctx, cid, name, replicationFactor) + if err == nil { + g.logger.ComponentWarn(logging.ComponentGeneral, "async pin succeeded", zap.String("cid", cid)) + return + } + + // Log first failure + g.logger.ComponentWarn(logging.ComponentGeneral, "async pin failed, retrying once", + zap.Error(err), zap.String("cid", cid)) + + // Retry once after a short delay + time.Sleep(2 * time.Second) + _, err = g.ipfsClient.Pin(ctx, cid, name, replicationFactor) + if err != nil { + // Final failure - log and give up + g.logger.ComponentWarn(logging.ComponentGeneral, "async pin retry failed, giving up", + zap.Error(err), zap.String("cid", cid)) + } else { + g.logger.ComponentWarn(logging.ComponentGeneral, "async pin succeeded on retry", zap.String("cid", cid)) + } +} + // base64Decode decodes base64 string to bytes func base64Decode(s string) ([]byte, error) { return base64.StdEncoding.DecodeString(s) diff --git a/pkg/ipfs/client.go b/pkg/ipfs/client.go index 83dbb5d..ff41c42 100644 --- a/pkg/ipfs/client.go +++ b/pkg/ipfs/client.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "mime/multipart" @@ -177,12 +178,35 @@ func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddRe return nil, fmt.Errorf("add failed with status %d: %s", resp.StatusCode, string(body)) } - var result AddResponse - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { - return nil, fmt.Errorf("failed to decode add response: %w", err) + // IPFS Cluster streams NDJSON responses. We need to drain the entire stream + // to prevent the connection from closing prematurely, which would cancel + // the cluster's pinning operation. Read all JSON objects and keep the last one. + dec := json.NewDecoder(resp.Body) + var last AddResponse + var hasResult bool + + for { + var chunk AddResponse + if err := dec.Decode(&chunk); err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, fmt.Errorf("failed to decode add response: %w", err) + } + last = chunk + hasResult = true } - return &result, nil + if !hasResult { + return nil, fmt.Errorf("add response missing CID") + } + + // Ensure name is set if provided + if last.Name == "" && name != "" { + last.Name = name + } + + return &last, nil } // Pin pins a CID with specified replication factor From a5c30d014102b56ceef9d118c1096d05afc88db4 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Sat, 8 Nov 2025 12:59:54 +0200 Subject: [PATCH 11/57] feat: update IPFS configuration and enhance cluster secret management - Changed default IPFS API URL to port 5001 for better compatibility. - Enhanced the initialization process for IPFS and Cluster by adding support for bootstrap peers. - Introduced user prompts for cluster secret and swarm key generation, improving user experience during setup. - Updated service configuration to dynamically determine paths based on existing configuration files. 
--- CHANGELOG.md | 15 +++ Makefile | 2 +- pkg/cli/config_commands.go | 2 +- pkg/cli/setup.go | 232 +++++++++++++++++++++++++++++++------ 4 files changed, 213 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55eedd5..a5ce626 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.59.1] - 2025-11-08 + +### Added +\n +### Changed +- Improved interactive setup to prompt for existing IPFS Cluster secret and Swarm key, allowing easier joining of existing private networks. +- Updated default IPFS API URL in configuration files from `http://localhost:9105` to the standard `http://localhost:5001`. +- Updated systemd service files (debros-ipfs.service and debros-ipfs-cluster.service) to correctly determine and use the IPFS and Cluster repository paths. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.59.0] - 2025-11-08 ### Added diff --git a/Makefile b/Makefile index 215f1c7..61afc0f 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.59.0 +VERSION := 0.59.1 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/pkg/cli/config_commands.go b/pkg/cli/config_commands.go index ebd6326..6f5ea4b 100644 --- a/pkg/cli/config_commands.go +++ b/pkg/cli/config_commands.go @@ -545,7 +545,7 @@ olric_servers: - "127.0.0.1:3320" olric_timeout: "10s" ipfs_cluster_api_url: "http://localhost:9094" -ipfs_api_url: "http://localhost:9105" +ipfs_api_url: "http://localhost:5001" ipfs_timeout: "60s" ipfs_replication_factor: 3 `, peersYAML.String()) diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index 782877a..526dcbb 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -1513,7 +1513,18 @@ func generateConfigsInteractive(force bool) { } else { nodeID = "node" } - if err := initializeIPFSForNode(nodeID, vpsIP, isBootstrap); err != nil { + + // Parse bootstrap peers from config + var bootstrapPeerList []string + if bootstrapPeers != "" { + for _, p := range strings.Split(bootstrapPeers, ",") { + if p = strings.TrimSpace(p); p != "" { + bootstrapPeerList = append(bootstrapPeerList, p) + } + } + } + + if err := initializeIPFSForNode(nodeID, vpsIP, isBootstrap, bootstrapPeerList, reader); err != nil { fmt.Fprintf(os.Stderr, "⚠️ Failed to initialize IPFS/Cluster: %v\n", err) fmt.Fprintf(os.Stderr, " You may need to initialize IPFS and Cluster manually\n") } @@ -1796,7 +1807,7 @@ func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain // IPFS Cluster configuration ipfsYAML := `ipfs_cluster_api_url: "http://localhost:9094" -ipfs_api_url: "http://localhost:9105" +ipfs_api_url: "http://localhost:5001" ipfs_timeout: "60s" ipfs_replication_factor: 3 ` @@ -1841,24 +1852,72 @@ func generateOlricConfig(configPath, bindIP string, httpPort, memberlistPort int return nil } +// promptClusterSecret prompts the user for a cluster secret (64 hex characters) +func promptClusterSecret(reader *bufio.Reader) (string, error) { + for { + fmt.Printf("\n Enter cluster secret (64 hex characters, or press Enter to generate new): ") + input, _ := reader.ReadString('\n') + input = strings.TrimSpace(input) + + if input == "" { + // Generate new secret + bytes := 
make([]byte, 32) + if _, err := rand.Read(bytes); err != nil { + return "", fmt.Errorf("failed to generate cluster secret: %w", err) + } + secret := hex.EncodeToString(bytes) + fmt.Printf(" ✓ Generated new cluster secret\n") + return secret, nil + } + + // Validate input (must be 64 hex characters) + input = strings.ToUpper(input) + if len(input) != 64 { + fmt.Printf(" ❌ Invalid: cluster secret must be exactly 64 hex characters\n") + continue + } + + // Validate hex characters + valid := true + for _, char := range input { + if !((char >= '0' && char <= '9') || (char >= 'A' && char <= 'F')) { + valid = false + break + } + } + if !valid { + fmt.Printf(" ❌ Invalid: cluster secret must contain only hex characters (0-9, A-F)\n") + continue + } + + return input, nil + } +} + // getOrGenerateClusterSecret gets or generates a shared cluster secret -func getOrGenerateClusterSecret() (string, error) { +func getOrGenerateClusterSecret(reader *bufio.Reader) (string, error) { secretPath := "/home/debros/.debros/cluster-secret" // Try to read existing secret if data, err := os.ReadFile(secretPath); err == nil { secret := strings.TrimSpace(string(data)) if len(secret) == 64 { + fmt.Printf(" ✓ Using existing cluster secret\n") return secret, nil } } - // Generate new secret (64 hex characters = 32 bytes) - bytes := make([]byte, 32) - if _, err := rand.Read(bytes); err != nil { - return "", fmt.Errorf("failed to generate cluster secret: %w", err) + // Prompt for secret + fmt.Printf("\n🔐 Cluster Secret Configuration\n") + fmt.Printf(" The cluster secret is used to authenticate IPFS Cluster peers.\n") + fmt.Printf(" All nodes in the cluster must use the same secret.\n") + fmt.Printf(" If this is the first node, press Enter to generate a new secret.\n") + fmt.Printf(" If joining an existing cluster, enter the secret from the bootstrap node.\n") + + secret, err := promptClusterSecret(reader) + if err != nil { + return "", err } - secret := hex.EncodeToString(bytes) // Save secret if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil { @@ -1869,9 +1928,56 @@ func getOrGenerateClusterSecret() (string, error) { return secret, nil } +// promptSwarmKey prompts the user for a swarm key (64 hex characters) +func promptSwarmKey(reader *bufio.Reader) ([]byte, error) { + for { + fmt.Printf("\n Enter swarm key (64 hex characters, or press Enter to generate new): ") + input, _ := reader.ReadString('\n') + input = strings.TrimSpace(input) + + if input == "" { + // Generate new key (32 bytes) + keyBytes := make([]byte, 32) + if _, err := rand.Read(keyBytes); err != nil { + return nil, fmt.Errorf("failed to generate swarm key: %w", err) + } + + // Format as IPFS swarm key file + keyHex := strings.ToUpper(hex.EncodeToString(keyBytes)) + content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex) + fmt.Printf(" ✓ Generated new swarm key\n") + return []byte(content), nil + } + + // Validate input (must be 64 hex characters) + input = strings.ToUpper(input) + if len(input) != 64 { + fmt.Printf(" ❌ Invalid: swarm key must be exactly 64 hex characters\n") + continue + } + + // Validate hex characters + valid := true + for _, char := range input { + if !((char >= '0' && char <= '9') || (char >= 'A' && char <= 'F')) { + valid = false + break + } + } + if !valid { + fmt.Printf(" ❌ Invalid: swarm key must contain only hex characters (0-9, A-F)\n") + continue + } + + // Format as IPFS swarm key file + content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", input) + return []byte(content), nil + } +} 
+ // getOrGenerateSwarmKey gets or generates a shared IPFS swarm key // Returns the swarm key content as bytes (formatted for IPFS) -func getOrGenerateSwarmKey() ([]byte, error) { +func getOrGenerateSwarmKey(reader *bufio.Reader) ([]byte, error) { secretPath := "/home/debros/.debros/swarm.key" // Try to read existing key @@ -1879,28 +1985,31 @@ func getOrGenerateSwarmKey() ([]byte, error) { // Validate it's a proper swarm key format content := string(data) if strings.Contains(content, "/key/swarm/psk/1.0.0/") { + fmt.Printf(" ✓ Using existing swarm key\n") return data, nil } } - // Generate new key (32 bytes) - keyBytes := make([]byte, 32) - if _, err := rand.Read(keyBytes); err != nil { - return nil, fmt.Errorf("failed to generate swarm key: %w", err) + // Prompt for key + fmt.Printf("\n🔐 IPFS Swarm Key Configuration\n") + fmt.Printf(" The swarm key creates a private IPFS network.\n") + fmt.Printf(" All nodes in the network must use the same swarm key.\n") + fmt.Printf(" If this is the first node, press Enter to generate a new key.\n") + fmt.Printf(" If joining an existing network, enter the key from the bootstrap node.\n") + fmt.Printf(" Enter only the 64 hex characters (e.g., F62B18F11C5457F11E1863126ECAA259E76DA967121A291351FBFA2542B4BF56)\n") + + swarmKey, err := promptSwarmKey(reader) + if err != nil { + return nil, err } - // Format as IPFS swarm key file - keyHex := strings.ToUpper(hex.EncodeToString(keyBytes)) - content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex) - // Save key - if err := os.WriteFile(secretPath, []byte(content), 0600); err != nil { + if err := os.WriteFile(secretPath, swarmKey, 0600); err != nil { return nil, fmt.Errorf("failed to save swarm key: %w", err) } exec.Command("chown", "debros:debros", secretPath).Run() - fmt.Printf(" ✓ Generated private swarm key\n") - return []byte(content), nil + return swarmKey, nil } // ensureSwarmKey ensures the swarm key exists in the IPFS repo @@ -1924,17 +2033,17 @@ func ensureSwarmKey(repoPath string, swarmKey []byte) error { } // initializeIPFSForNode initializes IPFS and IPFS Cluster for a node -func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { +func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool, bootstrapPeers []string, reader *bufio.Reader) error { fmt.Printf(" Initializing IPFS and Cluster for node %s...\n", nodeID) // Get or generate cluster secret - secret, err := getOrGenerateClusterSecret() + secret, err := getOrGenerateClusterSecret(reader) if err != nil { return fmt.Errorf("failed to get cluster secret: %w", err) } // Get or generate swarm key for private network - swarmKey, err := getOrGenerateSwarmKey() + swarmKey, err := getOrGenerateSwarmKey(reader) if err != nil { return fmt.Errorf("failed to get swarm key: %w", err) } @@ -1988,7 +2097,7 @@ func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error { fmt.Printf(" Initializing IPFS Cluster...\n") // Generate cluster config - clusterConfig := generateClusterServiceConfig(nodeID, vpsIP, secret, isBootstrap) + clusterConfig := generateClusterServiceConfig(nodeID, vpsIP, secret, isBootstrap, bootstrapPeers) // Write config configJSON, err := json.MarshalIndent(clusterConfig, "", " ") @@ -2091,14 +2200,45 @@ type datastoreConfig struct { Path string `json:"path"` } +// extractClusterBootstrapAddresses extracts IPFS Cluster bootstrap addresses from node bootstrap peers +// IPFS Cluster uses port 9096 for cluster communication +// Note: We extract IP addresses, but cluster peer IDs will be 
discovered at runtime +// For CRDT consensus, bootstrap peers are optional but help with initial discovery +func extractClusterBootstrapAddresses(bootstrapPeers []string) []string { + var clusterBootstrap []string + + for _, peerAddr := range bootstrapPeers { + // Extract IP from multiaddr format: /ip4/IP/tcp/PORT/p2p/PEER_ID + ip := extractIPFromMultiaddr(peerAddr) + if ip != "" && ip != "127.0.0.1" && ip != "localhost" { + // Construct cluster bootstrap address (port 9096 is standard for IPFS Cluster) + // Note: We don't have the cluster peer ID yet, but we can construct the address + // IPFS Cluster CRDT will discover peers automatically, but having IPs helps + // For now, we'll leave bootstrap empty and rely on CRDT auto-discovery + // The IP addresses can be used later when cluster peer IDs are known + _ = ip // Store for potential future use + } + } + + // For now, return empty bootstrap list - CRDT consensus will auto-discover peers + // Bootstrap peers can be added later when cluster peer IDs are known + return clusterBootstrap +} + // generateClusterServiceConfig generates IPFS Cluster service.json config -func generateClusterServiceConfig(nodeID, vpsIP, secret string, isBootstrap bool) clusterServiceConfig { +func generateClusterServiceConfig(nodeID, vpsIP, secret string, isBootstrap bool, bootstrapPeers []string) clusterServiceConfig { clusterListenAddr := "/ip4/0.0.0.0/tcp/9096" restAPIListenAddr := "/ip4/0.0.0.0/tcp/9094" // For bootstrap node, use empty bootstrap list - // For other nodes, bootstrap list will be set when starting the service - bootstrap := []string{} + // For other nodes, extract bootstrap addresses from node config + // Note: IPFS Cluster CRDT consensus can auto-discover peers, so bootstrap is optional + var bootstrap []string + if !isBootstrap && len(bootstrapPeers) > 0 { + bootstrap = extractClusterBootstrapAddresses(bootstrapPeers) + // For now, bootstrap will be empty as we need cluster peer IDs + // CRDT will handle peer discovery automatically + } return clusterServiceConfig{ Cluster: clusterConfig{ @@ -2142,7 +2282,17 @@ func createSystemdServices() { fmt.Printf("🔧 Creating systemd services...\n") // IPFS service (runs on all nodes) - ipfsService := `[Unit] + // Determine IPFS path based on config file + var ipfsPath string + if _, err := os.Stat("/home/debros/.debros/node.yaml"); err == nil { + ipfsPath = "/home/debros/.debros/node/ipfs/repo" + } else if _, err := os.Stat("/home/debros/.debros/bootstrap.yaml"); err == nil { + ipfsPath = "/home/debros/.debros/bootstrap/ipfs/repo" + } else { + ipfsPath = "/home/debros/.debros/bootstrap/ipfs/repo" + } + + ipfsService := fmt.Sprintf(`[Unit] Description=IPFS Daemon After=network-online.target Wants=network-online.target @@ -2152,9 +2302,9 @@ Type=simple User=debros Group=debros Environment=HOME=/home/debros -ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export IPFS_PATH=/home/debros/.debros/node/ipfs/repo; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; else export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; fi' -ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/swarm.key ] && [ ! -f ${IPFS_PATH}/swarm.key ]; then cp /home/debros/.debros/swarm.key ${IPFS_PATH}/swarm.key && chmod 600 ${IPFS_PATH}/swarm.key; fi' -ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=${IPFS_PATH} +Environment=IPFS_PATH=%s +ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/swarm.key ] && [ ! 
-f %s/swarm.key ]; then cp /home/debros/.debros/swarm.key %s/swarm.key && chmod 600 %s/swarm.key; fi' +ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=%s Restart=always RestartSec=5 StandardOutput=journal @@ -2168,7 +2318,7 @@ ReadWritePaths=/home/debros [Install] WantedBy=multi-user.target -` +`, ipfsPath, ipfsPath, ipfsPath, ipfsPath, ipfsPath) if err := os.WriteFile("/etc/systemd/system/debros-ipfs.service", []byte(ipfsService), 0644); err != nil { fmt.Fprintf(os.Stderr, "❌ Failed to create IPFS service: %v\n", err) @@ -2176,7 +2326,17 @@ WantedBy=multi-user.target } // IPFS Cluster service (runs on all nodes) - clusterService := `[Unit] + // Determine Cluster path based on config file + var clusterPath string + if _, err := os.Stat("/home/debros/.debros/node.yaml"); err == nil { + clusterPath = "/home/debros/.debros/node/ipfs-cluster" + } else if _, err := os.Stat("/home/debros/.debros/bootstrap.yaml"); err == nil { + clusterPath = "/home/debros/.debros/bootstrap/ipfs-cluster" + } else { + clusterPath = "/home/debros/.debros/bootstrap/ipfs-cluster" + } + + clusterService := fmt.Sprintf(`[Unit] Description=IPFS Cluster Service After=debros-ipfs.service Wants=debros-ipfs.service @@ -2188,8 +2348,8 @@ User=debros Group=debros WorkingDirectory=/home/debros Environment=HOME=/home/debros -ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export CLUSTER_PATH=/home/debros/.debros/node/ipfs-cluster; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster; else export CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster; fi' -ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config ${CLUSTER_PATH}/service.json +Environment=CLUSTER_PATH=%s +ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config %s/service.json Restart=always RestartSec=5 StandardOutput=journal @@ -2203,7 +2363,7 @@ ReadWritePaths=/home/debros [Install] WantedBy=multi-user.target -` +`, clusterPath, clusterPath) if err := os.WriteFile("/etc/systemd/system/debros-ipfs-cluster.service", []byte(clusterService), 0644); err != nil { fmt.Fprintf(os.Stderr, "❌ Failed to create IPFS Cluster service: %v\n", err) From c726dfc401774a6ae1b9c61f06d0952811dfb276 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Sat, 8 Nov 2025 13:29:21 +0200 Subject: [PATCH 12/57] feat: update IPFS configuration and enhance cluster secret management - Changed default IPFS API URL to port 5001 for better compatibility. - Enhanced the initialization process for IPFS and Cluster by adding support for bootstrap peers. - Introduced user prompts for cluster secret and swarm key generation, improving user experience during setup. - Updated service configuration to dynamically determine paths based on existing configuration files. --- CHANGELOG.md | 19 +++ Makefile | 2 +- pkg/cli/setup.go | 50 +++++-- scripts/install-debros-network.sh | 238 ++++++++++++++++++++++++++++-- 4 files changed, 277 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a5ce626..8de2ddc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,25 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.59.2] - 2025-11-08 + +### Added +- Added health checks to the installation script to verify the gateway and node services are running after setup or upgrade. +- The installation script now attempts to verify the downloaded binary using checksums.txt if available. 
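
For reference, the checksum step added to the install script below hashes the
downloaded archive with sha256sum and compares it against the matching
"<hex>  <filename>" line in checksums.txt. A rough Go equivalent of that
check, under the assumption of those file-name conventions (the names in
main are placeholders):

```go
package main

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// verify hashes the archive and compares it to the entry for that file
// name in checksums.txt, the same comparison sha256sum -c performs.
func verify(archive, checksums string) (bool, error) {
	f, err := os.Open(archive)
	if err != nil {
		return false, err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return false, err
	}
	got := hex.EncodeToString(h.Sum(nil))

	cf, err := os.Open(checksums)
	if err != nil {
		return false, err
	}
	defer cf.Close()

	sc := bufio.NewScanner(cf)
	for sc.Scan() {
		// Each line is "<hex>  <filename>".
		fields := strings.Fields(sc.Text())
		if len(fields) == 2 && strings.HasSuffix(archive, fields[1]) {
			return fields[0] == got, nil
		}
	}
	return false, fmt.Errorf("no checksum entry for %s", archive)
}

func main() {
	ok, err := verify("debros-network_0.59.2_linux_amd64.tar.gz", "checksums.txt")
	fmt.Println(ok, err)
}
```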
+- Added checks in the CLI setup to ensure systemd is available before attempting to create service files. + +### Changed +- Improved the installation script to detect existing installations, stop services before upgrading, and restart them afterward to minimize downtime. +- Enhanced the CLI setup process by detecting the VPS IP address earlier and improving validation feedback for cluster secrets and swarm keys. +- Modified directory setup to log warnings instead of exiting if `chown` fails, providing manual instructions for fixing ownership issues. +- Improved the HTTPS configuration flow to check for port 80/443 availability before prompting for a domain name. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.59.1] - 2025-11-08 ### Added diff --git a/Makefile b/Makefile index 61afc0f..de2e029 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.59.1 +VERSION := 0.59.2 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/pkg/cli/setup.go b/pkg/cli/setup.go index 526dcbb..36993f5 100644 --- a/pkg/cli/setup.go +++ b/pkg/cli/setup.go @@ -1273,9 +1273,12 @@ func setupDirectories() { fmt.Fprintf(os.Stderr, "❌ Failed to create %s: %v\n", dir, err) os.Exit(1) } - // Change ownership to debros + // Change ownership to debros (log failures but continue) cmd := exec.Command("chown", "-R", "debros:debros", dir) - cmd.Run() + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to set ownership for %s: %v\n", dir, err) + fmt.Fprintf(os.Stderr, " This may cause permission issues - consider running: sudo chown -R debros:debros %s\n", dir) + } } fmt.Printf(" ✓ Directories created\n") @@ -1373,6 +1376,16 @@ func generateConfigsInteractive(force bool) { gatewayExists = true } + // Get VPS IP early for port checking + vpsIP, err := getVPSIPv4Address() + if err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Failed to detect IPv4 address: %v\n", err) + fmt.Fprintf(os.Stderr, " Using 0.0.0.0 as fallback. You may need to edit config files manually.\n") + vpsIP = "0.0.0.0" + } else { + fmt.Printf(" ✓ Detected IPv4 address: %s\n\n", vpsIP) + } + // If both configs exist and not forcing, skip configuration prompts if nodeExists && gatewayExists && !force { fmt.Printf(" ℹ️ Configuration files already exist (node.yaml and gateway.yaml)\n") @@ -1432,23 +1445,16 @@ func generateConfigsInteractive(force bool) { return } - // Get VPS IPv4 address - fmt.Printf("Detecting VPS IPv4 address...\n") - vpsIP, err := getVPSIPv4Address() - if err != nil { - fmt.Fprintf(os.Stderr, "⚠️ Failed to detect IPv4 address: %v\n", err) - fmt.Fprintf(os.Stderr, " Using 0.0.0.0 as fallback. You may need to edit config files manually.\n") - vpsIP = "0.0.0.0" - } else { - fmt.Printf(" ✓ Detected IPv4 address: %s\n\n", vpsIP) - } + // Create reader for prompts + reader := bufio.NewReader(os.Stdin) + + // vpsIP was already obtained earlier, continue with configuration // Ask about node type fmt.Printf("What type of node is this?\n") fmt.Printf(" 1. Bootstrap node (cluster leader)\n") fmt.Printf(" 2. 
Regular node (joins existing cluster)\n") fmt.Printf("Enter choice (1 or 2): ") - reader := bufio.NewReader(os.Stdin) choice, _ := reader.ReadString('\n') choice = strings.ToLower(strings.TrimSpace(choice)) @@ -1563,7 +1569,8 @@ func generateConfigsInteractive(force bool) { response = strings.ToLower(strings.TrimSpace(response)) if response == "yes" || response == "y" { - // Check if ports 80 and 443 are available + // Check if ports 80 and 443 are available BEFORE proceeding + fmt.Printf("\n Checking if ports 80 and 443 are available...\n") portsAvailable, portIssues := checkPorts80And443() if !portsAvailable { fmt.Fprintf(os.Stderr, "\n⚠️ Cannot enable HTTPS: %s is already in use\n", portIssues) @@ -1571,6 +1578,7 @@ func generateConfigsInteractive(force bool) { fmt.Fprintf(os.Stderr, " Continuing without HTTPS configuration...\n\n") enableHTTPS = false } else { + fmt.Printf(" ✓ Ports 80 and 443 are available\n") // Prompt for domain name domain = promptDomainForHTTPS(reader, vpsIP) if domain != "" { @@ -1873,7 +1881,7 @@ func promptClusterSecret(reader *bufio.Reader) (string, error) { // Validate input (must be 64 hex characters) input = strings.ToUpper(input) if len(input) != 64 { - fmt.Printf(" ❌ Invalid: cluster secret must be exactly 64 hex characters\n") + fmt.Printf(" ❌ Invalid: cluster secret must be exactly 64 hex characters (got %d)\n", len(input)) continue } @@ -1890,6 +1898,7 @@ func promptClusterSecret(reader *bufio.Reader) (string, error) { continue } + fmt.Printf(" ✓ Cluster secret validated (length: %d, all hex)\n", len(input)) return input, nil } } @@ -1952,7 +1961,7 @@ func promptSwarmKey(reader *bufio.Reader) ([]byte, error) { // Validate input (must be 64 hex characters) input = strings.ToUpper(input) if len(input) != 64 { - fmt.Printf(" ❌ Invalid: swarm key must be exactly 64 hex characters\n") + fmt.Printf(" ❌ Invalid: swarm key must be exactly 64 hex characters (got %d)\n", len(input)) continue } @@ -1970,6 +1979,7 @@ func promptSwarmKey(reader *bufio.Reader) ([]byte, error) { } // Format as IPFS swarm key file + fmt.Printf(" ✓ Swarm key validated (length: %d, all hex)\n", len(input)) content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", input) return []byte(content), nil } @@ -2281,6 +2291,14 @@ func generateClusterServiceConfig(nodeID, vpsIP, secret string, isBootstrap bool func createSystemdServices() { fmt.Printf("🔧 Creating systemd services...\n") + // Check if systemd is available + if _, err := os.Stat("/etc/systemd"); os.IsNotExist(err) { + fmt.Fprintf(os.Stderr, "⚠️ systemd not detected on this system\n") + fmt.Fprintf(os.Stderr, " Systemd service files cannot be created on non-systemd systems\n") + fmt.Fprintf(os.Stderr, " Please manually start services or use an alternative init system\n") + return + } + // IPFS service (runs on all nodes) // Determine IPFS path based on config file var ipfsPath string diff --git a/scripts/install-debros-network.sh b/scripts/install-debros-network.sh index a1bd4e5..d9b1959 100755 --- a/scripts/install-debros-network.sh +++ b/scripts/install-debros-network.sh @@ -11,7 +11,7 @@ # bash scripts/install-debros-network.sh set -e -trap 'echo -e "${RED}An error occurred. Installation aborted.${NOCOLOR}"; exit 1' ERR +trap 'error "An error occurred. 
Installation aborted."; execute_traps; exit 1' ERR # Color codes RED='\033[0;31m' @@ -26,10 +26,33 @@ GITHUB_REPO="DeBrosOfficial/network" GITHUB_API="https://api.github.com/repos/$GITHUB_REPO" INSTALL_DIR="/usr/local/bin" +# Upgrade detection flags +PREVIOUS_INSTALL=false +SETUP_EXECUTED=false +PREVIOUS_VERSION="" +LATEST_VERSION="" +VERSION_CHANGED=false + +# Cleanup handlers (for proper trap stacking) +declare -a CLEANUP_HANDLERS + log() { echo -e "${CYAN}[$(date '+%Y-%m-%d %H:%M:%S')]${NOCOLOR} $1"; } -error() { echo -e "${RED}[ERROR]${NOCOLOR} $1"; } +error() { echo -e "${RED}[ERROR]${NOCOLOR} $1" >&2; } success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; } -warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1"; } +warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1" >&2; } + +# Stack-based trap cleanup +push_trap() { + local handler="$1" + local signal="${2:-EXIT}" + CLEANUP_HANDLERS+=("$handler") +} + +execute_traps() { + for ((i=${#CLEANUP_HANDLERS[@]}-1; i>=0; i--)); do + eval "${CLEANUP_HANDLERS[$i]}" + done +} # REQUIRE INTERACTIVE MODE if [ ! -t 0 ]; then @@ -133,13 +156,21 @@ check_dependencies() { get_latest_release() { log "Fetching latest release information..." - # Get latest release (exclude pre-releases and nightly) - LATEST_RELEASE=$(curl -fsSL "$GITHUB_API/releases" | \ - grep -v "prerelease.*true" | \ - grep -v "draft.*true" | \ - grep '"tag_name"' | \ - head -1 | \ - cut -d'"' -f4) + # Check if jq is available for robust JSON parsing + if command -v jq &>/dev/null; then + # Use jq for structured JSON parsing + LATEST_RELEASE=$(curl -fsSL -H "Accept: application/vnd.github+json" "$GITHUB_API/releases" | \ + jq -r '.[] | select(.prerelease == false and .draft == false) | .tag_name' | head -1) + else + # Fallback to grep-based parsing + log "Note: jq not available, using basic parsing (consider installing jq for robustness)" + LATEST_RELEASE=$(curl -fsSL "$GITHUB_API/releases" | \ + grep -v "prerelease.*true" | \ + grep -v "draft.*true" | \ + grep '"tag_name"' | \ + head -1 | \ + cut -d'"' -f4) + fi if [ -z "$LATEST_RELEASE" ]; then error "Could not determine latest release" @@ -154,10 +185,11 @@ download_and_install() { # Construct download URL DOWNLOAD_URL="https://github.com/$GITHUB_REPO/releases/download/$LATEST_RELEASE/debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz" + CHECKSUM_URL="https://github.com/$GITHUB_REPO/releases/download/$LATEST_RELEASE/checksums.txt" # Create temporary directory TEMP_DIR=$(mktemp -d) - trap "rm -rf $TEMP_DIR" EXIT + push_trap "rm -rf $TEMP_DIR" EXIT # Download log "Downloading from: $DOWNLOAD_URL" @@ -166,6 +198,24 @@ download_and_install() { exit 1 fi + # Try to download and verify checksum + CHECKSUM_FILE="$TEMP_DIR/checksums.txt" + if curl -fsSL -o "$CHECKSUM_FILE" "$CHECKSUM_URL" 2>/dev/null; then + log "Verifying checksum..." + cd "$TEMP_DIR" + if command -v sha256sum &>/dev/null; then + if sha256sum -c "$CHECKSUM_FILE" --ignore-missing >/dev/null 2>&1; then + success "Checksum verified" + else + warning "Checksum verification failed (continuing anyway)" + fi + else + log "sha256sum not available, skipping checksum verification" + fi + else + log "Checksums not available for this release (continuing without verification)" + fi + # Extract log "Extracting network-cli..." 
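
The checksum step above shells out to `sha256sum -c --ignore-missing`. The same verification can be expressed as a standalone Go sketch; the file names here are illustrative, not the installer's actual paths, which are derived from the release tag:

```go
package main

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// verifySHA256 hashes the file at path and looks for a matching
// "<hex>  <name>" entry in checksumsFile, mirroring `sha256sum -c`.
func verifySHA256(path, checksumsFile string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	got := hex.EncodeToString(h.Sum(nil))

	c, err := os.Open(checksumsFile)
	if err != nil {
		return err
	}
	defer c.Close()

	sc := bufio.NewScanner(c)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) == 2 && fields[1] == filepath.Base(path) {
			if strings.EqualFold(fields[0], got) {
				return nil
			}
			return fmt.Errorf("checksum mismatch for %s", fields[1])
		}
	}
	// No entry for this file: tolerated, matching --ignore-missing.
	return nil
}

func main() {
	// Hypothetical file names for illustration only.
	if err := verifySHA256("debros-network_linux_amd64.tar.gz", "checksums.txt"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("checksum OK")
}
```
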
cd "$TEMP_DIR" @@ -179,6 +229,51 @@ download_and_install() { success "network-cli installed successfully" } +check_existing_installation() { + if command -v network-cli &>/dev/null 2>&1; then + PREVIOUS_INSTALL=true + PREVIOUS_VERSION=$(network-cli version 2>/dev/null | head -n1 || echo "unknown") + echo -e "" + echo -e "${YELLOW}⚠️ Existing installation detected: ${PREVIOUS_VERSION}${NOCOLOR}" + echo -e "" + + # Version will be compared after fetching latest release + # If they match, we skip the service stop/restart to minimize downtime + else + log "No previous installation detected - performing fresh install" + fi +} + +compare_versions() { + # Compare previous and latest versions + if [ "$PREVIOUS_INSTALL" = true ] && [ ! -z "$PREVIOUS_VERSION" ] && [ ! -z "$LATEST_VERSION" ]; then + if [ "$PREVIOUS_VERSION" = "$LATEST_VERSION" ]; then + VERSION_CHANGED=false + log "Installed version ($PREVIOUS_VERSION) matches latest release ($LATEST_VERSION)" + log "Skipping service restart - no upgrade needed" + return 0 + else + VERSION_CHANGED=true + log "Version change detected: $PREVIOUS_VERSION → $LATEST_VERSION" + log "Services will be stopped before updating." + echo -e "" + + # Check if services are running + if sudo network-cli service status all >/dev/null 2>&1; then + log "Stopping DeBros services before upgrade..." + log "Note: Anon (if running) will not be stopped as it may be managed separately" + if sudo network-cli service stop all; then + success "DeBros services stopped successfully" + else + warning "Failed to stop some services (continuing anyway)" + fi + else + log "DeBros services already stopped or not running" + fi + fi + fi +} + verify_installation() { if command -v network-cli &>/dev/null; then INSTALLED_VERSION=$(network-cli version 2>/dev/null || echo "unknown") @@ -190,6 +285,28 @@ verify_installation() { fi } +# Check if port 9050 is in use (Anon SOCKS port) +is_anon_running() { + # Check if port 9050 is listening + if command -v ss &>/dev/null; then + if ss -tlnp 2>/dev/null | grep -q ":9050"; then + return 0 + fi + elif command -v netstat &>/dev/null; then + if netstat -tlnp 2>/dev/null | grep -q ":9050"; then + return 0 + fi + elif command -v lsof &>/dev/null; then + # Try to check without sudo first (in case of passwordless sudo issues) + if sudo -n lsof -i :9050 >/dev/null 2>&1; then + return 0 + fi + fi + + # Fallback: assume Anon is not running if we can't determine + return 1 +} + install_anon() { echo -e "" echo -e "${BLUE}========================================${NOCOLOR}" @@ -197,16 +314,28 @@ install_anon() { echo -e "${BLUE}========================================${NOCOLOR}" echo -e "" - log "Installing Anyone relay for anonymous networking..." + log "Checking Anyone relay (Anon) status..." - # Check if anon is already installed - if command -v anon &>/dev/null; then - success "Anon already installed" + # Check if Anon is already running on port 9050 + if is_anon_running; then + success "Anon is already running on port 9050" + log "Skipping Anon installation - using existing instance" configure_anon_logs configure_firewall_for_anon return 0 fi + # Check if anon binary is already installed + if command -v anon &>/dev/null; then + success "Anon binary already installed" + log "Anon is installed but not running. You can start it manually if needed." + configure_anon_logs + configure_firewall_for_anon + return 0 + fi + + log "Installing Anyone relay for anonymous networking..." 
+ # Install via APT (official method from docs.anyone.io) log "Adding Anyone APT repository..." @@ -471,9 +600,47 @@ run_setup() { echo -e "" log "Running setup (requires sudo)..." + SETUP_EXECUTED=true sudo network-cli setup } +perform_health_check() { + echo -e "" + echo -e "${BLUE}========================================${NOCOLOR}" + log "Performing post-install health checks..." + echo -e "${BLUE}========================================${NOCOLOR}" + echo -e "" + + local health_ok=true + + # Give services a moment to start if they were just restarted + sleep 2 + + # Check gateway health + if curl -sf http://localhost:6001/health >/dev/null 2>&1; then + success "Gateway health check passed" + else + warning "Gateway health check failed - check logs with: sudo network-cli service logs gateway" + health_ok=false + fi + + # Check if node is running (may not respond immediately) + if sudo network-cli service status node >/dev/null 2>&1; then + success "Node service is running" + else + warning "Node service is not running - check with: sudo network-cli service status node" + health_ok=false + fi + + echo -e "" + if [ "$health_ok" = true ]; then + success "All health checks passed!" + else + warning "Some health checks failed - review logs and start services if needed" + fi + echo -e "" +} + show_completion() { echo -e "" echo -e "${BLUE}========================================================================${NOCOLOR}" @@ -496,6 +663,11 @@ show_completion() { echo -e " • View Anon logs: ${CYAN}sudo tail -f /home/debros/.debros/logs/anon/notices.log${NOCOLOR}" echo -e " • Proxy endpoint: ${CYAN}POST http://localhost:6001/v1/proxy/anon${NOCOLOR}" echo -e "" + echo -e "${CYAN}🔐 Shared Secrets (for adding more nodes):${NOCOLOR}" + echo -e " • Swarm key: ${CYAN}cat /home/debros/.debros/swarm.key${NOCOLOR}" + echo -e " • Cluster secret: ${CYAN}sudo cat /home/debros/.debros/cluster-secret${NOCOLOR}" + echo -e " • Copy these to bootstrap node before setting up secondary nodes${NOCOLOR}" + echo -e "" echo -e "${CYAN}Documentation: https://docs.debros.io${NOCOLOR}" echo -e "" } @@ -507,6 +679,9 @@ main() { log "Starting DeBros Network installation..." echo -e "" + # Check for existing installation and stop services if needed + check_existing_installation + detect_os check_architecture check_dependencies @@ -518,6 +693,11 @@ main() { echo -e "" get_latest_release + LATEST_VERSION="$LATEST_RELEASE" + + # Compare versions and determine if upgrade is needed + compare_versions + download_and_install # Verify installation @@ -531,6 +711,34 @@ main() { # Run setup run_setup + # If this was an upgrade and setup wasn't run, restart services + if [ "$PREVIOUS_INSTALL" = true ] && [ "$VERSION_CHANGED" = true ] && [ "$SETUP_EXECUTED" = false ]; then + echo -e "" + log "Restarting services that were stopped earlier..." + + # Check services individually and provide detailed feedback + failed_services=() + if ! 
sudo network-cli service start all 2>&1 | tee /tmp/service-start.log; then + # Parse which services failed + while IFS= read -r line; do + if [[ $line =~ "Failed to start" ]]; then + service_name=$(echo "$line" | grep -oP '(?<=Failed to start\s)\S+(?=:)' || echo "unknown") + failed_services+=("$service_name") + fi + done < /tmp/service-start.log + + if [ ${#failed_services[@]} -gt 0 ]; then + error "Failed to restart: ${failed_services[*]}" + error "Please check service status: sudo network-cli service status all" + fi + else + success "Services restarted successfully" + fi + fi + + # Post-install health check + perform_health_check + # Show completion message show_completion } From 0388c3a76674f526129ebf92a794a8dfbad130a6 Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Sun, 9 Nov 2025 18:28:24 +0200 Subject: [PATCH 13/57] refactor: streamline development and production command structure - Consolidated development commands into a new `dev` command group for better organization. - Introduced a `prod` command group to manage production environment operations. - Updated Makefile to simplify the development environment setup and improve logging. - Enhanced README to clarify the development process and health check requirements. - Removed deprecated configuration and service management commands to streamline the CLI interface. --- CHANGELOG.md | 16 + Makefile | 390 +-- README.md | 2 +- cmd/cli/main.go | 46 +- pkg/cli/config_commands.go | 552 ---- pkg/cli/dev_commands.go | 191 ++ pkg/cli/prod_commands.go | 313 ++ pkg/cli/rqlite_commands.go | 327 --- pkg/cli/service.go | 243 -- pkg/cli/setup.go | 2529 ----------------- pkg/environments/development/checks.go | 173 ++ pkg/environments/development/checks_test.go | 91 + pkg/environments/development/config.go | 270 ++ pkg/environments/development/health.go | 217 ++ pkg/environments/development/runner.go | 716 +++++ pkg/environments/production/checks.go | 215 ++ pkg/environments/production/config.go | 238 ++ pkg/environments/production/installers.go | 307 ++ pkg/environments/production/orchestrator.go | 436 +++ pkg/environments/production/provisioner.go | 208 ++ pkg/environments/production/services.go | 333 +++ pkg/environments/templates/bootstrap.yaml | 41 + pkg/environments/templates/gateway.yaml | 19 + pkg/environments/templates/node.yaml | 44 + pkg/environments/templates/olric.yaml | 8 + pkg/environments/templates/render.go | 191 ++ pkg/environments/templates/render_test.go | 166 ++ .../templates/systemd_gateway.service | 29 + .../templates/systemd_ipfs.service | 27 + .../templates/systemd_ipfs_cluster.service | 28 + .../templates/systemd_node.service | 27 + .../templates/systemd_olric.service | 26 + .../templates/systemd_rqlite.service | 25 + scripts/install-debros-network.sh | 682 +---- 34 files changed, 4451 insertions(+), 4675 deletions(-) delete mode 100644 pkg/cli/config_commands.go create mode 100644 pkg/cli/dev_commands.go create mode 100644 pkg/cli/prod_commands.go delete mode 100644 pkg/cli/rqlite_commands.go delete mode 100644 pkg/cli/service.go delete mode 100644 pkg/cli/setup.go create mode 100644 pkg/environments/development/checks.go create mode 100644 pkg/environments/development/checks_test.go create mode 100644 pkg/environments/development/config.go create mode 100644 pkg/environments/development/health.go create mode 100644 pkg/environments/development/runner.go create mode 100644 pkg/environments/production/checks.go create mode 100644 pkg/environments/production/config.go create mode 100644 
pkg/environments/production/installers.go create mode 100644 pkg/environments/production/orchestrator.go create mode 100644 pkg/environments/production/provisioner.go create mode 100644 pkg/environments/production/services.go create mode 100644 pkg/environments/templates/bootstrap.yaml create mode 100644 pkg/environments/templates/gateway.yaml create mode 100644 pkg/environments/templates/node.yaml create mode 100644 pkg/environments/templates/olric.yaml create mode 100644 pkg/environments/templates/render.go create mode 100644 pkg/environments/templates/render_test.go create mode 100644 pkg/environments/templates/systemd_gateway.service create mode 100644 pkg/environments/templates/systemd_ipfs.service create mode 100644 pkg/environments/templates/systemd_ipfs_cluster.service create mode 100644 pkg/environments/templates/systemd_node.service create mode 100644 pkg/environments/templates/systemd_olric.service create mode 100644 pkg/environments/templates/systemd_rqlite.service diff --git a/CHANGELOG.md b/CHANGELOG.md index 8de2ddc..c56bb54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,22 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.60.0] - 2025-11-09 + +### Added +- Introduced comprehensive `network-cli dev` commands for managing the local development environment (start, stop, status, logs). +- Added `network-cli prod` commands for streamlined production installation, upgrade, and service management on Linux systems (requires root). + +### Changed +- Refactored `Makefile` targets (`dev` and `kill`) to use the new `network-cli dev up` and `network-cli dev down` commands, significantly simplifying the development workflow. +- Removed deprecated `network-cli config`, `network-cli setup`, `network-cli service`, and `network-cli rqlite` commands, consolidating functionality under `dev` and `prod`. + +### Deprecated + +### Removed + +### Fixed +\n ## [0.59.2] - 2025-11-08 ### Added diff --git a/Makefile b/Makefile index de2e029..6ee06dc 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.59.2 +VERSION := 0.60.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' @@ -82,336 +82,14 @@ run-gateway: @echo "Generate it with: network-cli config init --type gateway" go run ./cmd/gateway -# One-command dev: Start bootstrap, node2, node3, gateway, and anon in background -# Requires: configs already exist in ~/.debros +# Development environment target +# Uses network-cli dev up to start full stack with dependency and port checking dev: build - @echo "🚀 Starting development network stack..." - @mkdir -p .dev/pids - @mkdir -p $$HOME/.debros/logs - @echo "Starting Anyone client (anon proxy)..." - @if [ "$$(uname)" = "Darwin" ]; then \ - echo " Detected macOS - using npx anyone-client"; \ - if command -v npx >/dev/null 2>&1; then \ - nohup npx anyone-client > $$HOME/.debros/logs/anon.log 2>&1 & echo $$! 
> .dev/pids/anon.pid; \ - echo " Anyone client started (PID: $$(cat .dev/pids/anon.pid))"; \ - else \ - echo " ⚠️ npx not found - skipping Anyone client"; \ - echo " Install with: npm install -g npm"; \ - fi; \ - elif [ "$$(uname)" = "Linux" ]; then \ - echo " Detected Linux - checking systemctl"; \ - if systemctl is-active --quiet anon 2>/dev/null; then \ - echo " ✓ Anon service already running"; \ - elif command -v systemctl >/dev/null 2>&1; then \ - echo " Starting anon service..."; \ - sudo systemctl start anon 2>/dev/null || echo " ⚠️ Failed to start anon service"; \ - else \ - echo " ⚠️ systemctl not found - skipping Anon"; \ - fi; \ - fi - @echo "Initializing IPFS and Cluster for all nodes..." - @if command -v ipfs >/dev/null 2>&1 && command -v ipfs-cluster-service >/dev/null 2>&1; then \ - CLUSTER_SECRET=$$HOME/.debros/cluster-secret; \ - if [ ! -f $$CLUSTER_SECRET ]; then \ - echo " Generating shared cluster secret..."; \ - ipfs-cluster-service --version >/dev/null 2>&1 && openssl rand -hex 32 > $$CLUSTER_SECRET || echo "0000000000000000000000000000000000000000000000000000000000000000" > $$CLUSTER_SECRET; \ - fi; \ - SECRET=$$(cat $$CLUSTER_SECRET); \ - SWARM_KEY=$$HOME/.debros/swarm.key; \ - if [ ! -f $$SWARM_KEY ]; then \ - echo " Generating private swarm key..."; \ - KEY_HEX=$$(openssl rand -hex 32 | tr '[:lower:]' '[:upper:]'); \ - printf "/key/swarm/psk/1.0.0/\n/base16/\n%s\n" "$$KEY_HEX" > $$SWARM_KEY; \ - chmod 600 $$SWARM_KEY; \ - fi; \ - echo " Setting up bootstrap node (IPFS: 5001, Cluster: 9094)..."; \ - if [ ! -d $$HOME/.debros/bootstrap/ipfs/repo ]; then \ - echo " Initializing IPFS..."; \ - mkdir -p $$HOME/.debros/bootstrap/ipfs; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8080"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4101","/ip6/::/tcp/4101"]' 2>&1 | grep -v "generating" || true; \ - else \ - if [ ! -f $$HOME/.debros/bootstrap/ipfs/repo/swarm.key ]; then \ - cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \ - fi; \ - fi; \ - echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \ - mkdir -p $$HOME/.debros/bootstrap/ipfs-cluster; \ - echo " Setting up node2 (IPFS: 5002, Cluster: 9104)..."; \ - if [ ! -d $$HOME/.debros/node2/ipfs/repo ]; then \ - echo " Initializing IPFS..."; \ - mkdir -p $$HOME/.debros/node2/ipfs; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5002"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8081"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4102","/ip6/::/tcp/4102"]' 2>&1 | grep -v "generating" || true; \ - else \ - if [ ! 
-f $$HOME/.debros/node2/ipfs/repo/swarm.key ]; then \ - cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \ - fi; \ - fi; \ - echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \ - mkdir -p $$HOME/.debros/node2/ipfs-cluster; \ - echo " Setting up node3 (IPFS: 5003, Cluster: 9114)..."; \ - if [ ! -d $$HOME/.debros/node3/ipfs/repo ]; then \ - echo " Initializing IPFS..."; \ - mkdir -p $$HOME/.debros/node3/ipfs; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \ - cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5003"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8082"]' 2>&1 | grep -v "generating" || true; \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4103","/ip6/::/tcp/4103"]' 2>&1 | grep -v "generating" || true; \ - else \ - if [ ! -f $$HOME/.debros/node3/ipfs/repo/swarm.key ]; then \ - cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \ - fi; \ - fi; \ - echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \ - mkdir -p $$HOME/.debros/node3/ipfs-cluster; \ - echo "Starting IPFS daemons..."; \ - if [ ! -f .dev/pids/ipfs-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null; then \ - IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-bootstrap.pid; \ - echo " Bootstrap IPFS started (PID: $$(cat .dev/pids/ipfs-bootstrap.pid), API: 5001)"; \ - sleep 3; \ - else \ - echo " ✓ Bootstrap IPFS already running"; \ - fi; \ - if [ ! -f .dev/pids/ipfs-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null; then \ - IPFS_PATH=$$HOME/.debros/node2/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-node2.pid; \ - echo " Node2 IPFS started (PID: $$(cat .dev/pids/ipfs-node2.pid), API: 5002)"; \ - sleep 3; \ - else \ - echo " ✓ Node2 IPFS already running"; \ - fi; \ - if [ ! -f .dev/pids/ipfs-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null; then \ - IPFS_PATH=$$HOME/.debros/node3/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-node3.pid; \ - echo " Node3 IPFS started (PID: $$(cat .dev/pids/ipfs-node3.pid), API: 5003)"; \ - sleep 3; \ - else \ - echo " ✓ Node3 IPFS already running"; \ - fi; \ - else \ - echo " ⚠️ ipfs or ipfs-cluster-service not found - skipping IPFS setup"; \ - echo " Install with: https://docs.ipfs.tech/install/ and https://ipfscluster.io/documentation/guides/install/"; \ - fi - @sleep 2 - @echo "Starting bootstrap node..." - @nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid - @sleep 3 - @echo "Starting node2..." - @nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid - @sleep 2 - @echo "Starting node3..." - @nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid - @sleep 3 - @echo "Starting IPFS Cluster daemons (after Go nodes have configured them)..." 
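
Every repo initialized above receives a copy of the same `swarm.key`, and `promptSwarmKey` earlier in this patch series enforces the identical three-line format (`/key/swarm/psk/1.0.0/`, `/base16/`, 64 uppercase hex characters). A Go sketch that generates such a key file; the output path is hypothetical:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
	"strings"
)

// writeSwarmKey generates a 32-byte pre-shared key and writes it in the
// IPFS private-network format used throughout this patch.
func writeSwarmKey(path string) error {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return err
	}
	hexKey := strings.ToUpper(hex.EncodeToString(key))
	content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", hexKey)
	// 0600, matching the Makefile's chmod 600.
	return os.WriteFile(path, []byte(content), 0600)
}

func main() {
	if err := writeSwarmKey("swarm.key"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
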
- @if command -v ipfs-cluster-service >/dev/null 2>&1; then \ - if [ ! -f .dev/pids/ipfs-cluster-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null; then \ - if [ -f $$HOME/.debros/bootstrap/ipfs-cluster/service.json ]; then \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-bootstrap.pid; \ - echo " Bootstrap Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-bootstrap.pid), API: 9094)"; \ - echo " Waiting for bootstrap cluster to be ready..."; \ - for i in $$(seq 1 30); do \ - if curl -s http://localhost:9094/peers >/dev/null 2>&1; then \ - break; \ - fi; \ - sleep 1; \ - done; \ - sleep 2; \ - else \ - echo " ⚠️ Bootstrap cluster config not ready yet"; \ - fi; \ - else \ - echo " ✓ Bootstrap Cluster already running"; \ - fi; \ - if [ ! -f .dev/pids/ipfs-cluster-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null; then \ - if [ -f $$HOME/.debros/node2/ipfs-cluster/service.json ]; then \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node2.pid; \ - echo " Node2 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node2.pid), API: 9104)"; \ - sleep 3; \ - else \ - echo " ⚠️ Node2 cluster config not ready yet"; \ - fi; \ - else \ - echo " ✓ Node2 Cluster already running"; \ - fi; \ - if [ ! -f .dev/pids/ipfs-cluster-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null; then \ - if [ -f $$HOME/.debros/node3/ipfs-cluster/service.json ]; then \ - env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node3.pid; \ - echo " Node3 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node3.pid), API: 9114)"; \ - sleep 3; \ - else \ - echo " ⚠️ Node3 cluster config not ready yet"; \ - fi; \ - else \ - echo " ✓ Node3 Cluster already running"; \ - fi; \ - else \ - echo " ⚠️ ipfs-cluster-service not found - skipping cluster daemon startup"; \ - fi - @sleep 1 - @echo "Starting Olric cache server..." - @if command -v olric-server >/dev/null 2>&1; then \ - if [ ! -f $$HOME/.debros/olric-config.yaml ]; then \ - echo " Creating Olric config..."; \ - mkdir -p $$HOME/.debros; \ - fi; \ - if ! pgrep -f "olric-server" >/dev/null 2>&1; then \ - OLRIC_SERVER_CONFIG=$$HOME/.debros/olric-config.yaml nohup olric-server > $$HOME/.debros/logs/olric.log 2>&1 & echo $$! > .dev/pids/olric.pid; \ - echo " Olric cache server started (PID: $$(cat .dev/pids/olric.pid))"; \ - sleep 3; \ - else \ - echo " ✓ Olric cache server already running"; \ - fi; \ - else \ - echo " ⚠️ olric-server command not found - skipping Olric (cache endpoints will be disabled)"; \ - echo " Install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0"; \ - fi - @sleep 1 - @echo "Starting gateway..." - @nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid - @echo "" - @echo "============================================================" - @echo "✅ Development stack started!" 
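
The cluster startup above polls `http://localhost:9094/peers` once per second for up to 30 attempts before continuing. The same readiness loop as a Go sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHTTP polls url until it responds or attempts are exhausted,
// mirroring the Makefile's 30x1s curl loop against the cluster API.
func waitForHTTP(url string, attempts int, delay time.Duration) error {
	client := &http.Client{Timeout: 2 * time.Second}
	for i := 0; i < attempts; i++ {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("%s not ready after %d attempts", url, attempts)
}

func main() {
	if err := waitForHTTP("http://localhost:9094/peers", 30, time.Second); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("cluster API is ready")
}
```
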
- @echo "============================================================" - @echo "" - @echo "Processes:" - @if [ -f .dev/pids/anon.pid ]; then \ - echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \ - fi - @if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ - echo " Bootstrap IPFS: PID=$$(cat .dev/pids/ipfs-bootstrap.pid) (API: 5001)"; \ - fi - @if [ -f .dev/pids/ipfs-node2.pid ]; then \ - echo " Node2 IPFS: PID=$$(cat .dev/pids/ipfs-node2.pid) (API: 5002)"; \ - fi - @if [ -f .dev/pids/ipfs-node3.pid ]; then \ - echo " Node3 IPFS: PID=$$(cat .dev/pids/ipfs-node3.pid) (API: 5003)"; \ - fi - @if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \ - echo " Bootstrap Cluster: PID=$$(cat .dev/pids/ipfs-cluster-bootstrap.pid) (API: 9094)"; \ - fi - @if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \ - echo " Node2 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node2.pid) (API: 9104)"; \ - fi - @if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \ - echo " Node3 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node3.pid) (API: 9114)"; \ - fi - @if [ -f .dev/pids/olric.pid ]; then \ - echo " Olric: PID=$$(cat .dev/pids/olric.pid) (API: 3320)"; \ - fi - @echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)" - @echo " Node2: PID=$$(cat .dev/pids/node2.pid)" - @echo " Node3: PID=$$(cat .dev/pids/node3.pid)" - @echo " Gateway: PID=$$(cat .dev/pids/gateway.pid)" - @echo "" - @echo "Ports:" - @echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)" - @if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ - echo " Bootstrap IPFS API: 5001"; \ - echo " Node2 IPFS API: 5002"; \ - echo " Node3 IPFS API: 5003"; \ - echo " Bootstrap Cluster: 9094 (pin management)"; \ - echo " Node2 Cluster: 9104 (pin management)"; \ - echo " Node3 Cluster: 9114 (pin management)"; \ - fi - @if [ -f .dev/pids/olric.pid ]; then \ - echo " Olric: 3320 (cache API)"; \ - fi - @echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001" - @echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002" - @echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003" - @echo " Gateway: 6001" - @echo "" - @echo "Press Ctrl+C to stop all processes" - @echo "============================================================" - @echo "" - @LOGS="$$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log"; \ - if [ -f .dev/pids/anon.pid ]; then \ - LOGS="$$LOGS $$HOME/.debros/logs/anon.log"; \ - fi; \ - if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ - LOGS="$$LOGS $$HOME/.debros/logs/ipfs-bootstrap.log $$HOME/.debros/logs/ipfs-node2.log $$HOME/.debros/logs/ipfs-node3.log"; \ - fi; \ - if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \ - LOGS="$$LOGS $$HOME/.debros/logs/ipfs-cluster-bootstrap.log $$HOME/.debros/logs/ipfs-cluster-node2.log $$HOME/.debros/logs/ipfs-cluster-node3.log"; \ - fi; \ - if [ -f .dev/pids/olric.pid ]; then \ - LOGS="$$LOGS $$HOME/.debros/logs/olric.log"; \ - fi; \ - trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ - tail -f $$LOGS + @./bin/network-cli dev up -# Kill all processes +# Kill all processes using network-cli dev down kill: - @echo "🛑 Stopping all DeBros network services..." - @echo "" - @echo "Stopping DeBros nodes and gateway..." 
- @if [ -f .dev/pids/gateway.pid ]; then \ - kill -TERM $$(cat .dev/pids/gateway.pid) 2>/dev/null && echo " ✓ Gateway stopped" || echo " ✗ Gateway not running"; \ - rm -f .dev/pids/gateway.pid; \ - fi - @if [ -f .dev/pids/bootstrap.pid ]; then \ - kill -TERM $$(cat .dev/pids/bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap node stopped" || echo " ✗ Bootstrap not running"; \ - rm -f .dev/pids/bootstrap.pid; \ - fi - @if [ -f .dev/pids/node2.pid ]; then \ - kill -TERM $$(cat .dev/pids/node2.pid) 2>/dev/null && echo " ✓ Node2 stopped" || echo " ✗ Node2 not running"; \ - rm -f .dev/pids/node2.pid; \ - fi - @if [ -f .dev/pids/node3.pid ]; then \ - kill -TERM $$(cat .dev/pids/node3.pid) 2>/dev/null && echo " ✓ Node3 stopped" || echo " ✗ Node3 not running"; \ - rm -f .dev/pids/node3.pid; \ - fi - @echo "" - @echo "Stopping IPFS Cluster peers..." - @if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \ - kill -TERM $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap Cluster stopped" || echo " ✗ Bootstrap Cluster not running"; \ - rm -f .dev/pids/ipfs-cluster-bootstrap.pid; \ - fi - @if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \ - kill -TERM $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null && echo " ✓ Node2 Cluster stopped" || echo " ✗ Node2 Cluster not running"; \ - rm -f .dev/pids/ipfs-cluster-node2.pid; \ - fi - @if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \ - kill -TERM $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null && echo " ✓ Node3 Cluster stopped" || echo " ✗ Node3 Cluster not running"; \ - rm -f .dev/pids/ipfs-cluster-node3.pid; \ - fi - @echo "" - @echo "Stopping IPFS daemons..." - @if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \ - kill -TERM $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap IPFS stopped" || echo " ✗ Bootstrap IPFS not running"; \ - rm -f .dev/pids/ipfs-bootstrap.pid; \ - fi - @if [ -f .dev/pids/ipfs-node2.pid ]; then \ - kill -TERM $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null && echo " ✓ Node2 IPFS stopped" || echo " ✗ Node2 IPFS not running"; \ - rm -f .dev/pids/ipfs-node2.pid; \ - fi - @if [ -f .dev/pids/ipfs-node3.pid ]; then \ - kill -TERM $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null && echo " ✓ Node3 IPFS stopped" || echo " ✗ Node3 IPFS not running"; \ - rm -f .dev/pids/ipfs-node3.pid; \ - fi - @echo "" - @echo "Stopping Olric cache..." - @if [ -f .dev/pids/olric.pid ]; then \ - kill -TERM $$(cat .dev/pids/olric.pid) 2>/dev/null && echo " ✓ Olric stopped" || echo " ✗ Olric not running"; \ - rm -f .dev/pids/olric.pid; \ - fi - @echo "" - @echo "Stopping Anon proxy..." - @if [ -f .dev/pids/anyone.pid ]; then \ - kill -TERM $$(cat .dev/pids/anyone.pid) 2>/dev/null && echo " ✓ Anon proxy stopped" || echo " ✗ Anon proxy not running"; \ - rm -f .dev/pids/anyone.pid; \ - fi - @echo "" - @echo "Cleaning up any remaining processes on ports..." - @lsof -ti:7001,7002,7003,5001,5002,5003,6001,4001,4002,4003,9050,3320,3322,9094,9095,9096,9097,9104,9105,9106,9107,9114,9115,9116,9117,8080,8081,8082 2>/dev/null | xargs kill -9 2>/dev/null && echo " ✓ Cleaned up remaining port bindings" || echo " ✓ No lingering processes found" - @echo "" - @echo "✅ All services stopped!" 
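
The removed `kill` target above walks the `.dev/pids/*.pid` files and sends SIGTERM to each recorded process before falling back to `lsof` for stragglers. That per-service step, as a Unix-only Go sketch (the pidfile path is one of those listed above):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"syscall"
)

// stopFromPidfile sends SIGTERM to the process recorded in path and then
// removes the file, which is what each branch of the removed target does.
func stopFromPidfile(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err // a missing pidfile means "not running"
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return err
	}
	if err := syscall.Kill(pid, syscall.SIGTERM); err != nil {
		return err
	}
	return os.Remove(path)
}

func main() {
	if err := stopFromPidfile(".dev/pids/gateway.pid"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```
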
+ @./bin/network-cli dev down

 # Help
 help:
@@ -420,42 +98,25 @@ help:
 @echo " clean - Clean build artifacts"
 @echo " test - Run tests"
 @echo ""
- @echo "Development:"
- @echo " dev - Start full dev stack (bootstrap + 2 nodes + gateway)"
- @echo " Requires: configs in ~/.debros (run 'network-cli config init' first)"
+ @echo "Local Development (Recommended):"
+ @echo " make dev - Start full development stack with one command"
+ @echo " - Checks dependencies and available ports"
+ @echo " - Generates configs (bootstrap + node2 + node3 + gateway)"
+ @echo " - Starts IPFS, RQLite, Olric, nodes, and gateway"
+ @echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)"
+ @echo " - Stops all services if health checks fail"
+ @echo " - Includes comprehensive logging"
+ @echo " make kill - Stop all development services"
 @echo ""
- @echo "Configuration (NEW):"
- @echo " First, generate config files in ~/.debros with:"
- @echo " make build # Build CLI first"
- @echo " ./bin/network-cli config init # Generate full stack"
+ @echo "Development Management (via network-cli):"
+ @echo " ./bin/network-cli dev status - Show status of all dev services"
+ @echo " ./bin/network-cli dev logs <component> [--follow]"
 @echo ""
- @echo "Network Targets (requires config files in ~/.debros):"
- @echo " run-node - Start bootstrap node"
- @echo " run-node2 - Start second node"
- @echo " run-node3 - Start third node"
- @echo " run-gateway - Start HTTP gateway"
- @echo " run-example - Run usage example"
- @echo ""
- @echo "Running Multiple Nodes:"
- @echo " Nodes use --config flag to select which YAML file in ~/.debros to load:"
- @echo " go run ./cmd/node --config bootstrap.yaml"
- @echo " go run ./cmd/node --config node.yaml"
- @echo " go run ./cmd/node --config node2.yaml"
- @echo " Generate configs with: ./bin/network-cli config init --name <name>"
- @echo ""
- @echo "CLI Commands:"
- @echo " run-cli - Run network CLI help"
- @echo " cli-health - Check network health"
- @echo " cli-peers - List network peers"
- @echo " cli-status - Get network status"
- @echo " cli-storage-test - Test storage operations"
- @echo " cli-pubsub-test - Test pub/sub operations"
- @echo ""
- @echo "Development:"
- @echo " test-multinode - Full multi-node test with 1 bootstrap + 2 nodes"
- @echo " test-peer-discovery - Test peer discovery (requires running nodes)"
- @echo " test-replication - Test data replication (requires running nodes)"
- @echo " test-consensus - Test database consensus (requires running nodes)"
+ @echo "Individual Node Targets (advanced):"
+ @echo " run-node - Start bootstrap node directly"
+ @echo " run-node2 - Start second node directly"
+ @echo " run-node3 - Start third node directly"
+ @echo " run-gateway - Start HTTP gateway directly"
 @echo ""
 @echo "Maintenance:"
 @echo " deps - Download dependencies"
@@ -463,9 +124,4 @@ help:
 @echo " fmt - Format code"
 @echo " vet - Vet code"
 @echo " lint - Lint code (fmt + vet)"
- @echo " clear-ports - Clear common dev ports"
- @echo " kill - Stop all running services (nodes, IPFS, cluster, gateway, olric)"
- @echo " dev-setup - Setup development environment"
- @echo " dev-cluster - Show cluster startup commands"
- @echo " dev - Full development workflow"
 @echo " help - Show this help"
diff --git a/README.md b/README.md
index dd2e561..451141e 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ DeBros Network is a decentralized peer-to-peer data platform built in Go. It com
 make dev
 ```

- This starts three nodes and the HTTP gateway. Stop with `Ctrl+C`.
+ This starts three nodes and the HTTP gateway. **The command will not complete successfully until all services pass health checks** (IPFS peer connectivity, RQLite cluster formation, and LibP2P connectivity). If health checks fail, all services are stopped automatically. Stop with `Ctrl+C`.

 4. Validate the network from another terminal:

diff --git a/cmd/cli/main.go b/cmd/cli/main.go
index e396013..c9db844 100644
--- a/cmd/cli/main.go
+++ b/cmd/cli/main.go
@@ -64,20 +64,18 @@ func main() {
 os.Exit(1)
 }

- // Setup and service commands
- case "setup":
- cli.HandleSetupCommand(args)
- case "service":
- cli.HandleServiceCommand(args)
+ // Development environment commands
+ case "dev":
+ cli.HandleDevCommand(args)
+
+ // Production environment commands
+ case "prod":
+ cli.HandleProdCommand(args)

 // Authentication commands
 case "auth":
 cli.HandleAuthCommand(args)

- // Config commands
- case "config":
- cli.HandleConfigCommand(args)
-
 // Basic network commands
 case "health":
 cli.HandleHealthCommand(format, timeout)
@@ -108,10 +106,6 @@ func main() {
 }
 cli.HandleConnectCommand(args[0], timeout)

- // RQLite commands
- case "rqlite":
- cli.HandleRQLiteCommand(args)
-
 // Help
 case "help", "--help", "-h":
 showHelp()
@@ -151,13 +145,18 @@ func showHelp() {
 fmt.Printf(" devnet enable - Shorthand for switching to devnet\n")
 fmt.Printf(" testnet enable - Shorthand for switching to testnet\n\n")

- fmt.Printf("🚀 Setup & Services:\n")
- fmt.Printf(" setup [--force] - Interactive VPS setup (Linux only, requires root)\n")
- fmt.Printf(" service start <target> - Start service (node, gateway, all)\n")
- fmt.Printf(" service stop <target> - Stop service\n")
- fmt.Printf(" service restart <target> - Restart service\n")
- fmt.Printf(" service status [target] - Show service status\n")
- fmt.Printf(" service logs <target> [opts] - View service logs (--follow, --since=1h)\n\n")
+ fmt.Printf("💻 Local Development:\n")
+ fmt.Printf(" dev up - Start full local dev environment\n")
+ fmt.Printf(" dev down - Stop all dev services\n")
+ fmt.Printf(" dev status - Show status of dev services\n")
+ fmt.Printf(" dev logs <component> - View dev component logs\n\n")
+
+ fmt.Printf("🚀 Production Deployment:\n")
+ fmt.Printf(" prod install [--bootstrap] - Full production bootstrap (requires root)\n")
+ fmt.Printf(" prod upgrade - Upgrade existing installation\n")
+ fmt.Printf(" prod status - Show production service status\n")
+ fmt.Printf(" prod logs - View production service logs\n")
+ fmt.Printf(" prod uninstall - Remove production services (preserves data)\n\n")

 fmt.Printf("🔐 Authentication:\n")
 fmt.Printf(" auth login - Authenticate with wallet\n")
@@ -165,10 +164,6 @@
 fmt.Printf(" auth whoami - Show current authentication\n")
 fmt.Printf(" auth status - Show detailed auth info\n\n")

- fmt.Printf("⚙️ Configuration:\n")
- fmt.Printf(" config init [--type <type>] - Generate configs (full stack or single)\n")
- fmt.Printf(" config validate --name <file> - Validate config file\n\n")
-
 fmt.Printf("🌐 Network Commands:\n")
 fmt.Printf(" health - Check network health\n")
 fmt.Printf(" peers - List connected peers\n")
 fmt.Printf(" status - Get network status\n")
@@ -179,9 +174,6 @@
 fmt.Printf("🗄️ Database:\n")
 fmt.Printf(" query <sql> 🔐 Execute database query\n\n")

- fmt.Printf("🔧 RQLite:\n")
- fmt.Printf(" rqlite fix 🔧 Fix misconfigured join address and clean raft state\n\n")
-
 fmt.Printf("📡 PubSub:\n")
 fmt.Printf(" pubsub publish <topic> <message> 🔐 Publish message\n")
 fmt.Printf(" pubsub subscribe <topic> 🔐 Subscribe to topic\n")
diff --git a/pkg/cli/config_commands.go b/pkg/cli/config_commands.go
deleted file mode 100644
index
6f5ea4b..0000000
--- a/pkg/cli/config_commands.go
+++ /dev/null
@@ -1,552 +0,0 @@
-package cli
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- "github.com/DeBrosOfficial/network/pkg/config"
- "github.com/DeBrosOfficial/network/pkg/encryption"
-)
-
-// HandleConfigCommand handles config management commands
-func HandleConfigCommand(args []string) {
- if len(args) == 0 {
- showConfigHelp()
- return
- }
-
- subcommand := args[0]
- subargs := args[1:]
-
- switch subcommand {
- case "init":
- handleConfigInit(subargs)
- case "validate":
- handleConfigValidate(subargs)
- case "help":
- showConfigHelp()
- default:
- fmt.Fprintf(os.Stderr, "Unknown config subcommand: %s\n", subcommand)
- showConfigHelp()
- os.Exit(1)
- }
-}
-
-func showConfigHelp() {
- fmt.Printf("Config Management Commands\n\n")
- fmt.Printf("Usage: network-cli config <subcommand> [options]\n\n")
- fmt.Printf("Subcommands:\n")
- fmt.Printf(" init - Generate full network stack in ~/.debros (bootstrap + 2 nodes + gateway)\n")
- fmt.Printf(" validate --name <file> - Validate a config file\n\n")
- fmt.Printf("Init Default Behavior (no --type):\n")
- fmt.Printf(" Generates bootstrap.yaml, node2.yaml, node3.yaml, gateway.yaml with:\n")
- fmt.Printf(" - Auto-generated identities for bootstrap, node2, node3\n")
- fmt.Printf(" - Correct bootstrap_peers and join addresses\n")
- fmt.Printf(" - Default ports: P2P 4001-4003, HTTP 5001-5003, Raft 7001-7003\n\n")
- fmt.Printf("Init Options:\n")
- fmt.Printf(" --type <type> - Single config type: node, bootstrap, gateway (skips stack generation)\n")
- fmt.Printf(" --name <name> - Output filename (default: depends on --type or 'stack' for full stack)\n")
- fmt.Printf(" --force - Overwrite existing config/stack files\n\n")
- fmt.Printf("Single Config Options (with --type):\n")
- fmt.Printf(" --id <id> - Node ID for bootstrap peers\n")
- fmt.Printf(" --listen-port <port> - LibP2P listen port (default: 4001)\n")
- fmt.Printf(" --rqlite-http-port <port> - RQLite HTTP port (default: 5001)\n")
- fmt.Printf(" --rqlite-raft-port <port> - RQLite Raft port (default: 7001)\n")
- fmt.Printf(" --join <address> - RQLite address to join (required for non-bootstrap)\n")
- fmt.Printf(" --bootstrap-peers <peers> - Comma-separated bootstrap peer multiaddrs\n\n")
- fmt.Printf("Examples:\n")
- fmt.Printf(" network-cli config init # Generate full stack\n")
- fmt.Printf(" network-cli config init --force # Overwrite existing stack\n")
- fmt.Printf(" network-cli config init --type bootstrap # Single bootstrap config (legacy)\n")
- fmt.Printf(" network-cli config validate --name node.yaml\n")
-}
-
-func handleConfigInit(args []string) {
- // Parse flags
- var (
- cfgType = ""
- name = "" // Will be set based on type if not provided
- id string
- listenPort = 4001
- rqliteHTTPPort = 5001
- rqliteRaftPort = 7001
- joinAddr string
- bootstrapPeers string
- force bool
- )
-
- for i := 0; i < len(args); i++ {
- switch args[i] {
- case "--type":
- if i+1 < len(args) {
- cfgType = args[i+1]
- i++
- }
- case "--name":
- if i+1 < len(args) {
- name = args[i+1]
- i++
- }
- case "--id":
- if i+1 < len(args) {
- id = args[i+1]
- i++
- }
- case "--listen-port":
- if i+1 < len(args) {
- if p, err := strconv.Atoi(args[i+1]); err == nil {
- listenPort = p
- }
- i++
- }
- case "--rqlite-http-port":
- if i+1 < len(args) {
- if p, err := strconv.Atoi(args[i+1]); err == nil {
- rqliteHTTPPort = p
- }
- i++
- }
- case "--rqlite-raft-port":
- if i+1 < len(args) {
- if p, err := strconv.Atoi(args[i+1]); err == nil {
- rqliteRaftPort = p
- }
- i++
- }
- case "--join":
- if i+1 < len(args)
{ - joinAddr = args[i+1] - i++ - } - case "--bootstrap-peers": - if i+1 < len(args) { - bootstrapPeers = args[i+1] - i++ - } - case "--force": - force = true - } - } - - // If --type is not specified, generate full stack - if cfgType == "" { - initFullStack(force) - return - } - - // Otherwise, continue with single-file generation - // Validate type - if cfgType != "node" && cfgType != "bootstrap" && cfgType != "gateway" { - fmt.Fprintf(os.Stderr, "Invalid --type: %s (expected: node, bootstrap, or gateway)\n", cfgType) - os.Exit(1) - } - - // Set default name based on type if not provided - if name == "" { - switch cfgType { - case "bootstrap": - name = "bootstrap.yaml" - case "gateway": - name = "gateway.yaml" - default: - name = "node.yaml" - } - } - - // Ensure config directory exists - configDir, err := config.EnsureConfigDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to ensure config directory: %v\n", err) - os.Exit(1) - } - - configPath := filepath.Join(configDir, name) - - // Check if file exists - if !force { - if _, err := os.Stat(configPath); err == nil { - fmt.Fprintf(os.Stderr, "Config file already exists at %s (use --force to overwrite)\n", configPath) - os.Exit(1) - } - } - - // Generate config based on type - var configContent string - switch cfgType { - case "node": - configContent = GenerateNodeConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort, joinAddr, bootstrapPeers) - case "bootstrap": - configContent = GenerateBootstrapConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort) - case "gateway": - configContent = GenerateGatewayConfig(bootstrapPeers) - } - - // Write config file - if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write config file: %v\n", err) - os.Exit(1) - } - - fmt.Printf("✅ Configuration file created: %s\n", configPath) - fmt.Printf(" Type: %s\n", cfgType) - fmt.Printf("\nYou can now start the %s using the generated config.\n", cfgType) -} - -func handleConfigValidate(args []string) { - var name string - for i := 0; i < len(args); i++ { - if args[i] == "--name" && i+1 < len(args) { - name = args[i+1] - i++ - } - } - - if name == "" { - fmt.Fprintf(os.Stderr, "Missing --name flag\n") - showConfigHelp() - os.Exit(1) - } - - configDir, err := config.ConfigDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get config directory: %v\n", err) - os.Exit(1) - } - - configPath := filepath.Join(configDir, name) - file, err := os.Open(configPath) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to open config file: %v\n", err) - os.Exit(1) - } - defer file.Close() - - var cfg config.Config - if err := config.DecodeStrict(file, &cfg); err != nil { - fmt.Fprintf(os.Stderr, "Failed to parse config: %v\n", err) - os.Exit(1) - } - - // Run validation - errs := cfg.Validate() - if len(errs) > 0 { - fmt.Fprintf(os.Stderr, "\n❌ Configuration errors (%d):\n", len(errs)) - for _, err := range errs { - fmt.Fprintf(os.Stderr, " - %s\n", err) - } - os.Exit(1) - } - - fmt.Printf("✅ Config is valid: %s\n", configPath) -} - -func initFullStack(force bool) { - fmt.Printf("🚀 Initializing full network stack...\n") - - // Ensure ~/.debros directory exists - homeDir, err := os.UserHomeDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get home directory: %v\n", err) - os.Exit(1) - } - debrosDir := filepath.Join(homeDir, ".debros") - if err := os.MkdirAll(debrosDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create ~/.debros directory: %v\n", err) - os.Exit(1) - } 
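
The deleted `initFullStack` below goes on to compose the bootstrap multiaddr from the freshly generated peer identity. The address shape it builds is easy to see in isolation; the peer ID here is a placeholder, not a real identity from this repo:

```go
package main

import "fmt"

// buildBootstrapAddr mirrors how initFullStack composed the bootstrap
// multiaddr: IP transport, TCP port, then the libp2p peer ID.
func buildBootstrapAddr(ip string, port int, peerID string) string {
	return fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", ip, port, peerID)
}

func main() {
	// Placeholder peer ID for illustration.
	fmt.Println(buildBootstrapAddr("127.0.0.1", 4001, "12D3KooWExamplePeerID"))
}
```
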

	// Step 1: Generate bootstrap identity
	bootstrapIdentityDir := filepath.Join(debrosDir, "bootstrap")
	bootstrapIdentityPath := filepath.Join(bootstrapIdentityDir, "identity.key")

	if !force {
		if _, err := os.Stat(bootstrapIdentityPath); err == nil {
			fmt.Fprintf(os.Stderr, "Bootstrap identity already exists at %s (use --force to overwrite)\n", bootstrapIdentityPath)
			os.Exit(1)
		}
	}

	bootstrapInfo, err := encryption.GenerateIdentity()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to generate bootstrap identity: %v\n", err)
		os.Exit(1)
	}
	if err := os.MkdirAll(bootstrapIdentityDir, 0755); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create bootstrap data directory: %v\n", err)
		os.Exit(1)
	}
	if err := encryption.SaveIdentity(bootstrapInfo, bootstrapIdentityPath); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save bootstrap identity: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated bootstrap identity: %s (Peer ID: %s)\n", bootstrapIdentityPath, bootstrapInfo.PeerID.String())

	// Construct bootstrap multiaddr
	bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
	fmt.Printf("   Bootstrap multiaddr: %s\n", bootstrapMultiaddr)

	// Steps 2-5 generate YAML configs for the bootstrap node, two follower
	// nodes, and the gateway; existing files are preserved unless --force is set.

	// Step 2: Generate bootstrap.yaml
	bootstrapName := "bootstrap.yaml"
	bootstrapPath := filepath.Join(debrosDir, bootstrapName)
	if !force {
		if _, err := os.Stat(bootstrapPath); err == nil {
			fmt.Fprintf(os.Stderr, "Bootstrap config already exists at %s (use --force to overwrite)\n", bootstrapPath)
			os.Exit(1)
		}
	}
	bootstrapContent := GenerateBootstrapConfig(bootstrapName, "", 4001, 5001, 7001)
	if err := os.WriteFile(bootstrapPath, []byte(bootstrapContent), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write bootstrap config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated bootstrap config: %s\n", bootstrapPath)

	// Step 3: Generate node2.yaml
	node2Name := "node2.yaml"
	node2Path := filepath.Join(debrosDir, node2Name)
	if !force {
		if _, err := os.Stat(node2Path); err == nil {
			fmt.Fprintf(os.Stderr, "Node2 config already exists at %s (use --force to overwrite)\n", node2Path)
			os.Exit(1)
		}
	}
	node2Content := GenerateNodeConfig(node2Name, "", 4002, 5002, 7002, "localhost:5001", bootstrapMultiaddr)
	if err := os.WriteFile(node2Path, []byte(node2Content), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write node2 config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated node2 config: %s\n", node2Path)

	// Step 4: Generate node3.yaml
	node3Name := "node3.yaml"
	node3Path := filepath.Join(debrosDir, node3Name)
	if !force {
		if _, err := os.Stat(node3Path); err == nil {
			fmt.Fprintf(os.Stderr, "Node3 config already exists at %s (use --force to overwrite)\n", node3Path)
			os.Exit(1)
		}
	}
	node3Content := GenerateNodeConfig(node3Name, "", 4003, 5003, 7003, "localhost:5001", bootstrapMultiaddr)
	if err := os.WriteFile(node3Path, []byte(node3Content), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write node3 config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated node3 config: %s\n", node3Path)

	// Step 5: Generate gateway.yaml
	gatewayName := "gateway.yaml"
	gatewayPath := filepath.Join(debrosDir, gatewayName)
	if !force {
		if _, err := os.Stat(gatewayPath); err == nil {
			fmt.Fprintf(os.Stderr, "Gateway config already exists at %s (use --force to overwrite)\n", gatewayPath)
			os.Exit(1)
		}
	}
	gatewayContent := GenerateGatewayConfig(bootstrapMultiaddr)
	if err := os.WriteFile(gatewayPath, []byte(gatewayContent), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write gateway config: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated gateway config: %s\n", gatewayPath)

	// Use constant format strings so go vet's printf check stays quiet
	fmt.Printf("\n%s\n", strings.Repeat("=", 60))
	fmt.Printf("✅ Full network stack initialized successfully!\n")
	fmt.Printf("%s\n", strings.Repeat("=", 60))
	fmt.Printf("\nBootstrap Peer ID: %s\n", bootstrapInfo.PeerID.String())
	fmt.Printf("Bootstrap Multiaddr: %s\n", bootstrapMultiaddr)
	fmt.Printf("\nGenerated configs:\n")
	fmt.Printf("  - %s\n", bootstrapPath)
	fmt.Printf("  - %s\n", node2Path)
	fmt.Printf("  - %s\n", node3Path)
	fmt.Printf("  - %s\n", gatewayPath)
	fmt.Printf("\nStart the network with: make dev\n")
}

// GenerateNodeConfig generates a node configuration
func GenerateNodeConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int, joinAddr, bootstrapPeers string) string {
	nodeID := id
	if nodeID == "" {
		nodeID = fmt.Sprintf("node-%d", time.Now().Unix())
	}

	// Parse bootstrap peers
	var peers []string
	if bootstrapPeers != "" {
		for _, p := range strings.Split(bootstrapPeers, ",") {
			if p = strings.TrimSpace(p); p != "" {
				peers = append(peers, p)
			}
		}
	}

	// Construct data_dir from the name stem (remove .yaml), rooted at the
	// user's home directory. The previous os.ExpandEnv("~") call was a no-op:
	// ExpandEnv only substitutes $VAR references, never "~".
	dataDir := strings.TrimSuffix(name, ".yaml")
	home, err := os.UserHomeDir()
	if err != nil {
		home = "~" // fall back to a literal "~" if home cannot be determined
	}
	dataDir = filepath.Join(home, ".debros", dataDir)

	var peersYAML strings.Builder
	if len(peers) == 0 {
		peersYAML.WriteString("  bootstrap_peers: []")
	} else {
		peersYAML.WriteString("  bootstrap_peers:\n")
		for _, p := range peers {
			fmt.Fprintf(&peersYAML, "    - \"%s\"\n", p)
		}
	}

	if joinAddr == "" {
		joinAddr = "localhost:5001"
	}

	// Calculate IPFS cluster API port (9094 for bootstrap, 9104+ for nodes)
	// Pattern: Bootstrap (5001) -> 9094, Node2 (5002) -> 9104, Node3 (5003) -> 9114
	clusterAPIPort := 9094 + (rqliteHTTPPort-5001)*10

	return fmt.Sprintf(`node:
  id: "%s"
  type: "node"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/%d"
  data_dir: "%s"
  max_connections: 50

database:
  data_dir: "%s/rqlite"
  replication_factor: 3
  shard_count: 16
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: %d
  rqlite_raft_port: %d
  rqlite_join_address: "%s"
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 1
  ipfs:
    # IPFS Cluster API endpoint for pin management (leave empty to disable)
    cluster_api_url: "http://localhost:%d"
    # IPFS HTTP API endpoint for content retrieval
    api_url: "http://localhost:%d"
    # Timeout for IPFS operations
    timeout: "60s"
    # Replication factor for pinned content
    replication_factor: 3
    # Enable client-side encryption before upload
    enable_encryption: true

discovery:
%s
  discovery_interval: "15s"
  bootstrap_port: %d
  http_adv_address: "localhost:%d"
  raft_adv_address: "localhost:%d"
  node_namespace: "default"

security:
  enable_tls: false

logging:
  level: "info"
  format: "console"
`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, joinAddr, clusterAPIPort, rqliteHTTPPort, peersYAML.String(), 4001, rqliteHTTPPort, rqliteRaftPort)
}

// GenerateBootstrapConfig generates a bootstrap configuration
func GenerateBootstrapConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int) string {
	nodeID := id
	if nodeID == "" {
		nodeID = "bootstrap"
	}

	// As above, resolve the home directory explicitly instead of the no-op
	// os.ExpandEnv("~") call.
	home, err := os.UserHomeDir()
	if err != nil {
		home = "~" // fall back to a literal "~" if home cannot be determined
	}
	dataDir := filepath.Join(home, ".debros", "bootstrap")

	return fmt.Sprintf(`node:
  id: "%s"
  type: "bootstrap"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/%d"
  data_dir: "%s"
  max_connections: 50

database:
  data_dir: "%s/rqlite"
  replication_factor: 3
  shard_count: 16
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: %d
  rqlite_raft_port: %d
  rqlite_join_address: ""
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 1
  ipfs:
    # IPFS Cluster API endpoint for pin management (leave empty to disable)
    cluster_api_url: "http://localhost:9094"
    # IPFS HTTP API endpoint for content retrieval
    api_url: "http://localhost:%d"
    # Timeout for IPFS operations
    timeout: "60s"
    # Replication factor for pinned content
    replication_factor: 3
    # Enable client-side encryption before upload
    enable_encryption: true

discovery:
  bootstrap_peers: []
  discovery_interval: "15s"
  bootstrap_port: %d
  http_adv_address: "localhost:%d"
  raft_adv_address: "localhost:%d"
  node_namespace: "default"

security:
  enable_tls: false

logging:
  level: "info"
  format: "console"
`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, rqliteHTTPPort, 4001, rqliteHTTPPort, rqliteRaftPort)
}

// GenerateGatewayConfig generates a gateway configuration
func GenerateGatewayConfig(bootstrapPeers string) string {
	var peers []string
	if bootstrapPeers != "" {
		for _, p := range strings.Split(bootstrapPeers, ",") {
			if p = strings.TrimSpace(p); p != "" {
				peers = append(peers, p)
			}
		}
	}

	var peersYAML strings.Builder
	if len(peers) == 0 {
		peersYAML.WriteString("bootstrap_peers: []")
	} else {
		peersYAML.WriteString("bootstrap_peers:\n")
		for _, p := range peers {
			fmt.Fprintf(&peersYAML, "  - \"%s\"\n", p)
		}
	}

	return fmt.Sprintf(`listen_addr: ":6001"
client_namespace: "default"
rqlite_dsn: ""
%s
olric_servers:
  - "127.0.0.1:3320"
olric_timeout: "10s"
ipfs_cluster_api_url: "http://localhost:9094"
ipfs_api_url: "http://localhost:5001"
ipfs_timeout: "60s"
ipfs_replication_factor: 3
`, peersYAML.String())
}
diff --git a/pkg/cli/dev_commands.go b/pkg/cli/dev_commands.go
new file mode 100644
index 0000000..8d087b3
--- /dev/null
+++ b/pkg/cli/dev_commands.go
@@ -0,0 +1,191 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/DeBrosOfficial/network/pkg/environments/development"
+)
+
+// HandleDevCommand handles the dev command group
+func HandleDevCommand(args []string) {
+	if len(args) == 0 {
+		showDevHelp()
+		return
+	}
+
+	subcommand := args[0]
+	subargs := args[1:]
+
+	switch subcommand {
+	case "up":
+		handleDevUp(subargs)
+	case "down":
+		handleDevDown(subargs)
+	case "status":
+		handleDevStatus(subargs)
+	case "logs":
+		handleDevLogs(subargs)
+	case "help":
+		showDevHelp()
+	default:
+		fmt.Fprintf(os.Stderr, "Unknown dev subcommand: %s\n", subcommand)
+		showDevHelp()
+		os.Exit(1)
+	}
+}
+
+func showDevHelp() {
+	fmt.Printf("🚀 Development Environment Commands\n\n")
+	fmt.Printf("Usage: network-cli dev <subcommand> [options]\n\n")
+	fmt.Printf("Subcommands:\n")
+	fmt.Printf("  up               - Start development environment (bootstrap + 2 nodes + gateway)\n")
+	fmt.Printf("  down             - Stop all development services\n")
+	fmt.Printf("  status           - Show status of running services\n")
+	fmt.Printf("  logs <component> - Tail logs for a component\n")
+	fmt.Printf("  help             - Show this help\n\n")
+	fmt.Printf("Examples:\n")
+	fmt.Printf("  network-cli dev up\n")
+	fmt.Printf("  network-cli dev down\n")
+	fmt.Printf("  network-cli dev status\n")
+	fmt.Printf("  network-cli dev logs bootstrap --follow\n")
+}
+
+func handleDevUp(args []string) {
+	ctx := context.Background()
+
+	// Get home directory and .debros path
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
+		os.Exit(1)
+	}
+	debrosDir := filepath.Join(homeDir, ".debros")
+
+	// Step 1: Check dependencies
+	fmt.Printf("📋 Checking dependencies...\n\n")
+	checker := development.NewDependencyChecker()
+	if _, err := checker.CheckAll(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ %v\n", err)
+		os.Exit(1)
+	}
+	fmt.Printf("✓ All required dependencies available\n\n")
+
+	// Step 2: Check ports
+	fmt.Printf("🔌 Checking port availability...\n\n")
+	portChecker := development.NewPortChecker()
+	if _, err := portChecker.CheckAll(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ %v\n\n", err)
+		fmt.Fprintf(os.Stderr, "Port mapping:\n")
+		for port, service := range development.PortMap() {
+			fmt.Fprintf(os.Stderr, "  %d - %s\n", port, service)
+		}
+		fmt.Fprintf(os.Stderr, "\n")
+		os.Exit(1)
+	}
+	fmt.Printf("✓ All required ports available\n\n")
+
+	// Step 3: Ensure configs
+	fmt.Printf("⚙️ Preparing configuration files...\n\n")
+	ensurer := development.NewConfigEnsurer(debrosDir)
+	if err := ensurer.EnsureAll(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
+		os.Exit(1)
+	}
+	fmt.Printf("\n")
+
+	// Step 4: Start services
+	pm := development.NewProcessManager(debrosDir, os.Stdout)
+	if err := pm.StartAll(ctx); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Step 5: Show summary
+	fmt.Printf("🎉 Development environment is running!\n\n")
+	fmt.Printf("Key endpoints:\n")
+	fmt.Printf("  Gateway:        http://localhost:6001\n")
+	fmt.Printf("  Bootstrap IPFS: http://localhost:4501\n")
+	fmt.Printf("  Node2 IPFS:     http://localhost:4502\n")
+	fmt.Printf("  Node3 IPFS:     http://localhost:4503\n")
+	fmt.Printf("  Anon SOCKS:     127.0.0.1:9050\n")
+	fmt.Printf("  Olric Cache:    http://localhost:3320\n\n")
+	fmt.Printf("Useful commands:\n")
+	fmt.Printf("  network-cli dev status         - Show status\n")
+	fmt.Printf("  network-cli dev logs bootstrap - Bootstrap logs\n")
+	fmt.Printf("  network-cli dev down           - Stop all services\n\n")
+	fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
+}
+
+func handleDevDown(args []string) {
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
+		os.Exit(1)
+	}
+	debrosDir := filepath.Join(homeDir, ".debros")
+
+	pm := development.NewProcessManager(debrosDir, os.Stdout)
+	ctx := context.Background()
+
+	if err := pm.StopAll(ctx); err != nil {
+		fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", err)
+	}
+}
+
+func handleDevStatus(args []string) {
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
+		os.Exit(1)
+	}
+	debrosDir := filepath.Join(homeDir, ".debros")
+
+	pm := development.NewProcessManager(debrosDir, os.Stdout)
+	ctx := context.Background()
+
+	pm.Status(ctx)
+}
+
+func handleDevLogs(args []string) {
+	if len(args) == 0 {
+		fmt.Fprintf(os.Stderr, "Usage: network-cli dev logs <component> [--follow]\n")
+		fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, node2, node3, gateway, ipfs-bootstrap, ipfs-node2, ipfs-node3, olric, anon\n")
+		os.Exit(1)
+	}
+
+	component := args[0]
+	follow := len(args) > 1 && args[1] == "--follow"
+
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
+		os.Exit(1)
+	}
+	debrosDir := filepath.Join(homeDir, ".debros")
+
+	logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))
+	if _, err := os.Stat(logPath); os.IsNotExist(err) {
+		fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
+		os.Exit(1)
+	}
+
+	if follow {
+		// Stream the log until the user interrupts with Ctrl+C
+		fmt.Printf("Following %s (press Ctrl+C to stop)...\n\n", logPath)
+		// Invoke tail directly (not via "sh -c") so the log path is passed as a
+		// single argument even if it contains spaces or shell metacharacters
+		cmd := exec.Command("tail", "-f", logPath)
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		cmd.Stdin = os.Stdin
+		cmd.Run()
+	} else {
+		// Cat the file
+		data, _ := os.ReadFile(logPath)
+		fmt.Print(string(data))
+	}
+}
diff --git a/pkg/cli/prod_commands.go b/pkg/cli/prod_commands.go
new file mode 100644
index 0000000..262317f
--- /dev/null
+++ b/pkg/cli/prod_commands.go
@@ -0,0 +1,313 @@
+package cli
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/DeBrosOfficial/network/pkg/environments/production"
+)
+
+// HandleProdCommand handles production environment commands
+func HandleProdCommand(args []string) {
+	if len(args) == 0 {
+		showProdHelp()
+		return
+	}
+
+	subcommand := args[0]
+	subargs := args[1:]
+
+	switch subcommand {
+	case "install":
+		handleProdInstall(subargs)
+	case "upgrade":
+		handleProdUpgrade(subargs)
+	case "status":
+		handleProdStatus()
+	case "logs":
+		handleProdLogs(subargs)
+	case "uninstall":
+		handleProdUninstall()
+	case "help":
+		showProdHelp()
+	default:
+		fmt.Fprintf(os.Stderr, "Unknown prod subcommand: %s\n", subcommand)
+		showProdHelp()
+		os.Exit(1)
+	}
+}
+
+func showProdHelp() {
+	fmt.Printf("Production Environment Commands\n\n")
+	fmt.Printf("Usage: network-cli prod <subcommand> [options]\n\n")
+	fmt.Printf("Subcommands:\n")
+	fmt.Printf("  install   - Full production bootstrap (requires root/sudo)\n")
+	fmt.Printf("    Options:\n")
+	fmt.Printf("      --force         - Reconfigure all settings\n")
+	fmt.Printf("      --bootstrap     - Install as bootstrap node\n")
+	fmt.Printf("      --peers ADDRS   - Comma-separated bootstrap peers (for non-bootstrap)\n")
+	fmt.Printf("      --vps-ip IP     - VPS public IP address\n")
+	fmt.Printf("      --domain DOMAIN - Domain for HTTPS (optional)\n")
+	fmt.Printf("  upgrade   - Upgrade existing installation (requires root/sudo)\n")
+	fmt.Printf("  status    - Show status of production services\n")
+	fmt.Printf("  logs <service> - View production service logs\n")
+	fmt.Printf("    Options:\n")
+	fmt.Printf("      --follow        - Follow logs in real-time\n")
+	fmt.Printf("  uninstall - Remove production services (requires root/sudo)\n\n")
+	fmt.Printf("Examples:\n")
+	fmt.Printf("  sudo network-cli prod install --bootstrap\n")
+	fmt.Printf("  sudo network-cli prod install --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...\n")
+	fmt.Printf("  network-cli prod status\n")
+	fmt.Printf("  network-cli prod logs node --follow\n")
+}
+
+func handleProdInstall(args []string) {
+	// Parse arguments
+	force := false
+	isBootstrap := false
+	var vpsIP, domain, peersStr string
+
+	// Flag values are read by lookahead (args[i+1]); the loop also visits
+	// them, which is harmless for the non-flag values expected here.
+	for i, arg := range args {
+		switch arg {
+		case "--force":
+			force = true
+		case "--bootstrap":
+			isBootstrap = true
+		case "--peers":
+			if i+1 < len(args) {
+				peersStr = args[i+1]
+			}
+		case "--vps-ip":
+			if i+1 < len(args) {
+				vpsIP = args[i+1]
+			}
+		case "--domain":
+			if i+1 < len(args) {
+				domain = args[i+1]
+			}
+		}
+	}
+
+	// Parse bootstrap peers if provided
+	var bootstrapPeers []string
+	if peersStr != "" {
+		bootstrapPeers = strings.Split(peersStr, ",")
+	}
+
+	// Validate setup requirements
+	if os.Geteuid() != 0 {
+		fmt.Fprintf(os.Stderr, "❌ Production install must be run as root (use sudo)\n")
+		os.Exit(1)
+	}
+
+	debrosHome := "/home/debros"
+	setup := production.NewProductionSetup(debrosHome, os.Stdout, force)
+
+	// Phase 1: Check prerequisites
+	fmt.Printf("\n📋 Phase 1: Checking prerequisites...\n")
+	if err := setup.Phase1CheckPrerequisites(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Prerequisites check failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Phase 2: Provision environment
+	fmt.Printf("\n🛠️ Phase 2: Provisioning environment...\n")
+	if err := setup.Phase2ProvisionEnvironment(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Environment provisioning failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Phase 2b: Install binaries
+	fmt.Printf("\nPhase 2b: Installing binaries...\n")
+	if err := setup.Phase2bInstallBinaries(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Binary installation failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Phase 2c: Initialize services
+	nodeType := "node"
+	if isBootstrap {
+		nodeType = "bootstrap"
+	}
+	fmt.Printf("\nPhase 2c: Initializing services...\n")
+	if err := setup.Phase2cInitializeServices(nodeType); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Phase 3: Generate secrets
+	fmt.Printf("\n🔐 Phase 3: Generating secrets...\n")
+	if err := setup.Phase3GenerateSecrets(isBootstrap); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Secret generation failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Phase 4: Generate configs
+	fmt.Printf("\n⚙️ Phase 4: Generating configurations...\n")
+	enableHTTPS := domain != ""
+	if err := setup.Phase4GenerateConfigs(isBootstrap, bootstrapPeers, vpsIP, enableHTTPS, domain); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Configuration generation failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Phase 5: Create systemd services
+	fmt.Printf("\n🔧 Phase 5: Creating systemd services...\n")
+	if err := setup.Phase5CreateSystemdServices(nodeType); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Service creation failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Log completion
+	setup.LogSetupComplete("< peer ID from config >")
+	fmt.Printf("✅ Production installation complete!\n\n")
+}
+
+func handleProdUpgrade(args []string) {
+	// Parse arguments
+	force := false
+	for _, arg := range args {
+		if arg == "--force" {
+			force = true
+		}
+	}
+
+	if os.Geteuid() != 0 {
+		fmt.Fprintf(os.Stderr, "❌ Production upgrade must be run as root (use sudo)\n")
+		os.Exit(1)
+	}
+
+	debrosHome := "/home/debros"
+	fmt.Printf("🔄 Upgrading production installation...\n")
+	fmt.Printf("   This will preserve existing configurations and data\n\n")
+
+	// For now, just re-run the install with the force flag
+	setup := production.NewProductionSetup(debrosHome, os.Stdout, force)
+
+	if err := setup.Phase1CheckPrerequisites(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Prerequisites check failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	if err := setup.Phase2ProvisionEnvironment(); err != nil {
+		fmt.Fprintf(os.Stderr, "❌ Environment provisioning failed: %v\n", err)
+		os.Exit(1)
+	}
+
+	fmt.Printf("✅ Upgrade complete!\n")
+	fmt.Printf("   Services will use existing configurations\n")
+	fmt.Printf("   To restart services: sudo systemctl restart debros-*\n\n")
+}
+
+func handleProdStatus() {
+	fmt.Printf("Production Environment Status\n\n")
+
+	servicesList := []struct {
+		name string
+		desc string
+	}{
+		{"debros-ipfs-bootstrap", "IPFS Daemon (Bootstrap)"},
+		{"debros-ipfs-cluster-bootstrap", "IPFS Cluster (Bootstrap)"},
+		{"debros-rqlite-bootstrap", "RQLite Database (Bootstrap)"},
+		{"debros-olric", "Olric Cache Server"},
+		{"debros-node-bootstrap", "DeBros Node (Bootstrap)"},
+		{"debros-gateway", "DeBros Gateway"},
+	}
+
+	fmt.Printf("Services:\n")
+	for _, svc := range servicesList {
+		// systemctl is-active exits 0 only when the unit is active
+		err := exec.Command("systemctl", "is-active", "--quiet", svc.name).Run()
+		status := "❌ Inactive"
+		if err == nil {
+			status = "✅ Active"
+		}
+		fmt.Printf("  %s: %s\n", status, svc.desc)
+	}
+
+	fmt.Printf("\nDirectories:\n")
+	debrosDir := "/home/debros/.debros"
+	if _, err := os.Stat(debrosDir); err == nil {
+		fmt.Printf("  ✅ %s exists\n", debrosDir)
+	} else {
+		fmt.Printf("  ❌ %s not found\n", debrosDir)
+	}
+
+	fmt.Printf("\nView logs with: network-cli prod logs <service>\n")
}
+
+func handleProdLogs(args []string) {
+	if len(args) == 0 {
+		fmt.Fprintf(os.Stderr, "Usage: network-cli prod logs <service> [--follow]\n")
+		os.Exit(1)
+	}
+
+	service := args[0]
+	follow := false
+	if len(args) > 1 && (args[1] == "--follow" || args[1] == "-f") {
+		follow = true
+	}
+
+	if follow {
+		fmt.Printf("Following logs for %s (press Ctrl+C to stop)...\n\n", service)
+		cmd := exec.Command("journalctl", "-u", service, "-f")
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		cmd.Stdin = os.Stdin
+		cmd.Run()
+	} else {
+		cmd := exec.Command("journalctl", "-u", service, "-n", "50")
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		cmd.Run()
+	}
+}
+
+func handleProdUninstall() {
+	if os.Geteuid() != 0 {
+		fmt.Fprintf(os.Stderr, "❌ Production uninstall must be run as root (use sudo)\n")
+		os.Exit(1)
+	}
+
+	fmt.Printf("⚠️ This will stop and remove all DeBros production services\n")
+	fmt.Printf("⚠️ Configuration and data will be preserved in /home/debros/.debros\n\n")
+	fmt.Printf("Continue? (yes/no): ")
+
+	reader := bufio.NewReader(os.Stdin)
+	response, _ := reader.ReadString('\n')
+	response = strings.ToLower(strings.TrimSpace(response))
+
+	if response != "yes" && response != "y" {
+		fmt.Printf("Uninstall cancelled\n")
+		return
+	}
+
+	services := []string{
+		"debros-gateway",
+		"debros-node-node",
+		"debros-node-bootstrap",
+		"debros-olric",
+		"debros-rqlite-bootstrap",
+		"debros-rqlite-node",
+		"debros-ipfs-cluster-bootstrap",
+		"debros-ipfs-cluster-node",
+		"debros-ipfs-bootstrap",
+		"debros-ipfs-node",
+	}
+
+	fmt.Printf("Stopping services...\n")
+	for _, svc := range services {
+		exec.Command("systemctl", "stop", svc).Run()
+		exec.Command("systemctl", "disable", svc).Run()
+		unitPath := filepath.Join("/etc/systemd/system", svc+".service")
+		os.Remove(unitPath)
+	}
+
+	exec.Command("systemctl", "daemon-reload").Run()
+	fmt.Printf("✅ Services uninstalled\n")
+	fmt.Printf("   Configuration and data preserved in /home/debros/.debros\n")
+	fmt.Printf("   To remove all data: rm -rf /home/debros/.debros\n\n")
+}
diff --git a/pkg/cli/rqlite_commands.go b/pkg/cli/rqlite_commands.go
deleted file mode 100644
index b9961cb..0000000
--- a/pkg/cli/rqlite_commands.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package cli
-
-import (
-	"fmt"
-	"net"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"github.com/DeBrosOfficial/network/pkg/config"
-	"gopkg.in/yaml.v3"
-)
-
-// HandleRQLiteCommand handles rqlite-related commands
-func HandleRQLiteCommand(args []string) {
-	if len(args) == 0 {
-		showRQLiteHelp()
-		return
-	}
-
-	if runtime.GOOS != "linux" {
-		fmt.Fprintf(os.Stderr, "❌ RQLite commands are only supported on Linux\n")
-		os.Exit(1)
-	}
-
-	subcommand := args[0]
-	subargs := args[1:]
-
-	switch subcommand {
-	case "fix":
-		handleRQLiteFix(subargs)
-	case "help":
-		showRQLiteHelp()
-	default:
-		fmt.Fprintf(os.Stderr, "Unknown rqlite subcommand: %s\n", subcommand)
-		showRQLiteHelp()
-		os.Exit(1)
-	}
-}
-
-func showRQLiteHelp() {
-	fmt.Printf("🗄️ RQLite Commands\n\n")
-	fmt.Printf("Usage: network-cli rqlite <subcommand> [options]\n\n")
-	fmt.Printf("Subcommands:\n")
-	fmt.Printf("  fix - Fix misconfigured join address and clean stale raft state\n\n")
-	fmt.Printf("Description:\n")
-	fmt.Printf("  The 'fix' command automatically repairs common rqlite cluster issues:\n")
-	fmt.Printf("  - Corrects join address from HTTP port (5001) to Raft port (7001) if misconfigured\n")
-	fmt.Printf("  - Cleans stale raft state that prevents proper cluster formation\n")
-	fmt.Printf("  - Restarts the node service with corrected configuration\n\n")
-	fmt.Printf("Requirements:\n")
-	fmt.Printf("  - Must be run as root (use sudo)\n")
-	fmt.Printf("  - Only works on non-bootstrap nodes (nodes with join_address configured)\n")
-	fmt.Printf("  - Stops and restarts the debros-node service\n\n")
-	fmt.Printf("Examples:\n")
-	fmt.Printf("  sudo network-cli rqlite fix\n")
-}
-
-func handleRQLiteFix(args []string) {
-	requireRoot()
-
-	// Parse optional flags
-	dryRun := false
-	for _, arg := range args {
-		if arg == "--dry-run" || arg == "-n" {
-			dryRun = true
-		}
-	}
-
-	if dryRun {
-		fmt.Printf("🔍 Dry-run mode - no changes will be made\n\n")
-	}
-
-	fmt.Printf("🔧 RQLite Cluster Repair\n\n")
-
-	// Load config
-	configPath, err := config.DefaultPath("node.yaml")
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "❌ Failed to determine config path: %v\n", err)
-		os.Exit(1)
-	}
-
-	cfg, err := loadConfigForRepair(configPath)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "❌ Failed to load config: %v\n", err)
-		os.Exit(1)
-	}
-
-	// Check 
if this is a bootstrap node - if cfg.Node.Type == "bootstrap" || cfg.Database.RQLiteJoinAddress == "" { - fmt.Printf("ℹ️ This is a bootstrap node (no join address configured)\n") - fmt.Printf(" Bootstrap nodes don't need repair - they are the cluster leader\n") - fmt.Printf(" Run this command on follower nodes instead\n") - return - } - - joinAddr := cfg.Database.RQLiteJoinAddress - - // Check if join address needs fixing - needsConfigFix := needsFix(joinAddr, cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort) - var fixedAddr string - - if needsConfigFix { - fmt.Printf("⚠️ Detected misconfigured join address: %s\n", joinAddr) - fmt.Printf(" Expected Raft port (%d) but found HTTP port (%d)\n", cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort) - - // Extract host from join address - host, _, err := parseJoinAddress(joinAddr) - if err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to parse join address: %v\n", err) - os.Exit(1) - } - - // Fix the join address - rqlite expects Raft port for -join - fixedAddr = fmt.Sprintf("%s:%d", host, cfg.Database.RQLiteRaftPort) - fmt.Printf(" Corrected address: %s\n\n", fixedAddr) - } else { - fmt.Printf("✅ Join address looks correct: %s\n", joinAddr) - fmt.Printf(" Will clean stale raft state to ensure proper cluster formation\n\n") - fixedAddr = joinAddr // No change needed - } - - if dryRun { - fmt.Printf("🔍 Dry-run: Would clean raft state") - if needsConfigFix { - fmt.Printf(" and fix config") - } - fmt.Printf("\n") - return - } - - // Stop the service - fmt.Printf("⏹️ Stopping debros-node service...\n") - if err := stopService("debros-node"); err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to stop service: %v\n", err) - os.Exit(1) - } - fmt.Printf(" ✓ Service stopped\n\n") - - // Update config file if needed - if needsConfigFix { - fmt.Printf("📝 Updating configuration file...\n") - if err := updateConfigJoinAddress(configPath, fixedAddr); err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to update config: %v\n", err) - fmt.Fprintf(os.Stderr, " Service is stopped - please fix manually and restart\n") - os.Exit(1) - } - fmt.Printf(" ✓ Config updated: %s\n\n", configPath) - } - - // Clean raft state - fmt.Printf("🧹 Cleaning stale raft state...\n") - dataDir := expandDataDir(cfg.Node.DataDir) - raftDir := filepath.Join(dataDir, "rqlite", "raft") - if err := cleanRaftState(raftDir); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ Failed to clean raft state: %v\n", err) - fmt.Fprintf(os.Stderr, " Continuing anyway - raft state may still exist\n") - } else { - fmt.Printf(" ✓ Raft state cleaned\n\n") - } - - // Restart the service - fmt.Printf("🚀 Restarting debros-node service...\n") - if err := startService("debros-node"); err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to start service: %v\n", err) - fmt.Fprintf(os.Stderr, " Config has been fixed - please restart manually:\n") - fmt.Fprintf(os.Stderr, " sudo systemctl start debros-node\n") - os.Exit(1) - } - fmt.Printf(" ✓ Service started\n\n") - - fmt.Printf("✅ Repair complete!\n\n") - fmt.Printf("The node should now join the cluster correctly.\n") - fmt.Printf("Monitor logs with: sudo network-cli service logs node --follow\n") -} - -func loadConfigForRepair(path string) (*config.Config, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open config file: %w", err) - } - defer file.Close() - - var cfg config.Config - if err := config.DecodeStrict(file, &cfg); err != nil { - return nil, fmt.Errorf("failed to parse config: %w", err) - } - - return &cfg, nil -} - -func 
needsFix(joinAddr string, raftPort int, httpPort int) bool { - if joinAddr == "" { - return false - } - - // Remove http:// or https:// prefix if present - addr := joinAddr - if strings.HasPrefix(addr, "http://") { - addr = strings.TrimPrefix(addr, "http://") - } else if strings.HasPrefix(addr, "https://") { - addr = strings.TrimPrefix(addr, "https://") - } - - // Parse host:port - _, port, err := net.SplitHostPort(addr) - if err != nil { - return false // Can't parse, assume it's fine - } - - // Check if port matches HTTP port (incorrect - should be Raft port) - if port == fmt.Sprintf("%d", httpPort) { - return true - } - - // If it matches Raft port, it's correct - if port == fmt.Sprintf("%d", raftPort) { - return false - } - - // Unknown port - assume it's fine - return false -} - -func parseJoinAddress(joinAddr string) (host, port string, err error) { - // Remove http:// or https:// prefix if present - addr := joinAddr - if strings.HasPrefix(addr, "http://") { - addr = strings.TrimPrefix(addr, "http://") - } else if strings.HasPrefix(addr, "https://") { - addr = strings.TrimPrefix(addr, "https://") - } - - host, port, err = net.SplitHostPort(addr) - if err != nil { - return "", "", fmt.Errorf("invalid join address format: %w", err) - } - - return host, port, nil -} - -func updateConfigJoinAddress(configPath string, newJoinAddr string) error { - // Read the file - data, err := os.ReadFile(configPath) - if err != nil { - return fmt.Errorf("failed to read config file: %w", err) - } - - // Parse YAML into a generic map to preserve structure - var yamlData map[string]interface{} - if err := yaml.Unmarshal(data, &yamlData); err != nil { - return fmt.Errorf("failed to parse YAML: %w", err) - } - - // Navigate to database.rqlite_join_address - database, ok := yamlData["database"].(map[string]interface{}) - if !ok { - return fmt.Errorf("database section not found in config") - } - - database["rqlite_join_address"] = newJoinAddr - - // Write back to file - updatedData, err := yaml.Marshal(yamlData) - if err != nil { - return fmt.Errorf("failed to marshal YAML: %w", err) - } - - if err := os.WriteFile(configPath, updatedData, 0644); err != nil { - return fmt.Errorf("failed to write config file: %w", err) - } - - return nil -} - -func expandDataDir(dataDir string) string { - expanded := os.ExpandEnv(dataDir) - if strings.HasPrefix(expanded, "~") { - home, err := os.UserHomeDir() - if err != nil { - return expanded // Fallback to original - } - expanded = filepath.Join(home, expanded[1:]) - } - return expanded -} - -func cleanRaftState(raftDir string) error { - if _, err := os.Stat(raftDir); os.IsNotExist(err) { - return nil // Directory doesn't exist, nothing to clean - } - - // Remove raft state files - filesToRemove := []string{ - "peers.json", - "peers.json.backup", - "peers.info", - "raft.db", - } - - for _, file := range filesToRemove { - filePath := filepath.Join(raftDir, file) - if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to remove %s: %w", filePath, err) - } - } - - return nil -} - -func stopService(serviceName string) error { - cmd := exec.Command("systemctl", "stop", serviceName) - if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl stop failed: %w", err) - } - return nil -} - -func startService(serviceName string) error { - cmd := exec.Command("systemctl", "start", serviceName) - if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl start failed: %w", err) - } - return nil -} diff --git a/pkg/cli/service.go 
b/pkg/cli/service.go deleted file mode 100644 index 6379db2..0000000 --- a/pkg/cli/service.go +++ /dev/null @@ -1,243 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "os/exec" - "runtime" - "strings" -) - -// HandleServiceCommand handles systemd service management commands -func HandleServiceCommand(args []string) { - if len(args) == 0 { - showServiceHelp() - return - } - - if runtime.GOOS != "linux" { - fmt.Fprintf(os.Stderr, "❌ Service commands are only supported on Linux with systemd\n") - os.Exit(1) - } - - subcommand := args[0] - subargs := args[1:] - - switch subcommand { - case "start": - handleServiceStart(subargs) - case "stop": - handleServiceStop(subargs) - case "restart": - handleServiceRestart(subargs) - case "status": - handleServiceStatus(subargs) - case "logs": - handleServiceLogs(subargs) - case "help": - showServiceHelp() - default: - fmt.Fprintf(os.Stderr, "Unknown service subcommand: %s\n", subcommand) - showServiceHelp() - os.Exit(1) - } -} - -func showServiceHelp() { - fmt.Printf("🔧 Service Management Commands\n\n") - fmt.Printf("Usage: network-cli service [options]\n\n") - fmt.Printf("Subcommands:\n") - fmt.Printf(" start - Start services\n") - fmt.Printf(" stop - Stop services\n") - fmt.Printf(" restart - Restart services\n") - fmt.Printf(" status - Show service status\n") - fmt.Printf(" logs - View service logs\n\n") - fmt.Printf("Targets:\n") - fmt.Printf(" node - DeBros node service\n") - fmt.Printf(" gateway - DeBros gateway service\n") - fmt.Printf(" all - All DeBros services\n\n") - fmt.Printf("Logs Options:\n") - fmt.Printf(" --follow - Follow logs in real-time (-f)\n") - fmt.Printf(" --since=