Merge pull request #68 from DeBrosOfficial/nightly

Bugs, IPFS, Olric
This commit is contained in:
anonpenguin 2025-11-14 08:59:01 +02:00 committed by GitHub
commit a8b3574a4c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
101 changed files with 20713 additions and 5106 deletions

View File

@ -30,6 +30,15 @@ if [ -z "$OTHER_FILES" ]; then
exit 0
fi
# Check for skip flag
# To skip changelog generation, set SKIP_CHANGELOG=1 before committing:
# SKIP_CHANGELOG=1 git commit -m "your message"
# SKIP_CHANGELOG=1 git commit
if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then
echo -e "${YELLOW}Skipping changelog update (SKIP_CHANGELOG is set)${NOCOLOR}"
exit 0
fi
# Update changelog before commit
if [ -f "$CHANGELOG_SCRIPT" ]; then
echo -e "\n${CYAN}Updating changelog...${NOCOLOR}"

View File

@ -1,6 +1,6 @@
# GoReleaser Configuration for DeBros Network
# Builds and releases the network-cli binary for multiple platforms
# Other binaries (node, gateway, identity) are installed via: network-cli setup
# Builds and releases the dbn binary for multiple platforms
# Other binaries (node, gateway, identity) are installed via: dbn setup
project_name: debros-network
@ -8,10 +8,10 @@ env:
- GO111MODULE=on
builds:
# network-cli binary - only build the CLI
- id: network-cli
# dbn binary - only build the CLI
- id: dbn
main: ./cmd/cli
binary: network-cli
binary: dbn
goos:
- linux
- darwin
@ -23,10 +23,10 @@ builds:
- -X main.version={{.Version}}
- -X main.commit={{.ShortCommit}}
- -X main.date={{.Date}}
mod_timestamp: '{{ .CommitTimestamp }}'
mod_timestamp: "{{ .CommitTimestamp }}"
archives:
# Tar.gz archives for network-cli
# Tar.gz archives for dbn
- id: binaries
format: tar.gz
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
@ -50,10 +50,10 @@ changelog:
abbrev: -1
filters:
exclude:
- '^docs:'
- '^test:'
- '^chore:'
- '^ci:'
- "^docs:"
- "^test:"
- "^chore:"
- "^ci:"
- Merge pull request
- Merge branch

View File

@ -11,7 +11,7 @@
"program": "./cmd/gateway",
"env": {
"GATEWAY_ADDR": ":6001",
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
"GATEWAY_NAMESPACE": "default",
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
}
@ -36,7 +36,7 @@
"program": "./cmd/gateway",
"env": {
"GATEWAY_ADDR": ":6001",
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
"GATEWAY_NAMESPACE": "default",
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
}

View File

@ -13,11 +13,756 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Deprecated
### Fixed
## [0.53.18] - 2025-11-03
## [0.69.13] - 2025-11-14
### Added
### Changed
- The Gateway service now waits for the Olric cache service to start before attempting initialization.
- Improved robustness of Olric cache client initialization with retry logic and exponential backoff.
### Deprecated
### Removed
### Fixed
- Corrected the default path logic for 'gateway.yaml' to prioritize the production data directory while maintaining fallback to legacy paths.
## [0.69.12] - 2025-11-14
### Added
- The `prod install` command now requires the `--cluster-secret` flag for all non-bootstrap nodes to ensure correct IPFS Cluster configuration.
### Changed
- Updated IPFS configuration to bind API and Gateway addresses to `0.0.0.0` instead of `127.0.0.1` for better network accessibility.
### Deprecated
### Removed
### Fixed
## [0.69.11] - 2025-11-13
### Added
- Added a new comprehensive shell script (`scripts/test-cluster-health.sh`) for checking the health and replication status of RQLite, IPFS, and IPFS Cluster across production environments.
### Changed
- Improved RQLite cluster discovery logic to ensure `peers.json` is correctly generated and includes the local node, which is crucial for reliable cluster recovery.
- Refactored logging across discovery and RQLite components for cleaner, more concise output, especially for routine operations.
- Updated the installation and upgrade process to correctly configure IPFS Cluster bootstrap peers using the node's public IP, improving cluster formation reliability.
### Deprecated
### Removed
### Fixed
- Fixed an issue where RQLite recovery operations (like clearing Raft state) did not correctly force the regeneration of `peers.json`, preventing successful cluster rejoin.
- Corrected the port calculation logic for IPFS Cluster to ensure the correct LibP2P listen port (9098) is used for bootstrap peer addressing.
## [0.69.10] - 2025-11-13
### Added
- Automatic health monitoring and recovery for RQLite cluster split-brain scenarios.
- RQLite now waits indefinitely for the minimum cluster size to be met before starting, preventing single-node cluster formation.
### Changed
- Updated default IPFS swarm port from 4001 to 4101 to avoid conflicts with LibP2P.
### Deprecated
### Removed
### Fixed
- Resolved an issue where RQLite could start as a single-node cluster if peer discovery was slow, by enforcing minimum cluster size before startup.
- Improved cluster recovery logic to correctly use `bootstrap-expect` for new clusters and ensure proper process restart during recovery.
## [0.69.9] - 2025-11-12
### Added
- Added automatic recovery logic for RQLite (database) nodes stuck in a configuration mismatch, which attempts to clear stale Raft state if peers have more recent data.
- Added logic to discover IPFS Cluster peers directly from the LibP2P host's peerstore, improving peer discovery before the Cluster API is fully operational.
### Changed
- Improved the IPFS Cluster configuration update process to prioritize writing to the `peerstore` file before updating `service.json`, ensuring the source of truth is updated first.
### Deprecated
### Removed
### Fixed
## [0.69.8] - 2025-11-12
### Added
- Improved `dbn prod start` to automatically unmask and re-enable services if they were previously masked or disabled.
- Added automatic discovery and configuration of all IPFS Cluster peers during runtime to improve cluster connectivity.
### Changed
- Enhanced `dbn prod start` and `dbn prod stop` reliability by adding service state resets, retries, and ensuring services are disabled when stopped.
- Filtered peer exchange addresses in LibP2P discovery to only include the standard LibP2P port (4001), preventing exposure of internal service ports.
### Deprecated
### Removed
### Fixed
- Improved IPFS Cluster bootstrap configuration repair logic to automatically infer and update bootstrap peer addresses if the bootstrap node is available.
## [0.69.7] - 2025-11-12
### Added
### Changed
- Improved logic for determining Olric server addresses during configuration generation, especially for bootstrap and non-bootstrap nodes.
- Enhanced IPFS cluster configuration to correctly handle IPv6 addresses when updating bootstrap peers.
### Deprecated
### Removed
### Fixed
## [0.69.6] - 2025-11-12
### Added
- Improved production service health checks and port availability validation during install, upgrade, start, and restart commands.
- Added service aliases (node, ipfs, cluster, gateway, olric) to `dbn prod logs` command for easier log viewing.
### Changed
- Updated node configuration logic to correctly advertise public IP addresses in multiaddrs (for P2P discovery) and RQLite addresses, improving connectivity for nodes behind NAT/firewalls.
- Enhanced `dbn prod install` and `dbn prod upgrade` to automatically detect and preserve existing VPS IP, domain, and cluster join information.
- Improved RQLite cluster discovery to automatically replace localhost/loopback addresses with the actual public IP when exchanging metadata between peers.
- Updated `dbn prod install` to require `--vps-ip` for all node types (bootstrap and regular) for proper network configuration.
- Improved error handling and robustness in the installation script when fetching the latest release from GitHub.
### Deprecated
### Removed
### Fixed
- Fixed an issue where the RQLite process would wait indefinitely for a join target; now uses a 5-minute timeout.
- Corrected the location of the gateway configuration file reference in the README.
## [0.69.5] - 2025-11-11
### Added
### Changed
- Moved the default location for `gateway.yaml` configuration file from `configs/` to the new `data/` directory for better organization.
- Updated configuration path logic to search for `gateway.yaml` in the new `data/` directory first.
### Deprecated
### Removed
### Fixed
## [0.69.4] - 2025-11-11
### Added
### Changed
- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-*).
- Improved log file provisioning to only create necessary log files based on the node type being installed (bootstrap or node).
### Deprecated
### Removed
### Fixed
## [0.69.3] - 2025-11-11
### Added
- Added `--ignore-resource-checks` flag to the install command to skip disk, RAM, and CPU prerequisite validation.
### Changed
### Deprecated
### Removed
### Fixed
## [0.69.2] - 2025-11-11
### Added
- Added `--no-pull` flag to `dbn prod upgrade` to skip git repository updates and use existing source code.
### Changed
- Removed deprecated environment management commands (`env`, `devnet`, `testnet`, `local`).
- Removed deprecated network commands (`health`, `peers`, `status`, `peer-id`, `connect`, `query`, `pubsub`) from the main CLI interface.
### Deprecated
### Removed
### Fixed
## [0.69.1] - 2025-11-11
### Added
- Added automatic service stopping before binary upgrades during the `prod upgrade` process to ensure a clean update.
- Added logic to preserve existing configuration settings (like `bootstrap_peers`, `domain`, and `rqlite_join_address`) when regenerating configurations during `prod upgrade`.
### Changed
- Improved the `prod upgrade` process to be more robust by preserving critical configuration details and gracefully stopping services.
### Deprecated
### Removed
### Fixed
## [0.69.0] - 2025-11-11
### Added
- Added comprehensive documentation for setting up HTTPS using a domain name, including configuration steps for both installation and existing setups.
- Added the `--force` flag to the `install` command for reconfiguring all settings.
- Added new log targets (`ipfs-cluster`, `rqlite`, `olric`) and improved the `dbn prod logs` command documentation.
### Changed
- Improved the IPFS Cluster configuration logic to ensure the cluster secret and IPFS API port are correctly synchronized during updates.
- Refined the directory structure creation process to ensure node-specific data directories are created only when initializing services.
### Deprecated
### Removed
### Fixed
## [0.68.1] - 2025-11-11
### Added
- Pre-create log files during setup to ensure correct permissions for systemd logging.
### Changed
- Improved binary installation process to handle copying files individually, preventing potential shell wildcard issues.
- Enhanced ownership fixing logic during installation to ensure all files created by root (especially during service initialization) are correctly owned by the 'debros' user.
### Deprecated
### Removed
### Fixed
## [0.68.0] - 2025-11-11
### Added
- Added comprehensive documentation for production deployment, including installation, upgrade, service management, and troubleshooting.
- Added new CLI commands (`dbn prod start`, `dbn prod stop`, `dbn prod restart`) for convenient management of production systemd services.
### Changed
- Updated IPFS configuration during production installation to use port 4501 for the API (to avoid conflicts with RQLite on port 5001) and port 8080 for the Gateway.
### Deprecated
### Removed
### Fixed
- Ensured that IPFS configuration automatically disables AutoConf when a private swarm key is present during installation and upgrade, preventing startup errors.
## [0.67.7] - 2025-11-11
### Added
- Added support for specifying the Git branch (main or nightly) during `prod install` and `prod upgrade`.
- The chosen branch is now saved and automatically used for future upgrades unless explicitly overridden.
### Changed
- Updated help messages and examples for production commands to include branch options.
### Deprecated
### Removed
### Fixed
## [0.67.6] - 2025-11-11
### Added
### Changed
- The binary installer now updates the source repository if it already exists, instead of only cloning it if missing.
### Deprecated
### Removed
### Fixed
- Resolved an issue where disabling AutoConf in the IPFS repository could leave 'auto' placeholders in the config, causing startup errors.
## [0.67.5] - 2025-11-11
### Added
- Added `--restart` option to `dbn prod upgrade` to automatically restart services after upgrade.
- The gateway now supports an optional `--config` flag to specify the configuration file path.
### Changed
- Improved `dbn prod upgrade` process to better handle existing installations, including detecting node type and ensuring configurations are updated to the latest format.
- Configuration loading logic for `node` and `gateway` commands now correctly handles absolute paths passed via command line or systemd.
### Deprecated
### Removed
### Fixed
- Fixed an issue during production upgrades where IPFS repositories in private swarms might fail to start due to `AutoConf` not being disabled.
## [0.67.4] - 2025-11-11
### Added
### Changed
- Improved configuration file loading logic to support absolute paths for config files.
- Updated IPFS Cluster initialization during setup to run `ipfs-cluster-service init` and automatically configure the cluster secret.
- IPFS repositories initialized with a private swarm key will now automatically disable AutoConf.
### Deprecated
### Removed
### Fixed
- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.debros/`) and production (`~/.debros/configs/`) directories.
## [0.67.3] - 2025-11-11
### Added
### Changed
- Improved reliability of IPFS (Kubo) installation by switching from a single install script to the official step-by-step download and extraction process.
- Updated IPFS (Kubo) installation to use version v0.38.2.
- Enhanced binary installation routines (RQLite, IPFS, Go) to ensure the installed binaries are immediately available in the current process's PATH.
### Deprecated
### Removed
### Fixed
- Fixed potential installation failures for RQLite by adding error checking to the binary copy command.
## [0.67.2] - 2025-11-11
### Added
- Added a new utility function to reliably resolve the full path of required external binaries (like ipfs, rqlited, etc.).
### Changed
- Improved service initialization by validating the availability and path of all required external binaries before creating systemd service units.
- Updated systemd service generation logic to use the resolved, fully-qualified paths for external binaries instead of relying on hardcoded paths.
### Deprecated
### Removed
### Fixed
- Changed IPFS initialization from a warning to a fatal error if the repo fails to initialize, ensuring setup stops on critical failures.
## [0.67.1] - 2025-11-11
### Added
### Changed
- Improved disk space check logic to correctly check the parent directory if the specified path does not exist.
### Deprecated
### Removed
### Fixed
- Fixed an issue in the installation script where the extracted CLI binary might be named 'dbn' instead of 'network-cli', ensuring successful installation regardless of the extracted filename.
## [0.67.0] - 2025-11-11
### Added
- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.
### Changed
- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.
### Deprecated
### Removed
### Fixed
- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
## [0.67.0] - 2025-11-11
### Added
- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.
### Changed
- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.
### Deprecated
### Removed
### Fixed
- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
## [0.66.1] - 2025-11-11
### Added
### Changed
- Allow bootstrap nodes to optionally define a join address to synchronize with another bootstrap cluster.
### Deprecated
### Removed
### Fixed
## [0.66.0] - 2025-11-11
### Added
- Pre-installation checks for minimum system resources (10GB disk space, 2GB RAM, 2 CPU cores) are now performed during setup.
- All systemd services (IPFS, RQLite, Olric, Node, Gateway) now log directly to dedicated files in the logs directory instead of using the system journal.
### Changed
- Improved logging instructions in the setup completion message to reference the new dedicated log files.
### Deprecated
### Removed
### Fixed
## [0.65.0] - 2025-11-11
### Added
- Expanded the local development environment (`dbn dev up`) from 3 nodes to 5 nodes (2 bootstraps and 3 regular nodes) for better testing of cluster resilience and quorum.
- Added a new `bootstrap2` node configuration and service to the development topology.
### Changed
- Updated the `dbn dev up` command to configure and start all 5 nodes and associated services (IPFS, RQLite, IPFS Cluster).
- Modified RQLite and LibP2P health checks in the development environment to require a quorum of 3 out of 5 nodes.
- Refactored development environment configuration logic using a new `Topology` structure for easier management of node ports and addresses.
### Deprecated
### Removed
### Fixed
- Ensured that secondary bootstrap nodes can correctly join the primary RQLite cluster in the development environment.
## [0.64.1] - 2025-11-10
### Added
### Changed
- Improved the accuracy of the Raft log index reporting by falling back to reading persisted snapshot metadata from disk if the running RQLite instance is not yet reachable or reports a zero index.
### Deprecated
### Removed
### Fixed
## [0.64.0] - 2025-11-10
### Added
- Comprehensive End-to-End (E2E) test suite for Gateway API endpoints (Cache, RQLite, Storage, Network, Auth).
- New E2E tests for concurrent operations and TTL expiry in the distributed cache.
- New E2E tests for LibP2P peer connectivity and discovery.
### Changed
- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.debros` configuration files, removing the need for environment variables.
- The `/v1/network/peers` endpoint now returns a flattened list of multiaddresses for all connected peers.
- Improved robustness of Cache API handlers to correctly identify and return 404 (Not Found) errors when keys are missing, even when wrapped by underlying library errors.
- The RQLite transaction handler now supports the legacy `statements` array format in addition to the `ops` array format for easier use.
- The RQLite schema endpoint now returns tables under the `tables` key instead of `objects`.
### Deprecated
### Removed
### Fixed
- Corrected IPFS Add operation to return the actual file size (byte count) instead of the DAG size in the response.
## [0.63.3] - 2025-11-10
### Added
### Changed
- Improved RQLite cluster stability by automatically clearing stale Raft state on startup if peers have a higher log index, allowing the node to join cleanly.
### Deprecated
### Removed
### Fixed
## [0.63.2] - 2025-11-10
### Added
### Changed
- Improved process termination logic in development environments to ensure child processes are also killed.
- Enhanced the `dev-kill-all.sh` script to reliably kill all processes using development ports, including orphaned processes and their children.
### Deprecated
### Removed
### Fixed
## [0.63.1] - 2025-11-10
### Added
### Changed
- Increased the default minimum cluster size for database environments from 1 to 3.
### Deprecated
### Removed
### Fixed
- Prevented unnecessary cluster recovery attempts when a node starts up as the first node (fresh bootstrap).
## [0.63.0] - 2025-11-10
### Added
- Added a new `kill` command to the Makefile for forcefully shutting down all development processes.
- Introduced a new `stop` command in the Makefile for graceful shutdown of development processes.
### Changed
- The `kill` command now performs a graceful shutdown attempt followed by a force kill of any lingering processes and verifies that development ports are free.
### Deprecated
### Removed
### Fixed
## [0.62.0] - 2025-11-10
### Added
- The `prod status` command now correctly checks for both 'bootstrap' and 'node' service variants.
### Changed
- The production installation process now generates secrets (like the cluster secret and peer ID) before initializing services. This ensures all necessary secrets are available when services start.
- The `prod install` command now displays the actual Peer ID upon completion instead of a placeholder.
### Deprecated
### Removed
### Fixed
- Fixed an issue where IPFS Cluster initialization was using a hardcoded configuration file instead of relying on the standard `ipfs-cluster-service init` process.
## [0.61.0] - 2025-11-10
### Added
- Introduced a new simplified authentication flow (`dbn auth login`) that allows users to generate an API key directly from a wallet address without signature verification (for development/testing purposes).
- Added a new `PRODUCTION_INSTALL.md` guide for production deployment using the `dbn prod` command suite.
### Changed
- Renamed the primary CLI binary from `network-cli` to `dbn` across all configurations, documentation, and source code.
- Refactored the IPFS configuration logic in the development environment to directly modify the IPFS config file instead of relying on shell commands, improving stability.
- Improved the IPFS Cluster peer count logic to correctly handle NDJSON streaming responses from the `/peers` endpoint.
- Enhanced RQLite connection logic to retry connecting to the database if the store is not yet open, particularly for joining nodes during recovery, improving cluster stability.
### Deprecated
### Removed
### Fixed
## [0.60.1] - 2025-11-09
### Added
- Improved IPFS Cluster startup logic in development environment to ensure proper peer discovery and configuration.
### Changed
- Refactored IPFS Cluster initialization in the development environment to use a multi-phase startup (bootstrap first, then followers) and explicitly clean stale cluster state (pebble, peerstore) before initialization.
### Deprecated
### Removed
### Fixed
- Fixed an issue where IPFS Cluster nodes in the development environment might fail to join due to incorrect bootstrap configuration or stale state.
## [0.60.0] - 2025-11-09
### Added
- Introduced comprehensive `dbn dev` commands for managing the local development environment (start, stop, status, logs).
- Added `dbn prod` commands for streamlined production installation, upgrade, and service management on Linux systems (requires root).
### Changed
- Refactored `Makefile` targets (`dev` and `kill`) to use the new `dbn dev up` and `dbn dev down` commands, significantly simplifying the development workflow.
- Removed deprecated `dbn config`, `dbn setup`, `dbn service`, and `dbn rqlite` commands, consolidating functionality under `dev` and `prod`.
### Deprecated
### Removed
### Fixed
## [0.59.2] - 2025-11-08
### Added
- Added health checks to the installation script to verify the gateway and node services are running after setup or upgrade.
- The installation script now attempts to verify the downloaded binary using checksums.txt if available.
- Added checks in the CLI setup to ensure systemd is available before attempting to create service files.
### Changed
- Improved the installation script to detect existing installations, stop services before upgrading, and restart them afterward to minimize downtime.
- Enhanced the CLI setup process by detecting the VPS IP address earlier and improving validation feedback for cluster secrets and swarm keys.
- Modified directory setup to log warnings instead of exiting if `chown` fails, providing manual instructions for fixing ownership issues.
- Improved the HTTPS configuration flow to check for port 80/443 availability before prompting for a domain name.
### Deprecated
### Removed
### Fixed
## [0.59.1] - 2025-11-08
### Added
### Changed
- Improved interactive setup to prompt for existing IPFS Cluster secret and Swarm key, allowing easier joining of existing private networks.
- Updated default IPFS API URL in configuration files from `http://localhost:9105` to the standard `http://localhost:5001`.
- Updated systemd service files (debros-ipfs.service and debros-ipfs-cluster.service) to correctly determine and use the IPFS and Cluster repository paths.
### Deprecated
### Removed
### Fixed
## [0.59.0] - 2025-11-08
### Added
- Added support for asynchronous pinning of uploaded files, improving upload speed.
- Added an optional `pin` flag to the storage upload endpoint to control whether content is pinned (defaults to true).
### Changed
- Improved handling of IPFS Cluster responses during the Add operation to correctly process streaming NDJSON output.
### Deprecated
### Removed
### Fixed
## [0.58.0] - 2025-11-07
### Added
- Added default configuration for IPFS Cluster and IPFS API settings in node and gateway configurations.
- Added `ipfs` configuration section to node configuration, including settings for cluster API URL, replication factor, and encryption.
### Changed
- Improved error logging for cache operations in the Gateway.
### Deprecated
### Removed
### Fixed
## [0.57.0] - 2025-11-07
### Added
- Added a new endpoint `/v1/cache/mget` to retrieve multiple keys from the distributed cache in a single request.
### Changed
- Improved API key extraction logic to prioritize the `X-API-Key` header and better handle different authorization schemes (Bearer, ApiKey) while avoiding confusion with JWTs.
- Refactored cache retrieval logic to use a dedicated function for decoding values from the distributed cache.
### Deprecated
### Removed
### Fixed
## [0.56.0] - 2025-11-05
### Added
- Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning.
- Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints.
- Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup.
### Changed
- Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor.
- Refactored Olric configuration generation to use a simpler, local-environment focused setup.
- Improved IPFS content retrieval (`Get`) to fall back to the IPFS Gateway (port 8080) if the IPFS API (port 5001) returns a 404.
### Deprecated
### Removed
### Fixed
## [0.54.0] - 2025-11-03
### Added
- Integrated Olric distributed cache for high-speed key-value storage and caching.
- Added new HTTP Gateway endpoints for cache operations (GET, PUT, DELETE, SCAN) via `/v1/cache/`.
- Added `olric_servers` and `olric_timeout` configuration options to the Gateway.
- Updated the automated installation script (`install-debros-network.sh`) to include Olric installation, configuration, and firewall rules (ports 3320, 3322).
### Changed
- Refactored README for better clarity and organization, focusing on quick start and core features.
### Deprecated
### Removed
### Fixed
## [0.53.18] - 2025-11-03
### Added
### Changed
- Increased the connection timeout during peer discovery from 15 seconds to 20 seconds to improve connection reliability.
- Removed unnecessary debug logging related to filtering out ephemeral port addresses during peer exchange.
@ -26,13 +771,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.53.17] - 2025-11-03
### Added
- Added a new Git `pre-commit` hook to automatically update the changelog and version before committing, ensuring version consistency.
### Changed
- Refactored the `update_changelog.sh` script to support different execution contexts (pre-commit vs. pre-push), allowing it to analyze only staged changes during commit.
- The Git `pre-push` hook was simplified by removing the changelog update logic, which is now handled by the `pre-commit` hook.
@ -41,12 +790,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.53.16] - 2025-11-03
### Added
### Changed
- Improved the changelog generation script to prevent infinite loops when the only unpushed commit is a previous changelog update.
### Deprecated
@ -54,12 +808,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.53.15] - 2025-11-03
### Added
### Changed
- Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation.
- Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security.
- Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility.
@ -71,12 +830,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.53.15] - 2025-11-03
### Added
### Changed
- Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation.
- Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security.
- Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility.
@ -88,14 +852,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.53.14] - 2025-11-03
### Added
- Added a new `install-hooks` target to the Makefile to easily set up git hooks.
- Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`.
### Changed
- Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog.
- Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration.
- Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments.
@ -107,14 +875,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.53.14] - 2025-11-03
### Added
- Added a new `install-hooks` target to the Makefile to easily set up git hooks.
- Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`.
### Changed
- Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog.
- Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration.
- Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments.
@ -124,6 +896,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
\n
## [0.53.8] - 2025-10-31
@ -131,7 +904,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Added
- **HTTPS/ACME Support**: Gateway now supports automatic HTTPS with Let's Encrypt certificates via ACME
- Interactive domain configuration during `network-cli setup` command
- Interactive domain configuration during `dbn setup` command
- Automatic port availability checking for ports 80 and 443 before enabling HTTPS
- DNS resolution verification to ensure domain points to the server IP
- TLS certificate cache directory management (`~/.debros/tls-cache`)
@ -177,8 +950,8 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Changed
- **GoReleaser**: Updated to build only `network-cli` binary (v0.52.2+)
- Other binaries (node, gateway, identity) now installed via `network-cli setup`
- **GoReleaser**: Updated to build only `dbn` binary (v0.52.2+)
- Other binaries (node, gateway, identity) now installed via `dbn setup`
- Cleaner, smaller release packages
- Resolves archive mismatch errors
- **GitHub Actions**: Updated artifact actions from v3 to v4 (deprecated versions)
@ -196,7 +969,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- **CLI Refactor**: Modularized monolithic CLI into `pkg/cli/` package structure for better maintainability
- New `environment.go`: Multi-environment management system (local, devnet, testnet)
- New `env_commands.go`: Environment switching commands (`env list`, `env switch`, `devnet enable`, `testnet enable`)
- New `setup.go`: Interactive VPS installation command (`network-cli setup`) that replaces bash install script
- New `setup.go`: Interactive VPS installation command (`dbn setup`) that replaces bash install script
- New `service.go`: Systemd service management commands (`service start|stop|restart|status|logs`)
- New `auth_commands.go`, `config_commands.go`, `basic_commands.go`: Refactored commands into modular pkg/cli
- **Release Pipeline**: Complete automated release infrastructure via `.goreleaser.yaml` and GitHub Actions
@ -218,7 +991,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- All business logic moved to modular `pkg/cli/` functions
- Easier to test, maintain, and extend individual commands
- **Installation**: `scripts/install-debros-network.sh` now APT-ready with fallback to source build
- **Setup Process**: Consolidated all installation logic into `network-cli setup` command
- **Setup Process**: Consolidated all installation logic into `dbn setup` command
- Single unified installation regardless of installation method
- Interactive user experience with clear progress indicators
@ -229,7 +1002,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Added
- One-command `make dev` target to start full development stack (bootstrap + node2 + node3 + gateway in background)
- New `network-cli config init` (no --type) generates complete development stack with all configs and identities
- New `dbn config init` (no --type) generates complete development stack with all configs and identities
- Full stack initialization with auto-generated peer identities for bootstrap and all nodes
- Explicit control over LibP2P listen addresses for better localhost/development support
- Production/development mode detection for NAT services (disabled for localhost, enabled for production)
@ -240,8 +1013,8 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Simplified Makefile: removed legacy dev commands, replaced with unified `make dev` target
- Updated README with clearer getting started instructions (single `make dev` command)
- Simplified `network-cli config init` behavior: defaults to generating full stack instead of single node
- `network-cli config init` now handles bootstrap peer discovery and join addresses automatically
- Simplified `dbn config init` behavior: defaults to generating full stack instead of single node
- `dbn config init` now handles bootstrap peer discovery and join addresses automatically
- LibP2P configuration: removed always-on NAT services for development environments
- Code formatting in pkg/node/node.go (indentation fixes in bootstrapPeerSource)
@ -437,7 +1210,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
- Removed cli, network-cli binaries from project
- Removed cli, dbn binaries from project
- Removed AI_CONTEXT.md
- Removed Network.md
- Removed unused log from monitoring.go

View File

@ -22,19 +22,19 @@ make deps
- Test: `make test`
- Format/Vet: `make fmt vet` (or `make lint`)
```
````
Useful CLI commands:
```bash
./bin/network-cli health
./bin/network-cli peers
./bin/network-cli status
```
./bin/dbn health
./bin/dbn peers
./bin/dbn status
````
## Versioning
- The CLI reports its version via `network-cli version`.
- The CLI reports its version via `dbn version`.
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.
## Pull Requests

166
Makefile
View File

@ -6,22 +6,20 @@ test:
go test -v $(TEST)
# Gateway-focused E2E tests assume gateway and nodes are already running
# Configure via env:
# GATEWAY_BASE_URL (default http://127.0.0.1:6001)
# GATEWAY_API_KEY (required for auth-protected routes)
# Auto-discovers configuration from ~/.debros and queries database for API key
# No environment variables required
.PHONY: test-e2e
test-e2e:
@echo "Running gateway E2E tests (HTTP/WS only)..."
@echo "Base URL: $${GATEWAY_BASE_URL:-http://127.0.0.1:6001}"
@test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1)
@echo "Running comprehensive E2E tests..."
@echo "Auto-discovering configuration from ~/.debros..."
go test -v -tags e2e ./e2e
# Network - Distributed P2P Database System
# Makefile for development and build tasks
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.53.18
VERSION := 0.69.13
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@ -32,10 +30,10 @@ build: deps
@mkdir -p bin
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
go build -ldflags "$(LDFLAGS)" -o bin/network-cli cmd/cli/main.go
go build -ldflags "$(LDFLAGS)" -o bin/dbn cmd/cli/main.go
# Inject gateway build metadata via pkg path variables
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
@echo "Build complete! Run ./bin/network-cli version"
@echo "Build complete! Run ./bin/dbn version"
# Install git hooks
install-hooks:
@ -53,105 +51,46 @@ clean:
run-node:
@echo "Starting bootstrap node..."
@echo "Config: ~/.debros/bootstrap.yaml"
@echo "Generate it with: network-cli config init --type bootstrap"
@echo "Generate it with: dbn config init --type bootstrap"
go run ./cmd/node --config node.yaml
# Run second node (regular) - requires join address of bootstrap node
# Usage: make run-node2 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
run-node2:
@echo "Starting regular node (node.yaml)..."
@echo "Config: ~/.debros/node.yaml"
@echo "Generate it with: network-cli config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
@echo "Generate it with: dbn config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
go run ./cmd/node --config node2.yaml
# Run third node (regular) - requires join address of bootstrap node
# Usage: make run-node3 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
run-node3:
@echo "Starting regular node (node2.yaml)..."
@echo "Config: ~/.debros/node2.yaml"
@echo "Generate it with: network-cli config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
@echo "Generate it with: dbn config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
go run ./cmd/node --config node3.yaml
# Run gateway HTTP server
# Usage examples:
# make run-gateway # uses ~/.debros/gateway.yaml
# Config generated with: network-cli config init --type gateway
# Config generated with: dbn config init --type gateway
run-gateway:
@echo "Starting gateway HTTP server..."
@echo "Note: Config must be in ~/.debros/gateway.yaml"
@echo "Generate it with: network-cli config init --type gateway"
@echo "Generate it with: dbn config init --type gateway"
go run ./cmd/gateway
# One-command dev: Start bootstrap, node2, node3, gateway, and anon in background
# Requires: configs already exist in ~/.debros
# Development environment target
# Uses dbn dev up to start full stack with dependency and port checking
dev: build
@echo "🚀 Starting development network stack..."
@mkdir -p .dev/pids
@mkdir -p $$HOME/.debros/logs
@echo "Starting Anyone client (anon proxy)..."
@if [ "$$(uname)" = "Darwin" ]; then \
echo " Detected macOS - using npx anyone-client"; \
if command -v npx >/dev/null 2>&1; then \
nohup npx anyone-client > $$HOME/.debros/logs/anon.log 2>&1 & echo $$! > .dev/pids/anon.pid; \
echo " Anyone client started (PID: $$(cat .dev/pids/anon.pid))"; \
else \
echo " ⚠️ npx not found - skipping Anyone client"; \
echo " Install with: npm install -g npm"; \
fi; \
elif [ "$$(uname)" = "Linux" ]; then \
echo " Detected Linux - checking systemctl"; \
if systemctl is-active --quiet anon 2>/dev/null; then \
echo " ✓ Anon service already running"; \
elif command -v systemctl >/dev/null 2>&1; then \
echo " Starting anon service..."; \
sudo systemctl start anon 2>/dev/null || echo " ⚠️ Failed to start anon service"; \
else \
echo " ⚠️ systemctl not found - skipping Anon"; \
fi; \
fi
@sleep 2
@echo "Starting bootstrap node..."
@nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid
@sleep 2
@echo "Starting node2..."
@nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid
@sleep 1
@echo "Starting node3..."
@nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid
@sleep 1
@echo "Starting gateway..."
@nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid
@echo ""
@echo "============================================================"
@echo "✅ Development stack started!"
@echo "============================================================"
@echo ""
@echo "Processes:"
@if [ -f .dev/pids/anon.pid ]; then \
echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \
fi
@echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)"
@echo " Node2: PID=$$(cat .dev/pids/node2.pid)"
@echo " Node3: PID=$$(cat .dev/pids/node3.pid)"
@echo " Gateway: PID=$$(cat .dev/pids/gateway.pid)"
@echo ""
@echo "Ports:"
@echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)"
@echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001"
@echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002"
@echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003"
@echo " Gateway: 6001"
@echo ""
@echo "Press Ctrl+C to stop all processes"
@echo "============================================================"
@echo ""
@if [ -f .dev/pids/anon.pid ]; then \
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \
tail -f $$HOME/.debros/logs/anon.log $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \
else \
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \
tail -f $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \
fi
@./bin/dbn dev up
# Kill all processes (graceful shutdown + force kill stray processes)
kill:
@bash scripts/dev-kill-all.sh
stop:
@./bin/dbn dev down
# Help
help:
@ -160,42 +99,25 @@ help:
@echo " clean - Clean build artifacts"
@echo " test - Run tests"
@echo ""
@echo "Development:"
@echo " dev - Start full dev stack (bootstrap + 2 nodes + gateway)"
@echo " Requires: configs in ~/.debros (run 'network-cli config init' first)"
@echo "Local Development (Recommended):"
@echo " make dev - Start full development stack with one command"
@echo " - Checks dependencies and available ports"
@echo " - Generates configs (2 bootstraps + 3 nodes + gateway)"
@echo " - Starts IPFS, RQLite, Olric, all nodes, and gateway"
@echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)"
@echo " - Stops all services if health checks fail"
@echo " - Includes comprehensive logging"
@echo " make kill - Stop all development services"
@echo ""
@echo "Configuration (NEW):"
@echo " First, generate config files in ~/.debros with:"
@echo " make build # Build CLI first"
@echo " ./bin/network-cli config init # Generate full stack"
@echo "Development Management (via dbn):"
@echo " ./bin/dbn dev status - Show status of all dev services"
@echo " ./bin/dbn dev logs <component> [--follow]"
@echo ""
@echo "Network Targets (requires config files in ~/.debros):"
@echo " run-node - Start bootstrap node"
@echo " run-node2 - Start second node"
@echo " run-node3 - Start third node"
@echo " run-gateway - Start HTTP gateway"
@echo " run-example - Run usage example"
@echo ""
@echo "Running Multiple Nodes:"
@echo " Nodes use --config flag to select which YAML file in ~/.debros to load:"
@echo " go run ./cmd/node --config bootstrap.yaml"
@echo " go run ./cmd/node --config node.yaml"
@echo " go run ./cmd/node --config node2.yaml"
@echo " Generate configs with: ./bin/network-cli config init --name <filename.yaml>"
@echo ""
@echo "CLI Commands:"
@echo " run-cli - Run network CLI help"
@echo " cli-health - Check network health"
@echo " cli-peers - List network peers"
@echo " cli-status - Get network status"
@echo " cli-storage-test - Test storage operations"
@echo " cli-pubsub-test - Test pub/sub operations"
@echo ""
@echo "Development:"
@echo " test-multinode - Full multi-node test with 1 bootstrap + 2 nodes"
@echo " test-peer-discovery - Test peer discovery (requires running nodes)"
@echo " test-replication - Test data replication (requires running nodes)"
@echo " test-consensus - Test database consensus (requires running nodes)"
@echo "Individual Node Targets (advanced):"
@echo " run-node - Start bootstrap node directly"
@echo " run-node2 - Start second node directly"
@echo " run-node3 - Start third node directly"
@echo " run-gateway - Start HTTP gateway directly"
@echo ""
@echo "Maintenance:"
@echo " deps - Download dependencies"
@ -203,8 +125,4 @@ help:
@echo " fmt - Format code"
@echo " vet - Vet code"
@echo " lint - Lint code (fmt + vet)"
@echo " clear-ports - Clear common dev ports"
@echo " dev-setup - Setup development environment"
@echo " dev-cluster - Show cluster startup commands"
@echo " dev - Full development workflow"
@echo " help - Show this help"

175
PRODUCTION_INSTALL.md Normal file
View File

@ -0,0 +1,175 @@
# Production Installation Guide - DeBros Network
This guide covers production deployment of the DeBros Network using the `dbn prod` command suite.
## System Requirements
- **OS**: Ubuntu 20.04 LTS or later, Debian 11+, or other Linux distributions
- **Architecture**: x86_64 (amd64) or ARM64 (aarch64)
- **RAM**: Minimum 4GB, recommended 8GB+
- **Storage**: Minimum 50GB SSD recommended
- **Ports**:
- 4001 (P2P networking)
- 4501 (IPFS HTTP API - bootstrap), 4502/4503 (node2/node3)
- 5001-5003 (RQLite HTTP - one per node)
- 6001 (Gateway)
- 7001-7003 (RQLite Raft - one per node)
- 9094 (IPFS Cluster API - bootstrap), 9104/9114 (node2/node3)
- 3320/3322 (Olric)
- 80, 443 (for HTTPS with Let's Encrypt)
## Installation
### Prerequisites
1. **Root access required**: All production operations require sudo/root privileges
2. **Supported distros**: Ubuntu, Debian, Fedora (via package manager)
3. **Basic tools**: `curl`, `git`, `make`, `build-essential`, `wget`
### Single-Node Bootstrap Installation
Deploy the first node (bootstrap node) on a VPS:
```bash
sudo dbn prod install --bootstrap
```
This will:
1. Check system prerequisites (OS, arch, root privileges, basic tools)
2. Provision the `debros` system user and filesystem structure at `~/.debros`
3. Download and install all required binaries (Go, RQLite, IPFS, IPFS Cluster, Olric, DeBros)
4. Generate secrets (cluster secret, swarm key, node identity)
5. Initialize repositories (IPFS, IPFS Cluster, RQLite)
6. Generate configurations for bootstrap node
7. Create and start systemd services
All files will be under `/home/debros/.debros`:
```
~/.debros/
├── bin/ # Compiled binaries
├── configs/ # YAML configurations
├── data/
│ ├── ipfs/ # IPFS repository
│ ├── ipfs-cluster/ # IPFS Cluster state
│ └── rqlite/ # RQLite database
├── logs/ # Service logs
└── secrets/ # Keys and certificates
```
### Joining Additional Nodes
Every non-bootstrap node must use the exact same IPFS Cluster secret as the bootstrap host. When you provision a follower node:
1. Copy the secret from the bootstrap machine:
```bash
scp debros@<bootstrap-ip>:/home/debros/.debros/secrets/cluster-secret ./cluster-secret
```
2. Run the installer with the `--cluster-secret` flag:
```bash
sudo dbn prod install --vps-ip <public_ip> \
--peers /ip4/<bootstrap-ip>/tcp/4001/p2p/<peer-id> \
--cluster-secret $(cat ./cluster-secret)
```
The installer now enforces `--cluster-secret` for all non-bootstrap nodes, which prevents mismatched cluster PSKs during deployment.
## Service Management
### Check Service Status
```bash
sudo systemctl status debros-node-bootstrap
sudo systemctl status debros-gateway
sudo systemctl status debros-rqlite-bootstrap
```
### View Service Logs
```bash
# Bootstrap node logs
sudo journalctl -u debros-node-bootstrap -f
# Gateway logs
sudo journalctl -u debros-gateway -f
# All services
sudo journalctl -u "debros-*" -f
```
## Health Checks
After installation, verify services are running:
```bash
# Check IPFS
curl -X POST http://localhost:4501/api/v0/id
# Check RQLite cluster
curl http://localhost:5001/status
# Check Gateway
curl http://localhost:6001/health
# Check Olric
curl http://localhost:3320/ping
```
## Port Reference
### Development Environment (via `make dev`)
- IPFS API: 4501 (bootstrap), 4502 (node2), 4503 (node3)
- RQLite HTTP: 5001, 5002, 5003
- RQLite Raft: 7001, 7002, 7003
- IPFS Cluster: 9094, 9104, 9114
- P2P: 4001, 4002, 4003
- Gateway: 6001
- Olric: 3320, 3322
### Production Environment (via `sudo dbn prod install`)
- Same port assignments as development for consistency
## Configuration Files
Key configuration files are located in `~/.debros/configs/`:
- **bootstrap.yaml**: Bootstrap node configuration
- **node.yaml**: Regular node configuration
- **gateway.yaml**: HTTP gateway configuration
- **olric.yaml**: In-memory cache configuration
Edit these files directly for advanced configuration, then restart services:
```bash
sudo systemctl restart debros-node-bootstrap
```
## Troubleshooting
### Port already in use
Check which process is using the port:
```bash
sudo lsof -i :4501
sudo lsof -i :5001
sudo lsof -i :7001
```
Kill conflicting processes or change ports in config.
### RQLite cluster not forming
Ensure:
1. Bootstrap node is running: `systemctl status debros-rqlite-bootstrap`
2. Network connectivity between nodes on ports 5001+ (HTTP) and 7001+ (Raft)
3. Check logs: `journalctl -u debros-rqlite-bootstrap -f`
---
**Last Updated**: November 2025
**Compatible with**: Network v1.0.0+

1251
README.md

File diff suppressed because it is too large Load Diff

View File

@ -34,7 +34,7 @@ func main() {
switch command {
case "version":
fmt.Printf("network-cli %s", version)
fmt.Printf("dbn %s", version)
if commit != "" {
fmt.Printf(" (commit %s)", commit)
}
@ -44,74 +44,18 @@ func main() {
fmt.Println()
return
// Environment commands
case "env":
cli.HandleEnvCommand(args)
case "devnet", "testnet", "local":
// Shorthand for switching environments
if len(args) > 0 && (args[0] == "enable" || args[0] == "switch") {
if err := cli.SwitchEnvironment(command); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to switch environment: %v\n", err)
os.Exit(1)
}
env, _ := cli.GetActiveEnvironment()
fmt.Printf("✅ Switched to %s environment\n", command)
if env != nil {
fmt.Printf(" Gateway URL: %s\n", env.GatewayURL)
}
} else {
fmt.Fprintf(os.Stderr, "Usage: network-cli %s enable\n", command)
os.Exit(1)
}
// Development environment commands
case "dev":
cli.HandleDevCommand(args)
// Setup and service commands
case "setup":
cli.HandleSetupCommand(args)
case "service":
cli.HandleServiceCommand(args)
// Production environment commands
case "prod":
cli.HandleProdCommand(args)
// Authentication commands
case "auth":
cli.HandleAuthCommand(args)
// Config commands
case "config":
cli.HandleConfigCommand(args)
// Basic network commands
case "health":
cli.HandleHealthCommand(format, timeout)
case "peers":
cli.HandlePeersCommand(format, timeout)
case "status":
cli.HandleStatusCommand(format, timeout)
case "peer-id":
cli.HandlePeerIDCommand(format, timeout)
// Query command
case "query":
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: network-cli query <sql>\n")
os.Exit(1)
}
cli.HandleQueryCommand(args[0], format, timeout)
// PubSub commands
case "pubsub":
cli.HandlePubSubCommand(args, format, timeout)
// Connect command
case "connect":
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: network-cli connect <peer_address>\n")
os.Exit(1)
}
cli.HandleConnectCommand(args[0], timeout)
// RQLite commands
case "rqlite":
cli.HandleRQLiteCommand(args)
// Help
case "help", "--help", "-h":
showHelp()
@ -142,69 +86,49 @@ func parseGlobalFlags(args []string) {
func showHelp() {
fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n")
fmt.Printf("Usage: network-cli <command> [args...]\n\n")
fmt.Printf("Usage: dbn <command> [args...]\n\n")
fmt.Printf("🌍 Environment Management:\n")
fmt.Printf(" env list - List available environments\n")
fmt.Printf(" env current - Show current environment\n")
fmt.Printf(" env switch <env> - Switch to environment (local, devnet, testnet)\n")
fmt.Printf(" devnet enable - Shorthand for switching to devnet\n")
fmt.Printf(" testnet enable - Shorthand for switching to testnet\n\n")
fmt.Printf("💻 Local Development:\n")
fmt.Printf(" dev up - Start full local dev environment\n")
fmt.Printf(" dev down - Stop all dev services\n")
fmt.Printf(" dev status - Show status of dev services\n")
fmt.Printf(" dev logs <component> - View dev component logs\n")
fmt.Printf(" dev help - Show dev command help\n\n")
fmt.Printf("🚀 Setup & Services:\n")
fmt.Printf(" setup [--force] - Interactive VPS setup (Linux only, requires root)\n")
fmt.Printf(" service start <target> - Start service (node, gateway, all)\n")
fmt.Printf(" service stop <target> - Stop service\n")
fmt.Printf(" service restart <target> - Restart service\n")
fmt.Printf(" service status [target] - Show service status\n")
fmt.Printf(" service logs <target> [opts] - View service logs (--follow, --since=1h)\n\n")
fmt.Printf("🚀 Production Deployment:\n")
fmt.Printf(" prod install [--bootstrap] - Full production bootstrap (requires root/sudo)\n")
fmt.Printf(" prod upgrade - Upgrade existing installation\n")
fmt.Printf(" prod status - Show production service status\n")
fmt.Printf(" prod start - Start all production services (requires root/sudo)\n")
fmt.Printf(" prod stop - Stop all production services (requires root/sudo)\n")
fmt.Printf(" prod restart - Restart all production services (requires root/sudo)\n")
fmt.Printf(" prod logs <service> - View production service logs\n")
fmt.Printf(" prod uninstall - Remove production services (requires root/sudo)\n")
fmt.Printf(" prod help - Show prod command help\n\n")
fmt.Printf("🔐 Authentication:\n")
fmt.Printf(" auth login - Authenticate with wallet\n")
fmt.Printf(" auth logout - Clear stored credentials\n")
fmt.Printf(" auth whoami - Show current authentication\n")
fmt.Printf(" auth status - Show detailed auth info\n\n")
fmt.Printf("⚙️ Configuration:\n")
fmt.Printf(" config init [--type <type>] - Generate configs (full stack or single)\n")
fmt.Printf(" config validate --name <file> - Validate config file\n\n")
fmt.Printf("🌐 Network Commands:\n")
fmt.Printf(" health - Check network health\n")
fmt.Printf(" peers - List connected peers\n")
fmt.Printf(" status - Show network status\n")
fmt.Printf(" peer-id - Show this node's peer ID\n")
fmt.Printf(" connect <peer_address> - Connect to peer\n\n")
fmt.Printf("🗄️ Database:\n")
fmt.Printf(" query <sql> 🔐 Execute database query\n\n")
fmt.Printf("🔧 RQLite:\n")
fmt.Printf(" rqlite fix 🔧 Fix misconfigured join address and clean raft state\n\n")
fmt.Printf("📡 PubSub:\n")
fmt.Printf(" pubsub publish <topic> <msg> 🔐 Publish message\n")
fmt.Printf(" pubsub subscribe <topic> 🔐 Subscribe to topic\n")
fmt.Printf(" pubsub topics 🔐 List topics\n\n")
fmt.Printf(" auth status - Show detailed auth info\n")
fmt.Printf(" auth help - Show auth command help\n\n")
fmt.Printf("Global Flags:\n")
fmt.Printf(" -f, --format <format> - Output format: table, json (default: table)\n")
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n\n")
fmt.Printf("🔐 = Requires authentication (auto-prompts if needed)\n\n")
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n")
fmt.Printf(" --help, -h - Show this help message\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" # Switch to devnet\n")
fmt.Printf(" network-cli devnet enable\n\n")
fmt.Printf(" # Authenticate\n")
fmt.Printf(" dbn auth login\n\n")
fmt.Printf(" # Authenticate and query\n")
fmt.Printf(" network-cli auth login\n")
fmt.Printf(" network-cli query \"SELECT * FROM users LIMIT 10\"\n\n")
fmt.Printf(" # Start local dev environment\n")
fmt.Printf(" dbn dev up\n")
fmt.Printf(" dbn dev status\n\n")
fmt.Printf(" # Setup VPS (Linux only)\n")
fmt.Printf(" sudo network-cli setup\n\n")
fmt.Printf(" # Manage services\n")
fmt.Printf(" sudo network-cli service status all\n")
fmt.Printf(" sudo network-cli service logs node --follow\n")
fmt.Printf(" # Production deployment (requires root/sudo)\n")
fmt.Printf(" sudo dbn prod install --bootstrap\n")
fmt.Printf(" sudo dbn prod upgrade\n")
fmt.Printf(" dbn prod status\n")
fmt.Printf(" dbn prod logs node --follow\n")
}

View File

@ -1,10 +1,12 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/gateway"
@ -39,24 +41,52 @@ func getEnvBoolDefault(key string, def bool) bool {
}
// parseGatewayConfig loads gateway.yaml from ~/.debros exclusively.
// It accepts an optional --config flag for absolute paths (used by systemd services).
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Parse --config flag (optional, for systemd services that pass absolute paths)
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.debros)")
flag.Parse()
// Determine config path
configPath, err := config.DefaultPath("gateway.yaml")
if err != nil {
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
os.Exit(1)
var configPath string
var err error
if *configFlag != "" {
// If --config flag is provided, use it (handles both absolute and relative paths)
if filepath.IsAbs(*configFlag) {
configPath = *configFlag
} else {
configPath, err = config.DefaultPath(*configFlag)
if err != nil {
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
os.Exit(1)
}
}
} else {
// Default behavior: look for gateway.yaml in ~/.debros/data/, ~/.debros/configs/, or ~/.debros/
configPath, err = config.DefaultPath("gateway.yaml")
if err != nil {
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
os.Exit(1)
}
}
// Load YAML
type yamlCfg struct {
ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"`
RQLiteDSN string `yaml:"rqlite_dsn"`
BootstrapPeers []string `yaml:"bootstrap_peers"`
EnableHTTPS bool `yaml:"enable_https"`
DomainName string `yaml:"domain_name"`
TLSCacheDir string `yaml:"tls_cache_dir"`
ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"`
RQLiteDSN string `yaml:"rqlite_dsn"`
BootstrapPeers []string `yaml:"bootstrap_peers"`
EnableHTTPS bool `yaml:"enable_https"`
DomainName string `yaml:"domain_name"`
TLSCacheDir string `yaml:"tls_cache_dir"`
OlricServers []string `yaml:"olric_servers"`
OlricTimeout string `yaml:"olric_timeout"`
IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"`
IPFSAPIURL string `yaml:"ipfs_api_url"`
IPFSTimeout string `yaml:"ipfs_timeout"`
IPFSReplicationFactor int `yaml:"ipfs_replication_factor"`
}
data, err := os.ReadFile(configPath)
@ -65,7 +95,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
zap.String("path", configPath),
zap.Error(err))
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
fmt.Fprintf(os.Stderr, "Generate it using: network-cli config init --type gateway\n")
fmt.Fprintf(os.Stderr, "Generate it using: dbn config init --type gateway\n")
os.Exit(1)
}
@ -79,13 +109,19 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Build config from YAML
cfg := &gateway.Config{
ListenAddr: ":6001",
ClientNamespace: "default",
BootstrapPeers: nil,
RQLiteDSN: "",
EnableHTTPS: false,
DomainName: "",
TLSCacheDir: "",
ListenAddr: ":6001",
ClientNamespace: "default",
BootstrapPeers: nil,
RQLiteDSN: "",
EnableHTTPS: false,
DomainName: "",
TLSCacheDir: "",
OlricServers: nil,
OlricTimeout: 0,
IPFSClusterAPIURL: "",
IPFSAPIURL: "",
IPFSTimeout: 0,
IPFSReplicationFactor: 0,
}
if v := strings.TrimSpace(y.ListenAddr); v != "" {
@ -125,6 +161,36 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
}
}
// Olric configuration
if len(y.OlricServers) > 0 {
cfg.OlricServers = y.OlricServers
}
if v := strings.TrimSpace(y.OlricTimeout); v != "" {
if parsed, err := time.ParseDuration(v); err == nil {
cfg.OlricTimeout = parsed
} else {
logger.ComponentWarn(logging.ComponentGeneral, "invalid olric_timeout, using default", zap.String("value", v), zap.Error(err))
}
}
// IPFS configuration
if v := strings.TrimSpace(y.IPFSClusterAPIURL); v != "" {
cfg.IPFSClusterAPIURL = v
}
if v := strings.TrimSpace(y.IPFSAPIURL); v != "" {
cfg.IPFSAPIURL = v
}
if v := strings.TrimSpace(y.IPFSTimeout); v != "" {
if parsed, err := time.ParseDuration(v); err == nil {
cfg.IPFSTimeout = parsed
} else {
logger.ComponentWarn(logging.ComponentGeneral, "invalid ipfs_timeout, using default", zap.String("value", v), zap.Error(err))
}
}
if y.IPFSReplicationFactor > 0 {
cfg.IPFSReplicationFactor = y.IPFSReplicationFactor
}
// Validate configuration
if errs := cfg.ValidateConfig(); len(errs) > 0 {
fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs))

View File

@ -4,6 +4,7 @@ import (
"context"
"flag"
"fmt"
"net"
"os"
"os/signal"
"path/filepath"
@ -66,23 +67,32 @@ func check_if_should_open_help(help *bool) {
func select_data_dir_check(configName *string) {
logger := setup_logger(logging.ComponentNode)
// Ensure config directory exists and is writable
_, err := config.EnsureConfigDir()
if err != nil {
logger.Error("Failed to ensure config directory", zap.Error(err))
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
fmt.Fprintf(os.Stderr, "Failed to create/access config directory: %v\n", err)
fmt.Fprintf(os.Stderr, "\nPlease ensure:\n")
fmt.Fprintf(os.Stderr, " 1. Home directory is accessible: %s\n", os.ExpandEnv("~"))
fmt.Fprintf(os.Stderr, " 2. You have write permissions to home directory\n")
fmt.Fprintf(os.Stderr, " 3. Disk space is available\n")
os.Exit(1)
}
var configPath string
var err error
configPath, err := config.DefaultPath(*configName)
if err != nil {
logger.Error("Failed to determine config path", zap.Error(err))
os.Exit(1)
// Check if configName is an absolute path
if filepath.IsAbs(*configName) {
// Use absolute path directly
configPath = *configName
} else {
// Ensure config directory exists and is writable
_, err = config.EnsureConfigDir()
if err != nil {
logger.Error("Failed to ensure config directory", zap.Error(err))
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
fmt.Fprintf(os.Stderr, "Failed to create/access config directory: %v\n", err)
fmt.Fprintf(os.Stderr, "\nPlease ensure:\n")
fmt.Fprintf(os.Stderr, " 1. Home directory is accessible: %s\n", os.ExpandEnv("~"))
fmt.Fprintf(os.Stderr, " 2. You have write permissions to home directory\n")
fmt.Fprintf(os.Stderr, " 3. Disk space is available\n")
os.Exit(1)
}
configPath, err = config.DefaultPath(*configName)
if err != nil {
logger.Error("Failed to determine config path", zap.Error(err))
os.Exit(1)
}
}
if _, err := os.Stat(configPath); err != nil {
@ -92,8 +102,8 @@ func select_data_dir_check(configName *string) {
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath)
fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n")
fmt.Fprintf(os.Stderr, " network-cli config init --type bootstrap\n")
fmt.Fprintf(os.Stderr, " network-cli config init --type node --bootstrap-peers '<peer_multiaddr>'\n")
fmt.Fprintf(os.Stderr, " dbn config init --type bootstrap\n")
fmt.Fprintf(os.Stderr, " dbn config init --type node --bootstrap-peers '<peer_multiaddr>'\n")
os.Exit(1)
}
}
@ -128,7 +138,26 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error {
// Save the peer ID to a file for CLI access (especially useful for bootstrap)
peerID := n.GetPeerID()
peerInfoFile := filepath.Join(dataDir, "peer.info")
peerMultiaddr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/p2p/%s", port, peerID)
// Extract advertise IP from config (prefer http_adv_address, fallback to raft_adv_address)
advertiseIP := "0.0.0.0" // Default fallback
if cfg.Discovery.HttpAdvAddress != "" {
if host, _, err := net.SplitHostPort(cfg.Discovery.HttpAdvAddress); err == nil && host != "" && host != "localhost" {
advertiseIP = host
}
} else if cfg.Discovery.RaftAdvAddress != "" {
if host, _, err := net.SplitHostPort(cfg.Discovery.RaftAdvAddress); err == nil && host != "" && host != "localhost" {
advertiseIP = host
}
}
// Determine IP protocol (IPv4 or IPv6) for multiaddr
ipProtocol := "ip4"
if ip := net.ParseIP(advertiseIP); ip != nil && ip.To4() == nil {
ipProtocol = "ip6"
}
peerMultiaddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, advertiseIP, port, peerID)
if err := os.WriteFile(peerInfoFile, []byte(peerMultiaddr), 0644); err != nil {
logger.Error("Failed to save peer info: %v", zap.Error(err))
@ -232,15 +261,24 @@ func main() {
check_if_should_open_help(help)
// Check if config file exists
// Check if config file exists and determine path
select_data_dir_check(configName)
// Load configuration from ~/.debros/node.yaml
configPath, err := config.DefaultPath(*configName)
if err != nil {
logger.Error("Failed to determine config path", zap.Error(err))
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
os.Exit(1)
// Determine config path (handle both absolute and relative paths)
// Note: select_data_dir_check already validated the path exists, so we can safely determine it here
var configPath string
var err error
if filepath.IsAbs(*configName) {
// Absolute path passed directly (e.g., from systemd service)
configPath = *configName
} else {
// Relative path - use DefaultPath which checks both ~/.debros/configs/ and ~/.debros/
configPath, err = config.DefaultPath(*configName)
if err != nil {
logger.Error("Failed to determine config path", zap.Error(err))
fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
os.Exit(1)
}
}
var cfg *config.Config
@ -255,10 +293,10 @@ func main() {
// Set default advertised addresses if empty
if cfg.Discovery.HttpAdvAddress == "" {
cfg.Discovery.HttpAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLitePort)
cfg.Discovery.HttpAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLitePort)
}
if cfg.Discovery.RaftAdvAddress == "" {
cfg.Discovery.RaftAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLiteRaftPort)
cfg.Discovery.RaftAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLiteRaftPort)
}
// Validate configuration

294
e2e/auth_negative_test.go Normal file
View File

@ -0,0 +1,294 @@
//go:build e2e
package e2e
import (
"context"
"net/http"
"testing"
"time"
"unicode"
)
// TestAuth_MissingAPIKey sends a request carrying no credentials at all and
// expects the gateway to reject it. A non-401/403 answer is only logged,
// because enforcement on this endpoint is deployment-dependent.
func TestAuth_MissingAPIKey(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Bare GET: no Authorization or X-API-Key header is attached.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/network/status", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	resp, err := NewHTTPClient(30 * time.Second).Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Anything other than 401/403 is surfaced as a warning, not a failure.
	switch resp.StatusCode {
	case http.StatusUnauthorized, http.StatusForbidden:
		// expected: unauthenticated request was rejected
	default:
		t.Logf("warning: expected 401/403 for missing auth, got %d (auth may not be enforced on this endpoint)", resp.StatusCode)
	}
}
// TestAuth_InvalidAPIKey verifies that a well-formed bearer token that does
// not correspond to any issued key is rejected. A non-401/403 response is
// only logged, since enforcement may vary by deployment.
func TestAuth_InvalidAPIKey(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request with a syntactically valid but unknown API key.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer invalid-key-xyz")

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized (401) or forbidden (403).
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for invalid key, got %d", resp.StatusCode)
	}
}
// TestAuth_CacheWithoutAuth hits the cache health endpoint with SkipAuth set,
// so the HTTPRequest helper attaches no credentials, and expects rejection.
func TestAuth_CacheWithoutAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// SkipAuth suppresses the default credential headers added by the helper.
	req := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/cache/health",
		SkipAuth: true,
	}
	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should fail with 401 or 403; other codes are logged as warnings only.
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for cache without auth, got %d", status)
	}
}
// TestAuth_StorageWithoutAuth requests a storage status endpoint without
// credentials and expects rejection. "QmTest" is a placeholder CID; the auth
// check is expected to fire before any CID lookup.
func TestAuth_StorageWithoutAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request storage endpoint without auth headers.
	req := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/storage/status/QmTest",
		SkipAuth: true,
	}
	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should fail with 401 or 403; other codes are logged as warnings only.
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for storage without auth, got %d", status)
	}
}
// TestAuth_RQLiteWithoutAuth requests the rqlite schema endpoint without
// credentials and expects rejection before any database access happens.
func TestAuth_RQLiteWithoutAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request rqlite endpoint without auth headers.
	req := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/rqlite/schema",
		SkipAuth: true,
	}
	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should fail with 401 or 403; other codes are logged as warnings only.
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for rqlite without auth, got %d", status)
	}
}
// TestAuth_MalformedBearerToken sends an Authorization header whose value
// lacks the "Bearer " scheme prefix and expects the gateway to reject it.
func TestAuth_MalformedBearerToken(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request with malformed bearer token.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	// Missing "Bearer " prefix — the raw value is not a valid credential.
	req.Header.Set("Authorization", "invalid-token-format")

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized; other codes are logged as warnings only.
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for malformed token, got %d", resp.StatusCode)
	}
}
// TestAuth_ExpiredJWT approximates an expired-token check. It does not mint a
// genuinely expired JWT; it sends a structurally JWT-shaped but invalid token
// ("expired.jwt.token") and expects rejection. A real expired-token fixture
// would strengthen this test — TODO.
func TestAuth_ExpiredJWT(t *testing.T) {
	// Skip if JWT is not being used.
	if GetJWT() == "" && GetAPIKey() == "" {
		t.Skip("No JWT or API key configured")
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// This test would require an expired JWT token.
	// For now, test with a clearly invalid JWT structure.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer expired.jwt.token")

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized; other codes are logged as warnings only.
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for expired JWT, got %d", resp.StatusCode)
	}
}
// TestAuth_EmptyBearerToken sends "Bearer " with an empty credential and
// expects the gateway to reject it rather than treat it as anonymous-ok.
func TestAuth_EmptyBearerToken(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request with empty bearer token (scheme present, credential absent).
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer ")

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized; other codes are logged as warnings only.
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for empty token, got %d", resp.StatusCode)
	}
}
// TestAuth_DuplicateAuthHeaders sends the same (valid) API key via both the
// Authorization bearer header and X-API-Key simultaneously, and checks the
// gateway still accepts the request rather than erroring on the duplication.
// NOTE(review): an earlier comment described "API key and invalid JWT", but
// the code sends the valid key in both headers — confirm intended scenario.
func TestAuth_DuplicateAuthHeaders(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request with the valid API key present in both credential headers.
	req := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/cache/health",
		Headers: map[string]string{
			"Authorization": "Bearer " + GetAPIKey(),
			"X-API-Key":     GetAPIKey(),
		},
	}
	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should succeed if API key is valid; non-200 is logged, not fatal.
	if status != http.StatusOK {
		t.Logf("request with both headers returned %d", status)
	}
}
// TestAuth_CaseSensitiveAPIKey verifies API keys are compared case-sensitively.
// It builds a variant of the configured key that differs only by letter case
// and expects the gateway NOT to accept it.
//
// Fix: the previous version uppercased every other letter, so a key whose
// even-indexed letters were already uppercase (or non-letters) produced a
// mutated key identical to the original, making the test vacuous. We now flip
// the case of every letter and skip when the key contains no letters at all.
func TestAuth_CaseSensitiveAPIKey(t *testing.T) {
	if GetAPIKey() == "" {
		t.Skip("No API key configured")
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Flip the case of every letter so the mutated key is guaranteed to
	// differ from the real key whenever at least one letter is present.
	apiKey := GetAPIKey()
	flipped := []rune(apiKey)
	changed := false
	for i, ch := range flipped {
		switch {
		case unicode.IsUpper(ch):
			flipped[i] = unicode.ToLower(ch)
			changed = true
		case unicode.IsLower(ch):
			flipped[i] = unicode.ToUpper(ch)
			changed = true
		}
	}
	if !changed {
		// All characters are caseless (digits, symbols); case sensitivity
		// cannot be probed with this key.
		t.Skip("API key contains no letters; cannot test case sensitivity")
	}
	incorrectKey := string(flipped)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Authorization", "Bearer "+incorrectKey)

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// API keys should be case-sensitive: the flipped key must not work.
	if resp.StatusCode == http.StatusOK {
		t.Logf("warning: API key check may not be case-sensitive (got 200)")
	}
}
// TestAuth_HealthEndpointNoAuth asserts the health endpoint is reachable
// without any credentials — this is the one hard failure in this file.
// NOTE(review): the original comment said "/health" but the request targets
// "/v1/health" — confirm which path is the unauthenticated health check.
func TestAuth_HealthEndpointNoAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Health endpoint should not require auth.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Must succeed without auth — a non-200 here fails the test.
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200 for /health without auth, got %d", resp.StatusCode)
	}
}

511
e2e/cache_http_test.go Normal file
View File

@ -0,0 +1,511 @@
//go:build e2e
package e2e
import (
"context"
"fmt"
"net/http"
"testing"
"time"
)
// TestCache_Health checks the cache health endpoint returns 200 with a JSON
// body reporting status "ok" from the "olric" service.
func TestCache_Health(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	healthReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/cache/health",
	}
	body, status, err := healthReq.Do(ctx)
	if err != nil {
		t.Fatalf("health check failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	// Both fields must match exactly.
	if got := payload["status"]; got != "ok" {
		t.Fatalf("expected status 'ok', got %v", got)
	}
	if got := payload["service"]; got != "olric" {
		t.Fatalf("expected service 'olric', got %v", got)
	}
}
// TestCache_PutGet round-trips a plain string value through the cache:
// put into a per-run dmap, then get and compare.
func TestCache_PutGet(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Per-run dmap name avoids collisions between test runs.
	dmap := GenerateDMapName()
	key := "test-key"
	value := "test-value"

	// Put value
	putReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/put",
		Body: map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": value,
		},
	}
	body, status, err := putReq.Do(ctx)
	if err != nil {
		t.Fatalf("put failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d: %s", status, string(body))
	}

	// Get value
	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		},
	}
	body, status, err = getReq.Do(ctx)
	if err != nil {
		t.Fatalf("get failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var getResp map[string]interface{}
	if err := DecodeJSON(body, &getResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// The stored value must round-trip unchanged.
	if getResp["value"] != value {
		t.Fatalf("expected value %q, got %v", value, getResp["value"])
	}
}
// TestCache_PutGetJSON round-trips a structured JSON object through the cache
// and verifies individual fields survive (JSON numbers come back as float64).
//
// Fix: the "value" field is now extracted with a checked type assertion so an
// unexpected payload fails the test instead of panicking the test binary.
func TestCache_PutGetJSON(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	key := "json-key"
	jsonValue := map[string]interface{}{
		"name": "John",
		"age":  30,
		"tags": []string{"developer", "golang"},
	}

	// Put JSON value
	putReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/put",
		Body: map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": jsonValue,
		},
	}
	_, status, err := putReq.Do(ctx)
	if err != nil {
		t.Fatalf("put failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	// Get JSON value
	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		},
	}
	body, status, err := getReq.Do(ctx)
	if err != nil {
		t.Fatalf("get failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var getResp map[string]interface{}
	if err := DecodeJSON(body, &getResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail cleanly if "value" is not a JSON object.
	retrievedValue, ok := getResp["value"].(map[string]interface{})
	if !ok {
		t.Fatalf("expected JSON object value, got %T", getResp["value"])
	}
	if retrievedValue["name"] != jsonValue["name"] {
		t.Fatalf("expected name %q, got %v", jsonValue["name"], retrievedValue["name"])
	}
	// encoding/json decodes all numbers into float64.
	if retrievedValue["age"] != float64(30) {
		t.Fatalf("expected age 30, got %v", retrievedValue["age"])
	}
}
// TestCache_Delete verifies the put → delete → get lifecycle: once a key is
// deleted, a subsequent get must report 404.
//
// Fix: a transport error on the verification get was previously ignored,
// which could mask network failures as a bogus status check; it is now fatal.
func TestCache_Delete(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	key := "delete-key"
	value := "delete-value"

	// Seed the key so there is something to delete.
	putReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/put",
		Body: map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": value,
		},
	}
	_, status, err := putReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("put failed: status %d, err %v", status, err)
	}

	// Delete value
	deleteReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/delete",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		},
	}
	_, status, err = deleteReq.Do(ctx)
	if err != nil {
		t.Fatalf("delete failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	// Verify deletion: the key must now be absent (404).
	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		},
	}
	_, status, err = getReq.Do(ctx)
	if err != nil {
		t.Fatalf("get after delete failed: %v", err)
	}
	if status != http.StatusNotFound {
		t.Fatalf("expected status 404 for deleted key, got %d", status)
	}
}
// TestCache_TTL stores a value with a 2s TTL, confirms it is readable
// immediately, waits past expiry, and checks it is gone (non-404 after expiry
// is logged, not fatal, since TTL support may be partial).
//
// Fix: a transport error on the post-expiry get was previously ignored; it is
// now fatal so network failures are not misread as TTL behavior.
func TestCache_TTL(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	key := "ttl-key"
	value := "ttl-value"

	// Put value with a short TTL.
	putReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/put",
		Body: map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": value,
			"ttl":   "2s",
		},
	}
	_, status, err := putReq.Do(ctx)
	if err != nil {
		t.Fatalf("put with TTL failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	// Verify the value exists before expiry.
	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		},
	}
	_, status, err = getReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("get immediately after put failed: status %d, err %v", status, err)
	}

	// Wait for TTL expiry (2 seconds + buffer).
	Delay(2500)

	// Verify the value is expired; transport failures are fatal.
	_, status, err = getReq.Do(ctx)
	if err != nil {
		t.Fatalf("get after TTL expiry failed: %v", err)
	}
	if status != http.StatusNotFound {
		t.Logf("warning: TTL expiry may not be fully implemented; got status %d", status)
	}
}
// TestCache_Scan inserts several keys into a fresh dmap and verifies an
// unfiltered scan returns at least all of them.
//
// Fix: the "keys" field is now extracted with a checked type assertion so a
// malformed response fails the test instead of panicking the test binary.
func TestCache_Scan(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dmap := GenerateDMapName()

	// Put multiple keys with two distinct prefixes.
	keys := []string{"user-1", "user-2", "session-1", "session-2"}
	for _, key := range keys {
		putReq := &HTTPRequest{
			Method: http.MethodPost,
			URL:    GetGatewayURL() + "/v1/cache/put",
			Body: map[string]interface{}{
				"dmap":  dmap,
				"key":   key,
				"value": "value-" + key,
			},
		}
		_, status, err := putReq.Do(ctx)
		if err != nil || status != http.StatusOK {
			t.Fatalf("put failed: status %d, err %v", status, err)
		}
	}

	// Scan all keys (no pattern).
	scanReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/scan",
		Body: map[string]interface{}{
			"dmap": dmap,
		},
	}
	body, status, err := scanReq.Do(ctx)
	if err != nil {
		t.Fatalf("scan failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var scanResp map[string]interface{}
	if err := DecodeJSON(body, &scanResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail cleanly if "keys" is missing or mistyped.
	keysResp, ok := scanResp["keys"].([]interface{})
	if !ok {
		t.Fatalf("expected keys array in scan response, got %T", scanResp["keys"])
	}
	if len(keysResp) < 4 {
		t.Fatalf("expected at least 4 keys, got %d", len(keysResp))
	}
}
// TestCache_ScanWithRegex inserts keys with two prefixes and scans with a
// "^user-" regex, expecting only (at least) the matching subset back.
//
// Fix: the "keys" field is now extracted with a checked type assertion so a
// malformed response fails the test instead of panicking the test binary.
func TestCache_ScanWithRegex(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dmap := GenerateDMapName()

	// Put keys with different patterns.
	keys := []string{"user-1", "user-2", "session-1", "session-2"}
	for _, key := range keys {
		putReq := &HTTPRequest{
			Method: http.MethodPost,
			URL:    GetGatewayURL() + "/v1/cache/put",
			Body: map[string]interface{}{
				"dmap":  dmap,
				"key":   key,
				"value": "value-" + key,
			},
		}
		_, status, err := putReq.Do(ctx)
		if err != nil || status != http.StatusOK {
			t.Fatalf("put failed: status %d, err %v", status, err)
		}
	}

	// Scan with a regex pattern anchored to the "user-" prefix.
	scanReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/scan",
		Body: map[string]interface{}{
			"dmap":    dmap,
			"pattern": "^user-",
		},
	}
	body, status, err := scanReq.Do(ctx)
	if err != nil {
		t.Fatalf("scan with regex failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var scanResp map[string]interface{}
	if err := DecodeJSON(body, &scanResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail cleanly if "keys" is missing or mistyped.
	keysResp, ok := scanResp["keys"].([]interface{})
	if !ok {
		t.Fatalf("expected keys array in scan response, got %T", scanResp["keys"])
	}
	if len(keysResp) < 2 {
		t.Fatalf("expected at least 2 keys matching pattern, got %d", len(keysResp))
	}
}
// TestCache_MultiGet stores three keys then fetches them in one mget call,
// expecting exactly three results back.
//
// Fix: the "results" field is now extracted with a checked type assertion so
// a malformed response fails the test instead of panicking the test binary.
func TestCache_MultiGet(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	keys := []string{"key-1", "key-2", "key-3"}

	// Put values, one per key.
	for i, key := range keys {
		putReq := &HTTPRequest{
			Method: http.MethodPost,
			URL:    GetGatewayURL() + "/v1/cache/put",
			Body: map[string]interface{}{
				"dmap":  dmap,
				"key":   key,
				"value": fmt.Sprintf("value-%d", i),
			},
		}
		_, status, err := putReq.Do(ctx)
		if err != nil || status != http.StatusOK {
			t.Fatalf("put failed: status %d, err %v", status, err)
		}
	}

	// Multi-get all keys in one request.
	multiGetReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/mget",
		Body: map[string]interface{}{
			"dmap": dmap,
			"keys": keys,
		},
	}
	body, status, err := multiGetReq.Do(ctx)
	if err != nil {
		t.Fatalf("mget failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var mgetResp map[string]interface{}
	if err := DecodeJSON(body, &mgetResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail cleanly if "results" is missing or mistyped.
	results, ok := mgetResp["results"].([]interface{})
	if !ok {
		t.Fatalf("expected results array in mget response, got %T", mgetResp["results"])
	}
	if len(results) != 3 {
		t.Fatalf("expected 3 results, got %d", len(results))
	}
}
// TestCache_MissingDMap sends a get with an empty dmap name and expects the
// gateway to reject the request with 400 (input validation).
func TestCache_MissingDMap(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Empty "dmap" should be rejected before any cache lookup.
	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": "",
			"key":  "any-key",
		},
	}
	_, status, err := getReq.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	if status != http.StatusBadRequest {
		t.Fatalf("expected status 400 for missing dmap, got %d", status)
	}
}
// TestCache_MissingKey looks up a key that was never stored (in a fresh dmap)
// and expects a 404.
func TestCache_MissingKey(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Fresh dmap guarantees the key cannot exist.
	dmap := GenerateDMapName()

	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  "non-existent-key",
		},
	}
	_, status, err := getReq.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	if status != http.StatusNotFound {
		t.Fatalf("expected status 404 for missing key, got %d", status)
	}
}

View File

@ -1,93 +0,0 @@
//go:build e2e
package e2e
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
)
func getenv(k, def string) string {
if v := strings.TrimSpace(os.Getenv(k)); v != "" {
return v
}
return def
}
// requireEnv fetches a mandatory environment variable, skipping the calling
// test when it is unset or blank (whitespace-only counts as unset).
func requireEnv(t *testing.T, key string) string {
	t.Helper()
	val := strings.TrimSpace(os.Getenv(key))
	if val == "" {
		t.Skipf("%s not set; skipping", key)
	}
	return val
}
// TestClient_Database_CreateQueryMigrate exercises the Go client end-to-end:
// connect with an API key, create a unique table, insert two rows in a
// transaction, and query them back. Requires GATEWAY_API_KEY; optional env:
// E2E_CLIENT_NAMESPACE, E2E_BOOTSTRAP_PEERS (comma-separated multiaddrs),
// E2E_RQLITE_NODES (comma- or space-separated endpoints).
func TestClient_Database_CreateQueryMigrate(t *testing.T) {
	apiKey := requireEnv(t, "GATEWAY_API_KEY")
	namespace := getenv("E2E_CLIENT_NAMESPACE", "default")

	cfg := client.DefaultClientConfig(namespace)
	cfg.APIKey = apiKey
	cfg.QuietMode = true
	// Optional bootstrap peer overrides, comma-separated.
	if v := strings.TrimSpace(os.Getenv("E2E_BOOTSTRAP_PEERS")); v != "" {
		parts := strings.Split(v, ",")
		var peers []string
		for _, p := range parts {
			p = strings.TrimSpace(p)
			if p != "" {
				peers = append(peers, p)
			}
		}
		cfg.BootstrapPeers = peers
	}
	// Optional rqlite endpoint overrides; accepts commas or whitespace.
	if v := strings.TrimSpace(os.Getenv("E2E_RQLITE_NODES")); v != "" {
		nodes := strings.Fields(strings.ReplaceAll(v, ",", " "))
		cfg.DatabaseEndpoints = nodes
	}

	c, err := client.NewClient(cfg)
	if err != nil {
		t.Fatalf("new client: %v", err)
	}
	if err := c.Connect(); err != nil {
		t.Fatalf("connect: %v", err)
	}
	t.Cleanup(func() { _ = c.Disconnect() })

	// Unique table per run — avoids cross-run collisions; never dropped.
	table := fmt.Sprintf("e2e_items_client_%d", time.Now().UnixNano())
	schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)", table)

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := c.Database().CreateTable(ctx, schema); err != nil {
		t.Fatalf("create table: %v", err)
	}

	// Insert via transaction (two rows, one atomic batch).
	stmts := []string{
		fmt.Sprintf("INSERT INTO %s(name) VALUES ('alpha')", table),
		fmt.Sprintf("INSERT INTO %s(name) VALUES ('beta')", table),
	}
	ctx2, cancel2 := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel2()
	if err := c.Database().Transaction(ctx2, stmts); err != nil {
		t.Fatalf("transaction: %v", err)
	}

	// Query rows back and check both inserts are visible.
	ctx3, cancel3 := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel3()
	res, err := c.Database().Query(ctx3, fmt.Sprintf("SELECT name FROM %s ORDER BY id", table))
	if err != nil {
		t.Fatalf("query: %v", err)
	}
	if res.Count < 2 {
		t.Fatalf("expected at least 2 rows, got %d", res.Count)
	}
}

503
e2e/concurrency_test.go Normal file
View File

@ -0,0 +1,503 @@
//go:build e2e
package e2e
import (
"context"
"fmt"
"net/http"
"sync"
"sync/atomic"
"testing"
"time"
)
// TestCache_ConcurrentWrites fires 10 goroutines that each put a distinct key
// into the same dmap, then scans to confirm all writes landed.
//
// Fix: the "keys" field of the scan response is now extracted with a checked
// type assertion so a malformed response fails the test instead of panicking.
func TestCache_ConcurrentWrites(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	numGoroutines := 10

	var wg sync.WaitGroup
	// errorCount is bumped atomically from worker goroutines.
	var errorCount int32

	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			key := fmt.Sprintf("key-%d", idx)
			value := fmt.Sprintf("value-%d", idx)
			putReq := &HTTPRequest{
				Method: http.MethodPost,
				URL:    GetGatewayURL() + "/v1/cache/put",
				Body: map[string]interface{}{
					"dmap":  dmap,
					"key":   key,
					"value": value,
				},
			}
			_, status, err := putReq.Do(ctx)
			if err != nil || status != http.StatusOK {
				atomic.AddInt32(&errorCount, 1)
			}
		}(i)
	}
	wg.Wait()

	if errorCount > 0 {
		t.Fatalf("expected no errors, got %d", errorCount)
	}

	// Verify every written key is visible via scan.
	scanReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/scan",
		Body: map[string]interface{}{
			"dmap": dmap,
		},
	}
	body, status, err := scanReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("scan failed: status %d, err %v", status, err)
	}
	var scanResp map[string]interface{}
	if err := DecodeJSON(body, &scanResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail cleanly if "keys" is missing or mistyped.
	keys, ok := scanResp["keys"].([]interface{})
	if !ok {
		t.Fatalf("expected keys array in scan response, got %T", scanResp["keys"])
	}
	if len(keys) < numGoroutines {
		t.Fatalf("expected at least %d keys, got %d", numGoroutines, len(keys))
	}
}
// TestCache_ConcurrentReads writes one key, then has 10 goroutines read it
// simultaneously; every read must succeed and return the same value.
func TestCache_ConcurrentReads(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	key := "shared-key"
	value := "shared-value"

	// Put value first so all readers target an existing key.
	putReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/put",
		Body: map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": value,
		},
	}
	_, status, err := putReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("put failed: status %d, err %v", status, err)
	}

	// Read concurrently; failures are counted atomically, not reported inline.
	numGoroutines := 10
	var wg sync.WaitGroup
	var errorCount int32

	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			getReq := &HTTPRequest{
				Method: http.MethodPost,
				URL:    GetGatewayURL() + "/v1/cache/get",
				Body: map[string]interface{}{
					"dmap": dmap,
					"key":  key,
				},
			}
			body, status, err := getReq.Do(ctx)
			if err != nil || status != http.StatusOK {
				atomic.AddInt32(&errorCount, 1)
				return
			}
			var getResp map[string]interface{}
			if err := DecodeJSON(body, &getResp); err != nil {
				atomic.AddInt32(&errorCount, 1)
				return
			}
			// Every reader must observe the value written above.
			if getResp["value"] != value {
				atomic.AddInt32(&errorCount, 1)
			}
		}()
	}
	wg.Wait()

	if errorCount > 0 {
		t.Fatalf("expected no errors, got %d", errorCount)
	}
}
// TestCache_ConcurrentDeleteAndWrite writes 5 keys concurrently, then (after
// a wg.Wait barrier) deletes 3 of them concurrently.
// NOTE(review): despite the name, writes and deletes run in two sequential
// phases separated by wg.Wait — deletes never race writes. Confirm whether
// true interleaving was intended.
func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	dmap := GenerateDMapName()
	var wg sync.WaitGroup
	// errorCount is shared across both phases and bumped atomically.
	var errorCount int32

	numWrites := 5
	numDeletes := 3

	// Phase 1: write keys concurrently.
	for i := 0; i < numWrites; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			key := fmt.Sprintf("key-%d", idx)
			value := fmt.Sprintf("value-%d", idx)
			putReq := &HTTPRequest{
				Method: http.MethodPost,
				URL:    GetGatewayURL() + "/v1/cache/put",
				Body: map[string]interface{}{
					"dmap":  dmap,
					"key":   key,
					"value": value,
				},
			}
			_, status, err := putReq.Do(ctx)
			if err != nil || status != http.StatusOK {
				atomic.AddInt32(&errorCount, 1)
			}
		}(i)
	}
	wg.Wait()

	// Phase 2: delete a subset of the keys concurrently.
	for i := 0; i < numDeletes; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			key := fmt.Sprintf("key-%d", idx)
			deleteReq := &HTTPRequest{
				Method: http.MethodPost,
				URL:    GetGatewayURL() + "/v1/cache/delete",
				Body: map[string]interface{}{
					"dmap": dmap,
					"key":  key,
				},
			}
			_, status, err := deleteReq.Do(ctx)
			if err != nil || status != http.StatusOK {
				atomic.AddInt32(&errorCount, 1)
			}
		}(i)
	}
	wg.Wait()

	if errorCount > 0 {
		t.Fatalf("expected no errors, got %d", errorCount)
	}
}
// TestRQLite_ConcurrentInserts creates a table, fires 10 concurrent
// single-statement transactions, and checks the resulting row count.
// Both the insert failures and a short count are warnings (t.Logf), not
// failures, to tolerate write contention in a distributed setup.
// NOTE(review): this payload uses a "statements" field while the large-batch
// test uses "ops" — confirm both shapes are accepted by the transaction API.
func TestRQLite_ConcurrentInserts(t *testing.T) {
	SkipIfMissingGateway(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	table := GenerateTableName()
	schema := fmt.Sprintf(
		"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
		table,
	)

	// Create table; accept either 200 or 201 from the gateway.
	createReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": schema,
		},
	}
	_, status, err := createReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create table failed: status %d, err %v", status, err)
	}

	// Insert concurrently: one transaction per goroutine.
	numInserts := 10
	var wg sync.WaitGroup
	var errorCount int32

	for i := 0; i < numInserts; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			txReq := &HTTPRequest{
				Method: http.MethodPost,
				URL:    GetGatewayURL() + "/v1/rqlite/transaction",
				Body: map[string]interface{}{
					"statements": []string{
						fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, idx),
					},
				},
			}
			_, status, err := txReq.Do(ctx)
			if err != nil || status != http.StatusOK {
				atomic.AddInt32(&errorCount, 1)
			}
		}(i)
	}
	wg.Wait()

	// Insert failures are tolerated (warning only).
	if errorCount > 0 {
		t.Logf("warning: %d concurrent inserts failed", errorCount)
	}

	// Verify count via an aggregate query.
	queryReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/query",
		Body: map[string]interface{}{
			"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
		},
	}
	body, status, err := queryReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("count query failed: status %d, err %v", status, err)
	}
	var countResp map[string]interface{}
	if err := DecodeJSON(body, &countResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Rows come back as [[count]]; only warn on a short count.
	if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
		row := rows[0].([]interface{})
		count := int(row[0].(float64))
		if count < numInserts {
			t.Logf("warning: expected %d inserts, got %d", numInserts, count)
		}
	}
}
// TestRQLite_LargeBatchTransaction tests a large transaction with many
// statements: 100 INSERTs are sent as one "ops" batch to
// /v1/rqlite/transaction and the resulting row count must be exactly 100.
func TestRQLite_LargeBatchTransaction(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	table := GenerateTableName()
	schema := fmt.Sprintf(
		"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT)",
		table,
	)
	// Create table
	createReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": schema,
		},
	}
	_, status, err := createReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create table failed: status %d, err %v", status, err)
	}
	// Create large batch (100 statements) in the gateway's "ops" format,
	// each op tagged kind=exec since INSERT returns no rows.
	var ops []map[string]interface{}
	for i := 0; i < 100; i++ {
		ops = append(ops, map[string]interface{}{
			"kind": "exec",
			"sql":  fmt.Sprintf("INSERT INTO %s(value) VALUES ('value-%d')", table, i),
		})
	}
	txReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/transaction",
		Body: map[string]interface{}{
			"ops": ops,
		},
	}
	_, status, err = txReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("large batch transaction failed: status %d, err %v", status, err)
	}
	// Verify count
	queryReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/query",
		Body: map[string]interface{}{
			"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
		},
	}
	body, status, err := queryReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("count query failed: status %d, err %v", status, err)
	}
	var countResp map[string]interface{}
	if err := DecodeJSON(body, &countResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// JSON numbers decode as float64; convert before comparing.
	if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
		row := rows[0].([]interface{})
		if int(row[0].(float64)) != 100 {
			t.Fatalf("expected 100 rows, got %v", row[0])
		}
	}
}
// TestCache_TTLExpiryWithSleep tests TTL expiry with a controlled sleep:
// a value is stored with a 2s TTL, verified readable immediately, then read
// again after 2.5s. Because TTL enforcement may not be fully implemented
// server-side, a still-present key after expiry is only logged, not fatal.
func TestCache_TTLExpiryWithSleep(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	dmap := GenerateDMapName()
	key := "ttl-expiry-key"
	value := "ttl-expiry-value"
	// Put value with 2 second TTL
	putReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/put",
		Body: map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": value,
			"ttl":   "2s",
		},
	}
	_, status, err := putReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("put with TTL failed: status %d, err %v", status, err)
	}
	// Verify exists immediately
	getReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/cache/get",
		Body: map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		},
	}
	_, status, err = getReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("get immediately after put failed: status %d, err %v", status, err)
	}
	// Sleep for TTL duration + buffer (2s TTL + 500ms slack).
	Delay(2500)
	// Try to get after TTL expires
	_, status, err = getReq.Do(ctx)
	if status == http.StatusOK {
		t.Logf("warning: TTL expiry may not be fully implemented; key still exists after TTL")
	}
}
// TestCache_ConcurrentWriteAndDelete exercises repeated write/read/delete
// cycles on the same cache key.
// NOTE(review): despite the name, the iterations run sequentially in a single
// goroutine — this tests rapid interleaving of operations on one key, not
// true concurrency. Consider renaming or adding goroutines.
func TestCache_ConcurrentWriteAndDelete(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	dmap := GenerateDMapName()
	key := "contested-key"
	// Alternate between writes and deletes
	numIterations := 5
	for i := 0; i < numIterations; i++ {
		// Write
		putReq := &HTTPRequest{
			Method: http.MethodPost,
			URL:    GetGatewayURL() + "/v1/cache/put",
			Body: map[string]interface{}{
				"dmap":  dmap,
				"key":   key,
				"value": fmt.Sprintf("value-%d", i),
			},
		}
		_, status, err := putReq.Do(ctx)
		if err != nil || status != http.StatusOK {
			t.Fatalf("put failed at iteration %d: status %d, err %v", i, status, err)
		}
		// Read
		getReq := &HTTPRequest{
			Method: http.MethodPost,
			URL:    GetGatewayURL() + "/v1/cache/get",
			Body: map[string]interface{}{
				"dmap": dmap,
				"key":  key,
			},
		}
		_, status, err = getReq.Do(ctx)
		if err != nil || status != http.StatusOK {
			t.Fatalf("get failed at iteration %d: status %d, err %v", i, status, err)
		}
		// Delete; failure is logged rather than fatal since the next
		// iteration's put overwrites the key anyway.
		deleteReq := &HTTPRequest{
			Method: http.MethodPost,
			URL:    GetGatewayURL() + "/v1/cache/delete",
			Body: map[string]interface{}{
				"dmap": dmap,
				"key":  key,
			},
		}
		_, status, err = deleteReq.Do(ctx)
		if err != nil || status != http.StatusOK {
			t.Logf("warning: delete at iteration %d failed: status %d, err %v", i, status, err)
		}
	}
}

646
e2e/env.go Normal file
View File

@ -0,0 +1,646 @@
//go:build e2e
package e2e
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/ipfs"
_ "github.com/mattn/go-sqlite3"
"go.uber.org/zap"
"gopkg.in/yaml.v2"
)
// Process-wide caches of discovered endpoints/credentials so each test does
// not re-read config files or re-query the database. All access is guarded
// by cacheMutex (read-lock to check, write-lock to populate).
var (
	gatewayURLCache  string   // cached gateway base URL
	apiKeyCache      string   // cached gateway API key
	bootstrapCache   []string // cached bootstrap peer multiaddrs
	rqliteCache      []string // cached rqlite HTTP endpoints
	ipfsClusterCache string   // cached IPFS cluster API URL
	ipfsAPICache     string   // cached IPFS node API URL
	cacheMutex       sync.RWMutex
)
// loadGatewayConfig reads and parses ~/.debros/gateway.yaml into a generic
// map. Returns an error if the path cannot be resolved, the file cannot be
// read, or the YAML is invalid.
func loadGatewayConfig() (map[string]interface{}, error) {
	path, err := config.DefaultPath("gateway.yaml")
	if err != nil {
		return nil, fmt.Errorf("failed to get gateway config path: %w", err)
	}
	raw, readErr := os.ReadFile(path)
	if readErr != nil {
		return nil, fmt.Errorf("failed to read gateway config: %w", readErr)
	}
	parsed := map[string]interface{}{}
	if err := yaml.Unmarshal(raw, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse gateway config: %w", err)
	}
	return parsed, nil
}
// loadNodeConfig reads and parses a node config file (e.g. node.yaml or
// bootstrap.yaml) from the default DeBros config directory into a generic
// map. Returns an error on path-resolution, read, or YAML-parse failure.
func loadNodeConfig(filename string) (map[string]interface{}, error) {
	path, err := config.DefaultPath(filename)
	if err != nil {
		return nil, fmt.Errorf("failed to get config path: %w", err)
	}
	raw, readErr := os.ReadFile(path)
	if readErr != nil {
		return nil, fmt.Errorf("failed to read config: %w", readErr)
	}
	parsed := map[string]interface{}{}
	if err := yaml.Unmarshal(raw, &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	return parsed, nil
}
// GetGatewayURL returns the gateway base URL, derived from the "server.port"
// entry of gateway.yaml, falling back to http://localhost:6001.
// The discovered URL is cached; the fallback is NOT cached, so on a missing
// config this re-reads the file on every call.
func GetGatewayURL() string {
	cacheMutex.RLock()
	if gatewayURLCache != "" {
		defer cacheMutex.RUnlock()
		return gatewayURLCache
	}
	cacheMutex.RUnlock()
	// Try to load from gateway config
	gwCfg, err := loadGatewayConfig()
	if err == nil {
		// yaml.v2 unmarshals nested maps as map[interface{}]interface{}.
		if server, ok := gwCfg["server"].(map[interface{}]interface{}); ok {
			if port, ok := server["port"].(int); ok {
				url := fmt.Sprintf("http://localhost:%d", port)
				cacheMutex.Lock()
				gatewayURLCache = url
				cacheMutex.Unlock()
				return url
			}
		}
	}
	// Default fallback
	return "http://localhost:6001"
}
// GetRQLiteNodes returns rqlite HTTP endpoint addresses, taken from the
// "database.rqlite_port" entry of the first node config file that has one.
// The result is cached. Falls back to http://localhost:5001.
// NOTE(review): the 5001 fallback is the same port as the IPFS API fallback
// in GetIPFSAPIURL — confirm this is the intended rqlite default.
func GetRQLiteNodes() []string {
	cacheMutex.RLock()
	if len(rqliteCache) > 0 {
		defer cacheMutex.RUnlock()
		return rqliteCache
	}
	cacheMutex.RUnlock()
	// Try bootstrap.yaml first, then all node variants
	for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
		nodeCfg, err := loadNodeConfig(cfgFile)
		if err != nil {
			continue
		}
		// yaml.v2 yields map[interface{}]interface{} for nested maps.
		if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok {
			if rqlitePort, ok := db["rqlite_port"].(int); ok {
				nodes := []string{fmt.Sprintf("http://localhost:%d", rqlitePort)}
				cacheMutex.Lock()
				rqliteCache = nodes
				cacheMutex.Unlock()
				return nodes
			}
		}
	}
	// Default fallback
	return []string{"http://localhost:5001"}
}
// queryAPIKeyFromRQLite reads an API key directly from the SQLite database
// files of the local rqlite node data directories under ~/.debros, trying
// the bootstrap node first and then each numbered node.
//
// Returns the first non-empty key found, or an error when no database
// yields one.
func queryAPIKeyFromRQLite() (string, error) {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		return "", fmt.Errorf("failed to get home directory: %w", err)
	}
	// Try bootstrap first, then all nodes
	dbPaths := []string{
		filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "bootstrap2", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "node4", "rqlite", "db.sqlite"),
	}
	for _, dbPath := range dbPaths {
		// Fix: the original deferred db.Close() and cancel() inside this
		// loop, leaking one DB handle and one context per candidate path
		// until the function returned. The helper scopes them per attempt.
		if apiKey := readAPIKeyFromSQLite(dbPath); apiKey != "" {
			return apiKey, nil
		}
	}
	return "", fmt.Errorf("failed to retrieve API key from any SQLite database")
}

// readAPIKeyFromSQLite opens a single SQLite database file and returns the
// first API key from the api_keys table, or "" on any failure (missing
// file, open error, no rows, scan error). Errors are deliberately swallowed
// because the caller simply tries the next candidate database.
func readAPIKeyFromSQLite(dbPath string) string {
	if _, err := os.Stat(dbPath); err != nil {
		return ""
	}
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return ""
	}
	defer db.Close()
	// Bound the query so a locked/corrupt database cannot hang the tests.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	var apiKey string
	row := db.QueryRowContext(ctx, "SELECT key FROM api_keys ORDER BY id LIMIT 1")
	if err := row.Scan(&apiKey); err != nil {
		return ""
	}
	return apiKey
}
// GetAPIKey returns the gateway API key, reading it once from the local
// rqlite SQLite databases and caching the result. Returns "" when no key
// can be found.
func GetAPIKey() string {
	cacheMutex.RLock()
	cached := apiKeyCache
	cacheMutex.RUnlock()
	if cached != "" {
		return cached
	}
	// Cache miss: query the node databases directly.
	key, err := queryAPIKeyFromRQLite()
	if err != nil {
		return ""
	}
	cacheMutex.Lock()
	apiKeyCache = key
	cacheMutex.Unlock()
	return key
}
// GetJWT returns the gateway JWT token. JWT auto-discovery is not
// implemented, so this always returns "" and callers fall back to the
// API key from GetAPIKey.
func GetJWT() string {
	return ""
}
// GetBootstrapPeers returns the deduplicated union of
// "discovery.bootstrap_peers" entries from all known node config files.
// A non-empty result is cached; nil is returned (and not cached) when no
// config contributes any peer.
func GetBootstrapPeers() []string {
	cacheMutex.RLock()
	if len(bootstrapCache) > 0 {
		defer cacheMutex.RUnlock()
		return bootstrapCache
	}
	cacheMutex.RUnlock()
	configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
	// seen deduplicates peers that appear in multiple config files.
	seen := make(map[string]struct{})
	var peers []string
	for _, cfgFile := range configFiles {
		nodeCfg, err := loadNodeConfig(cfgFile)
		if err != nil {
			continue
		}
		// yaml.v2 yields map[interface{}]interface{} for nested maps.
		discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{})
		if !ok {
			continue
		}
		rawPeers, ok := discovery["bootstrap_peers"].([]interface{})
		if !ok {
			continue
		}
		for _, v := range rawPeers {
			peerStr, ok := v.(string)
			if !ok || peerStr == "" {
				continue
			}
			if _, exists := seen[peerStr]; exists {
				continue
			}
			seen[peerStr] = struct{}{}
			peers = append(peers, peerStr)
		}
	}
	if len(peers) == 0 {
		return nil
	}
	cacheMutex.Lock()
	bootstrapCache = peers
	cacheMutex.Unlock()
	return peers
}
// GetIPFSClusterURL returns the IPFS cluster API URL from the first node
// config that defines "database.ipfs.cluster_api_url". A discovered URL is
// cached; the http://localhost:9094 fallback is not.
func GetIPFSClusterURL() string {
	cacheMutex.RLock()
	if ipfsClusterCache != "" {
		defer cacheMutex.RUnlock()
		return ipfsClusterCache
	}
	cacheMutex.RUnlock()
	// Try to load from node config
	for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
		nodeCfg, err := loadNodeConfig(cfgFile)
		if err != nil {
			continue
		}
		// yaml.v2 yields map[interface{}]interface{} for nested maps.
		if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok {
			if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok {
				if url, ok := ipfs["cluster_api_url"].(string); ok && url != "" {
					cacheMutex.Lock()
					ipfsClusterCache = url
					cacheMutex.Unlock()
					return url
				}
			}
		}
	}
	// Default fallback
	return "http://localhost:9094"
}
// GetIPFSAPIURL returns the IPFS node API URL from the first node config
// that defines "database.ipfs.api_url". A discovered URL is cached; the
// http://localhost:5001 fallback is not.
func GetIPFSAPIURL() string {
	cacheMutex.RLock()
	if ipfsAPICache != "" {
		defer cacheMutex.RUnlock()
		return ipfsAPICache
	}
	cacheMutex.RUnlock()
	// Try to load from node config
	for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
		nodeCfg, err := loadNodeConfig(cfgFile)
		if err != nil {
			continue
		}
		// yaml.v2 yields map[interface{}]interface{} for nested maps.
		if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok {
			if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok {
				if url, ok := ipfs["api_url"].(string); ok && url != "" {
					cacheMutex.Lock()
					ipfsAPICache = url
					cacheMutex.Unlock()
					return url
				}
			}
		}
	}
	// Default fallback
	return "http://localhost:5001"
}
// GetClientNamespace returns the namespace tests should use for the network
// client, read from "discovery.node_namespace" of the first node config that
// defines it. Falls back to "default". The value is not cached.
func GetClientNamespace() string {
	candidates := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
	for _, name := range candidates {
		cfg, err := loadNodeConfig(name)
		if err != nil {
			continue
		}
		discovery, ok := cfg["discovery"].(map[interface{}]interface{})
		if !ok {
			continue
		}
		if ns, ok := discovery["node_namespace"].(string); ok && ns != "" {
			return ns
		}
	}
	return "default"
}
// SkipIfMissingGateway skips the calling test when the gateway environment
// is unusable: no API key can be discovered, the gateway is unreachable, or
// the health endpoint does not answer 200 OK.
func SkipIfMissingGateway(t *testing.T) {
	t.Helper()
	apiKey := GetAPIKey()
	if apiKey == "" {
		t.Skip("API key not available from rqlite; gateway tests skipped")
	}
	// Verify gateway is accessible
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil)
	if err != nil {
		t.Skip("Gateway not accessible; tests skipped")
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Skip("Gateway not accessible; tests skipped")
		return
	}
	defer resp.Body.Close()
	// Fix: the original ignored the response status, so an unhealthy gateway
	// (e.g. 500) would not skip and every test would fail noisily instead.
	if resp.StatusCode != http.StatusOK {
		t.Skipf("Gateway health returned status %d; tests skipped", resp.StatusCode)
	}
}
// IsGatewayReady reports whether the gateway's /v1/health endpoint answers
// 200 OK within the lifetime of ctx. Any request-building or transport
// error is treated as "not ready".
func IsGatewayReady(ctx context.Context) bool {
	healthURL := GetGatewayURL() + "/v1/health"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, healthURL, nil)
	if err != nil {
		return false
	}
	resp, doErr := http.DefaultClient.Do(req)
	if doErr != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}
// NewHTTPClient creates an authenticated HTTP client for gateway requests
func NewHTTPClient(timeout time.Duration) *http.Client {
if timeout == 0 {
timeout = 30 * time.Second
}
return &http.Client{Timeout: timeout}
}
// HTTPRequest is a helper for making authenticated HTTP requests to the
// gateway; see Do for execution semantics.
type HTTPRequest struct {
	Method   string            // HTTP method, e.g. http.MethodPost
	URL      string            // absolute request URL
	Body     interface{}       // JSON-marshaled into the request body when non-nil
	Headers  map[string]string // extra headers, set before auth/content-type defaults
	Timeout  time.Duration     // per-request client timeout; 0 means 30s default
	SkipAuth bool              // when true, no Authorization/X-API-Key headers are added
}
// Do executes the request described by hr and returns the response body,
// the HTTP status code, and an error. The Body field (if set) is sent as
// JSON; unless SkipAuth is set, the discovered API key is attached as both
// an Authorization bearer token and an X-API-Key header.
//
// A non-2xx status is NOT an error; callers inspect the returned code.
func (hr *HTTPRequest) Do(ctx context.Context) ([]byte, int, error) {
	// Fix: use a local effective timeout instead of writing the default back
	// into hr.Timeout, so Do has no side effect on a shared request value.
	timeout := hr.Timeout
	if timeout == 0 {
		timeout = 30 * time.Second
	}
	var reqBody io.Reader
	if hr.Body != nil {
		data, err := json.Marshal(hr.Body)
		if err != nil {
			return nil, 0, fmt.Errorf("failed to marshal request body: %w", err)
		}
		reqBody = bytes.NewReader(data)
	}
	req, err := http.NewRequestWithContext(ctx, hr.Method, hr.URL, reqBody)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to create request: %w", err)
	}
	// Add caller-supplied headers first so the defaults below can detect them.
	if hr.Headers != nil {
		for k, v := range hr.Headers {
			req.Header.Set(k, v)
		}
	}
	// Add JSON content type if body is present and not already overridden.
	if hr.Body != nil && req.Header.Get("Content-Type") == "" {
		req.Header.Set("Content-Type", "application/json")
	}
	// Add auth headers (both forms, since endpoints differ in which they read).
	if !hr.SkipAuth {
		if apiKey := GetAPIKey(); apiKey != "" {
			req.Header.Set("Authorization", "Bearer "+apiKey)
			req.Header.Set("X-API-Key", apiKey)
		}
	}
	client := NewHTTPClient(timeout)
	resp, err := client.Do(req)
	if err != nil {
		return nil, 0, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, resp.StatusCode, fmt.Errorf("failed to read response: %w", err)
	}
	return respBody, resp.StatusCode, nil
}
// DecodeJSON unmarshals a JSON response body into v, which must be a
// pointer. It is a thin wrapper kept for readability at call sites.
func DecodeJSON(data []byte, v interface{}) error {
	if err := json.Unmarshal(data, v); err != nil {
		return err
	}
	return nil
}
// NewNetworkClient creates a network client configured for e2e tests:
// namespace, API key, bootstrap peers, and rqlite endpoints are all
// auto-discovered from local config files. The test fails immediately if
// the client cannot be constructed.
func NewNetworkClient(t *testing.T) client.NetworkClient {
	t.Helper()
	namespace := GetClientNamespace()
	cfg := client.DefaultClientConfig(namespace)
	cfg.APIKey = GetAPIKey()
	cfg.QuietMode = true // Suppress debug logs in tests
	// JWT is optional; GetJWT currently always returns "".
	if jwt := GetJWT(); jwt != "" {
		cfg.JWT = jwt
	}
	if peers := GetBootstrapPeers(); len(peers) > 0 {
		cfg.BootstrapPeers = peers
	}
	if nodes := GetRQLiteNodes(); len(nodes) > 0 {
		cfg.DatabaseEndpoints = nodes
	}
	c, err := client.NewClient(cfg)
	if err != nil {
		t.Fatalf("failed to create network client: %v", err)
	}
	return c
}
// GenerateUniqueID generates a unique identifier for test resources
func GenerateUniqueID(prefix string) string {
return fmt.Sprintf("%s_%d_%d", prefix, time.Now().UnixNano(), rand.Intn(10000))
}
// GenerateTableName generates a unique table name ("e2e_test_...") so
// database tests never collide with one another.
func GenerateTableName() string {
	return GenerateUniqueID("e2e_test")
}
// GenerateDMapName generates a unique dmap name ("test_dmap_...") for
// cache tests.
func GenerateDMapName() string {
	return GenerateUniqueID("test_dmap")
}
// GenerateTopic generates a unique topic name ("e2e_topic_...") for
// pubsub tests.
func GenerateTopic() string {
	return GenerateUniqueID("e2e_topic")
}
// Delay pauses execution for the specified duration
func Delay(ms int) {
time.Sleep(time.Duration(ms) * time.Millisecond)
}
// WaitForCondition waits for a condition with exponential backoff
func WaitForCondition(maxWait time.Duration, check func() bool) error {
deadline := time.Now().Add(maxWait)
backoff := 100 * time.Millisecond
for {
if check() {
return nil
}
if time.Now().After(deadline) {
return fmt.Errorf("condition not met within %v", maxWait)
}
time.Sleep(backoff)
if backoff < 2*time.Second {
backoff = backoff * 2
}
}
}
// NewTestLogger creates a zap development logger at debug level for use in
// tests; construction failure aborts the test.
func NewTestLogger(t *testing.T) *zap.Logger {
	t.Helper()
	config := zap.NewDevelopmentConfig()
	config.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
	logger, err := config.Build()
	if err != nil {
		t.Fatalf("failed to create logger: %v", err)
	}
	return logger
}
// CleanupDatabaseTable drops a test table by opening the bootstrap node's
// SQLite file directly. All failures are logged, never fatal, so cleanup
// cannot fail a passing test.
// NOTE(review): only the bootstrap DB is touched, while other helpers try
// every node directory — confirm replication makes that sufficient.
func CleanupDatabaseTable(t *testing.T, tableName string) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Query rqlite to drop the table
	homeDir, err := os.UserHomeDir()
	if err != nil {
		t.Logf("warning: failed to get home directory for cleanup: %v", err)
		return
	}
	dbPath := filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite")
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		t.Logf("warning: failed to open database for cleanup: %v", err)
		return
	}
	defer db.Close()
	// tableName is interpolated directly; callers pass generated names only.
	dropSQL := fmt.Sprintf("DROP TABLE IF EXISTS %s", tableName)
	if _, err := db.ExecContext(ctx, dropSQL); err != nil {
		t.Logf("warning: failed to drop table %s: %v", tableName, err)
	}
}
// CleanupDMapCache deletes a dmap through the gateway after a test.
// Failures are only logged; 200/204/404 all count as success.
func CleanupDMapCache(t *testing.T, dmapName string) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	delReq := &HTTPRequest{
		Method:  http.MethodDelete,
		URL:     GetGatewayURL() + "/v1/cache/dmap/" + dmapName,
		Timeout: 10 * time.Second,
	}
	_, status, err := delReq.Do(ctx)
	if err != nil {
		t.Logf("warning: failed to delete dmap %s: %v", dmapName, err)
		return
	}
	switch status {
	case http.StatusOK, http.StatusNoContent, http.StatusNotFound:
		// acceptable outcomes: deleted, already gone, or never existed
	default:
		t.Logf("warning: delete dmap returned status %d", status)
	}
}
// CleanupIPFSFile unpins a CID from the IPFS cluster after a test.
// All failures are logged, never fatal.
func CleanupIPFSFile(t *testing.T, cid string) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	logger := NewTestLogger(t)
	cfg := ipfs.Config{
		ClusterAPIURL: GetIPFSClusterURL(),
		Timeout:       30 * time.Second,
	}
	// Named ipfsClient to avoid shadowing the imported "client" package.
	ipfsClient, err := ipfs.NewClient(cfg, logger)
	if err != nil {
		t.Logf("warning: failed to create IPFS client for cleanup: %v", err)
		return
	}
	if unpinErr := ipfsClient.Unpin(ctx, cid); unpinErr != nil {
		t.Logf("warning: failed to unpin file %s: %v", cid, unpinErr)
	}
}
// CleanupCacheEntry deletes one cache key through the gateway after a test.
// Failures are only logged; 200/204/404 all count as success.
func CleanupCacheEntry(t *testing.T, dmapName, key string) {
	t.Helper()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	delReq := &HTTPRequest{
		Method:  http.MethodDelete,
		URL:     GetGatewayURL() + "/v1/cache/dmap/" + dmapName + "/key/" + key,
		Timeout: 10 * time.Second,
	}
	_, status, err := delReq.Do(ctx)
	if err != nil {
		t.Logf("warning: failed to delete cache entry: %v", err)
		return
	}
	switch status {
	case http.StatusOK, http.StatusNoContent, http.StatusNotFound:
		// acceptable outcomes: deleted, already gone, or never existed
	default:
		t.Logf("warning: delete cache entry returned status %d", status)
	}
}

View File

@ -1,427 +0,0 @@
//go:build e2e
package e2e
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/gorilla/websocket"
)
func getEnv(key, def string) string {
if v := strings.TrimSpace(os.Getenv(key)); v != "" {
return v
}
return def
}
// requireAPIKey returns the GATEWAY_API_KEY environment variable, skipping
// the calling test when it is unset or blank.
func requireAPIKey(t *testing.T) string {
	t.Helper()
	key := strings.TrimSpace(os.Getenv("GATEWAY_API_KEY"))
	if key == "" {
		t.Skip("GATEWAY_API_KEY not set; skipping gateway auth-required tests")
	}
	return key
}
// gatewayBaseURL returns GATEWAY_BASE_URL or the local default.
func gatewayBaseURL() string {
	return getEnv("GATEWAY_BASE_URL", "http://127.0.0.1:6001")
}
func httpClient() *http.Client {
return &http.Client{Timeout: 10 * time.Second}
}
// authHeader builds the standard request headers for gateway calls: a
// bearer Authorization token plus a JSON content type.
func authHeader(key string) http.Header {
	return http.Header{
		"Authorization": []string{"Bearer " + key},
		"Content-Type":  []string{"application/json"},
	}
}
// TestGateway_Health verifies /v1/health returns 200 with {"status":"ok"}.
func TestGateway_Health(t *testing.T) {
	base := gatewayBaseURL()
	resp, err := httpClient().Get(base + "/v1/health")
	if err != nil {
		t.Fatalf("health request error: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status: %d", resp.StatusCode)
	}
	var body map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		t.Fatalf("decode: %v", err)
	}
	if body["status"] != "ok" {
		t.Fatalf("status not ok: %+v", body)
	}
}
// TestGateway_PubSub_WS_Echo opens a websocket subscription on a fresh
// topic, publishes a message on the same socket, and expects it echoed back.
func TestGateway_PubSub_WS_Echo(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()
	topic := fmt.Sprintf("e2e-ws-%d", time.Now().UnixNano())
	wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{}
	hdr.Set("Authorization", "Bearer "+key)
	c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr)
	if err != nil {
		t.Fatalf("ws dial: %v", err)
	}
	// Deferred LIFO: the close frame is written first, then the socket closes.
	defer c.Close()
	defer c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
	msg := []byte("hello-ws")
	if err := c.WriteMessage(websocket.TextMessage, msg); err != nil {
		t.Fatalf("ws write: %v", err)
	}
	_, data, err := c.ReadMessage()
	if err != nil {
		t.Fatalf("ws read: %v", err)
	}
	if string(data) != string(msg) {
		t.Fatalf("ws echo mismatch: %q", string(data))
	}
}
// TestGateway_PubSub_RestPublishToWS subscribes to a fresh topic over
// websocket, publishes a random payload via the REST publish endpoint,
// expects the exact bytes on the socket, and finally checks the topic shows
// up in /v1/pubsub/topics.
func TestGateway_PubSub_RestPublishToWS(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()
	topic := fmt.Sprintf("e2e-rest-%d", time.Now().UnixNano())
	wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{}
	hdr.Set("Authorization", "Bearer "+key)
	c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr)
	if err != nil {
		t.Fatalf("ws dial: %v", err)
	}
	defer c.Close()
	// Publish via REST; payload travels base64-encoded in the JSON body.
	payload := randomBytes(24)
	b64 := base64.StdEncoding.EncodeToString(payload)
	body := fmt.Sprintf(`{"topic":"%s","data_base64":"%s"}`, topic, b64)
	req, _ := http.NewRequest(http.MethodPost, base+"/v1/pubsub/publish", strings.NewReader(body))
	req.Header = authHeader(key)
	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("publish do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("publish status: %d", resp.StatusCode)
	}
	// Expect the message via WS within 5 seconds.
	_ = c.SetReadDeadline(time.Now().Add(5 * time.Second))
	_, data, err := c.ReadMessage()
	if err != nil {
		t.Fatalf("ws read: %v", err)
	}
	if string(data) != string(payload) {
		t.Fatalf("payload mismatch: %q != %q", string(data), string(payload))
	}
	// Topics list should include our topic (without namespace prefix)
	req2, _ := http.NewRequest(http.MethodGet, base+"/v1/pubsub/topics", nil)
	req2.Header = authHeader(key)
	resp2, err := httpClient().Do(req2)
	if err != nil {
		t.Fatalf("topics do: %v", err)
	}
	defer resp2.Body.Close()
	if resp2.StatusCode != http.StatusOK {
		t.Fatalf("topics status: %d", resp2.StatusCode)
	}
	var tlist struct {
		Topics []string `json:"topics"`
	}
	if err := json.NewDecoder(resp2.Body).Decode(&tlist); err != nil {
		t.Fatalf("topics decode: %v", err)
	}
	found := false
	for _, tt := range tlist.Topics {
		if tt == topic {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("topic %s not found in topics list", topic)
	}
}
// TestGateway_Database_CreateQueryMigrate runs the basic database flow:
// create a table, seed two rows via a transaction, query them back, and
// confirm the schema endpoint responds.
func TestGateway_Database_CreateQueryMigrate(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()
	// Create table
	schema := `CREATE TABLE IF NOT EXISTS e2e_items (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)`
	body := fmt.Sprintf(`{"schema":%q}`, schema)
	req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body))
	req.Header = authHeader(key)
	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("create-table do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		t.Fatalf("create-table status: %d", resp.StatusCode)
	}
	// Insert via transaction (simulate migration/data seed)
	txBody := `{"statements":["INSERT INTO e2e_items(name) VALUES ('one')","INSERT INTO e2e_items(name) VALUES ('two')"]}`
	req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txBody))
	req.Header = authHeader(key)
	resp, err = httpClient().Do(req)
	if err != nil {
		t.Fatalf("tx do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("tx status: %d", resp.StatusCode)
	}
	// Query rows
	qBody := `{"sql":"SELECT name FROM e2e_items ORDER BY id ASC"}`
	req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody))
	req.Header = authHeader(key)
	resp, err = httpClient().Do(req)
	if err != nil {
		t.Fatalf("query do: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("query status: %d", resp.StatusCode)
	}
	var qr struct {
		Columns []string `json:"columns"`
		Rows    [][]any  `json:"rows"`
		Count   int      `json:"count"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
		t.Fatalf("query decode: %v", err)
	}
	// "At least 2" because the table persists across runs and uses IF NOT EXISTS.
	if qr.Count < 2 {
		t.Fatalf("expected at least 2 rows, got %d", qr.Count)
	}
	// Schema endpoint returns tables
	req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil)
	req.Header = authHeader(key)
	resp2, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("schema do: %v", err)
	}
	defer resp2.Body.Close()
	if resp2.StatusCode != http.StatusOK {
		t.Fatalf("schema status: %d", resp2.StatusCode)
	}
}
// TestGateway_Database_DropTable creates a uniquely named table, drops it
// via /v1/rqlite/drop-table, and then verifies the schema endpoint no
// longer lists it.
func TestGateway_Database_DropTable(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()
	table := fmt.Sprintf("e2e_tmp_%d", time.Now().UnixNano())
	schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)", table)
	// create
	body := fmt.Sprintf(`{"schema":%q}`, schema)
	req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body))
	req.Header = authHeader(key)
	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("create-table do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		t.Fatalf("create-table status: %d", resp.StatusCode)
	}
	// drop
	dbody := fmt.Sprintf(`{"table":%q}`, table)
	req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/drop-table", strings.NewReader(dbody))
	req.Header = authHeader(key)
	resp, err = httpClient().Do(req)
	if err != nil {
		t.Fatalf("drop-table do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("drop-table status: %d", resp.StatusCode)
	}
	// verify not in schema
	req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil)
	req.Header = authHeader(key)
	resp2, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("schema do: %v", err)
	}
	defer resp2.Body.Close()
	if resp2.StatusCode != http.StatusOK {
		t.Fatalf("schema status: %d", resp2.StatusCode)
	}
	var schemaResp struct {
		Tables []struct {
			Name string `json:"name"`
		} `json:"tables"`
	}
	if err := json.NewDecoder(resp2.Body).Decode(&schemaResp); err != nil {
		t.Fatalf("schema decode: %v", err)
	}
	for _, tbl := range schemaResp.Tables {
		if tbl.Name == table {
			t.Fatalf("table %s still present after drop", table)
		}
	}
}
// TestGateway_Database_RecreateWithFK performs the SQLite "recreate table"
// migration pattern over the gateway: seed orgs/users tables, rebuild users
// with age retyped TEXT->INTEGER plus a foreign key to orgs(id), then verify
// the new column type via PRAGMA table_info with a sqlite_master fallback.
func TestGateway_Database_RecreateWithFK(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()
	// base tables (uniquely named per run)
	orgs := fmt.Sprintf("e2e_orgs_%d", time.Now().UnixNano())
	users := fmt.Sprintf("e2e_users_%d", time.Now().UnixNano())
	createOrgs := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)", orgs))
	createUsers := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)", users))
	for _, body := range []string{createOrgs, createUsers} {
		req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body))
		req.Header = authHeader(key)
		resp, err := httpClient().Do(req)
		if err != nil {
			t.Fatalf("create-table do: %v", err)
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusCreated {
			t.Fatalf("create-table status: %d", resp.StatusCode)
		}
	}
	// seed data
	txSeed := fmt.Sprintf(`{"statements":["INSERT INTO %s(id,name) VALUES (1,'org')","INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')"]}`, orgs, users)
	req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txSeed))
	req.Header = authHeader(key)
	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("seed tx do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("seed tx status: %d", resp.StatusCode)
	}
	// migrate: change users.age TEXT -> INTEGER and add FK to orgs(id)
	// Note: Some backends may not support connection-scoped BEGIN/COMMIT or PRAGMA via HTTP.
	// We apply the standard recreate pattern without explicit PRAGMAs/transaction.
	txMig := fmt.Sprintf(`{"statements":[
"CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)",
"INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s",
"DROP TABLE %s",
"ALTER TABLE %s_new RENAME TO %s"
]}`, users, orgs, users, users, users, users, users)
	req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txMig))
	req.Header = authHeader(key)
	resp, err = httpClient().Do(req)
	if err != nil {
		t.Fatalf("mig tx do: %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("mig tx status: %d", resp.StatusCode)
	}
	// verify schema type change
	qBody := fmt.Sprintf(`{"sql":"PRAGMA table_info(%s)"}`, users)
	req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody))
	req.Header = authHeader(key)
	resp, err = httpClient().Do(req)
	if err != nil {
		t.Fatalf("pragma do: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("pragma status: %d", resp.StatusCode)
	}
	var qr struct {
		Columns []string `json:"columns"`
		Rows    [][]any  `json:"rows"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
		t.Fatalf("pragma decode: %v", err)
	}
	// column order: cid,name,type,notnull,dflt_value,pk
	ageIsInt := false
	for _, row := range qr.Rows {
		if len(row) >= 3 && fmt.Sprintf("%v", row[1]) == "age" {
			tstr := strings.ToUpper(fmt.Sprintf("%v", row[2]))
			if strings.Contains(tstr, "INT") {
				ageIsInt = true
				break
			}
		}
	}
	if !ageIsInt {
		// Fallback: inspect CREATE TABLE SQL from sqlite_master
		qBody2 := fmt.Sprintf(`{"sql":"SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'"}`, users)
		req2, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody2))
		req2.Header = authHeader(key)
		resp3, err := httpClient().Do(req2)
		if err != nil {
			t.Fatalf("sqlite_master do: %v", err)
		}
		defer resp3.Body.Close()
		if resp3.StatusCode != http.StatusOK {
			t.Fatalf("sqlite_master status: %d", resp3.StatusCode)
		}
		var qr2 struct {
			Rows [][]any `json:"rows"`
		}
		if err := json.NewDecoder(resp3.Body).Decode(&qr2); err != nil {
			t.Fatalf("sqlite_master decode: %v", err)
		}
		found := false
		for _, row := range qr2.Rows {
			if len(row) > 0 {
				sql := strings.ToUpper(fmt.Sprintf("%v", row[0]))
				if strings.Contains(sql, "AGE INT") || strings.Contains(sql, "AGE INTEGER") {
					found = true
					break
				}
			}
		}
		if !found {
			t.Fatalf("age column type not INTEGER after migration")
		}
	}
}
// toWSURL rewrites an HTTP(S) URL into its websocket equivalent:
// https becomes wss, any other scheme becomes ws. If the input cannot
// be parsed it is returned unmodified.
func toWSURL(httpURL string) string {
	parsed, parseErr := url.Parse(httpURL)
	if parseErr != nil {
		return httpURL
	}
	switch parsed.Scheme {
	case "https":
		parsed.Scheme = "wss"
	default:
		parsed.Scheme = "ws"
	}
	return parsed.String()
}
// randomBytes returns n random bytes for use as test payloads.
// Read errors are deliberately ignored: for fixtures a short/zero
// buffer is acceptable and must not abort the test setup.
func randomBytes(n int) []byte {
	buf := make([]byte, n)
	_, _ = rand.Read(buf)
	return buf
}

400
e2e/ipfs_cluster_test.go Normal file
View File

@ -0,0 +1,400 @@
//go:build e2e
package e2e
import (
"bytes"
"context"
"fmt"
"io"
"testing"
"time"
"github.com/DeBrosOfficial/network/pkg/ipfs"
)
func TestIPFSCluster_Health(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 10 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
err = client.Health(ctx)
if err != nil {
t.Fatalf("health check failed: %v", err)
}
}
func TestIPFSCluster_GetPeerCount(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 10 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
peerCount, err := client.GetPeerCount(ctx)
if err != nil {
t.Fatalf("get peer count failed: %v", err)
}
if peerCount < 0 {
t.Fatalf("expected non-negative peer count, got %d", peerCount)
}
t.Logf("IPFS cluster peers: %d", peerCount)
}
func TestIPFSCluster_AddFile(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
content := []byte("IPFS cluster test content")
result, err := client.Add(ctx, bytes.NewReader(content), "test.txt")
if err != nil {
t.Fatalf("add file failed: %v", err)
}
if result.Cid == "" {
t.Fatalf("expected non-empty CID")
}
if result.Size != int64(len(content)) {
t.Fatalf("expected size %d, got %d", len(content), result.Size)
}
t.Logf("Added file with CID: %s", result.Cid)
}
func TestIPFSCluster_PinFile(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Add file first
content := []byte("IPFS pin test content")
addResult, err := client.Add(ctx, bytes.NewReader(content), "pin-test.txt")
if err != nil {
t.Fatalf("add file failed: %v", err)
}
cid := addResult.Cid
// Pin the file
pinResult, err := client.Pin(ctx, cid, "pinned-file", 1)
if err != nil {
t.Fatalf("pin file failed: %v", err)
}
if pinResult.Cid != cid {
t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid)
}
t.Logf("Pinned file: %s", cid)
}
func TestIPFSCluster_PinStatus(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Add and pin file
content := []byte("IPFS status test content")
addResult, err := client.Add(ctx, bytes.NewReader(content), "status-test.txt")
if err != nil {
t.Fatalf("add file failed: %v", err)
}
cid := addResult.Cid
pinResult, err := client.Pin(ctx, cid, "status-test", 1)
if err != nil {
t.Fatalf("pin file failed: %v", err)
}
if pinResult.Cid != cid {
t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid)
}
// Give pin time to propagate
Delay(1000)
// Get status
status, err := client.PinStatus(ctx, cid)
if err != nil {
t.Fatalf("get pin status failed: %v", err)
}
if status.Cid != cid {
t.Fatalf("expected cid %s, got %s", cid, status.Cid)
}
if status.Name != "status-test" {
t.Fatalf("expected name 'status-test', got %s", status.Name)
}
if status.ReplicationFactor < 1 {
t.Logf("warning: replication factor is %d, expected >= 1", status.ReplicationFactor)
}
t.Logf("Pin status: %s (replication: %d, peers: %d)", status.Status, status.ReplicationFactor, len(status.Peers))
}
func TestIPFSCluster_UnpinFile(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Add and pin file
content := []byte("IPFS unpin test content")
addResult, err := client.Add(ctx, bytes.NewReader(content), "unpin-test.txt")
if err != nil {
t.Fatalf("add file failed: %v", err)
}
cid := addResult.Cid
_, err = client.Pin(ctx, cid, "unpin-test", 1)
if err != nil {
t.Fatalf("pin file failed: %v", err)
}
// Unpin file
err = client.Unpin(ctx, cid)
if err != nil {
t.Fatalf("unpin file failed: %v", err)
}
t.Logf("Unpinned file: %s", cid)
}
func TestIPFSCluster_GetFile(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Add file
content := []byte("IPFS get test content")
addResult, err := client.Add(ctx, bytes.NewReader(content), "get-test.txt")
if err != nil {
t.Fatalf("add file failed: %v", err)
}
cid := addResult.Cid
// Give time for propagation
Delay(1000)
// Get file
rc, err := client.Get(ctx, cid, GetIPFSAPIURL())
if err != nil {
t.Fatalf("get file failed: %v", err)
}
defer rc.Close()
retrievedContent, err := io.ReadAll(rc)
if err != nil {
t.Fatalf("failed to read content: %v", err)
}
if !bytes.Equal(retrievedContent, content) {
t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent))
}
t.Logf("Retrieved file: %s (%d bytes)", cid, len(retrievedContent))
}
func TestIPFSCluster_LargeFile(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 60 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Create 5MB file
content := bytes.Repeat([]byte("x"), 5*1024*1024)
result, err := client.Add(ctx, bytes.NewReader(content), "large.bin")
if err != nil {
t.Fatalf("add large file failed: %v", err)
}
if result.Cid == "" {
t.Fatalf("expected non-empty CID")
}
if result.Size != int64(len(content)) {
t.Fatalf("expected size %d, got %d", len(content), result.Size)
}
t.Logf("Added large file with CID: %s (%d bytes)", result.Cid, result.Size)
}
func TestIPFSCluster_ReplicationFactor(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Add file
content := []byte("IPFS replication test content")
addResult, err := client.Add(ctx, bytes.NewReader(content), "replication-test.txt")
if err != nil {
t.Fatalf("add file failed: %v", err)
}
cid := addResult.Cid
// Pin with specific replication factor
replicationFactor := 2
pinResult, err := client.Pin(ctx, cid, "replication-test", replicationFactor)
if err != nil {
t.Fatalf("pin file failed: %v", err)
}
if pinResult.Cid != cid {
t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid)
}
// Give time for replication
Delay(2000)
// Check status
status, err := client.PinStatus(ctx, cid)
if err != nil {
t.Fatalf("get pin status failed: %v", err)
}
t.Logf("Replication factor: requested=%d, actual=%d, peers=%d", replicationFactor, status.ReplicationFactor, len(status.Peers))
}
func TestIPFSCluster_MultipleFiles(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
logger := NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
client, err := ipfs.NewClient(cfg, logger)
if err != nil {
t.Fatalf("failed to create IPFS client: %v", err)
}
// Add multiple files
numFiles := 5
var cids []string
for i := 0; i < numFiles; i++ {
content := []byte(fmt.Sprintf("File %d", i))
result, err := client.Add(ctx, bytes.NewReader(content), fmt.Sprintf("file%d.txt", i))
if err != nil {
t.Fatalf("add file %d failed: %v", i, err)
}
cids = append(cids, result.Cid)
}
if len(cids) != numFiles {
t.Fatalf("expected %d files added, got %d", numFiles, len(cids))
}
// Verify all files exist
for i, cid := range cids {
status, err := client.PinStatus(ctx, cid)
if err != nil {
t.Logf("warning: failed to get status for file %d: %v", i, err)
continue
}
if status.Cid != cid {
t.Fatalf("expected cid %s, got %s", cid, status.Cid)
}
}
t.Logf("Successfully added and verified %d files", numFiles)
}

View File

@ -0,0 +1,294 @@
//go:build e2e
package e2e
import (
"context"
"net/http"
"strings"
"testing"
"time"
)
// TestLibP2P_PeerConnectivity connects a client and queries the gateway's
// peer list, tolerating an empty list while the cluster initializes.
func TestLibP2P_PeerConnectivity(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Create and connect client
	c := NewNetworkClient(t)
	if err := c.Connect(); err != nil {
		t.Fatalf("connect failed: %v", err)
	}
	defer c.Disconnect()
	// Verify peer connectivity through the gateway
	req := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/peers",
	}
	body, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("peers request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	var resp map[string]interface{}
	if err := DecodeJSON(body, &resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: a missing or non-array "peers" field previously
	// panicked the test; fail with a diagnostic instead.
	peers, ok := resp["peers"].([]interface{})
	if !ok {
		t.Fatalf("expected 'peers' to be an array, got %T", resp["peers"])
	}
	if len(peers) == 0 {
		t.Logf("warning: no peers connected (cluster may still be initializing)")
	}
}
// TestLibP2P_BootstrapPeers verifies that a client can join the network
// when bootstrap peers are configured; skipped if none are provided.
func TestLibP2P_BootstrapPeers(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if len(GetBootstrapPeers()) == 0 {
		t.Skipf("E2E_BOOTSTRAP_PEERS not set; skipping")
	}

	// Create client with bootstrap peers explicitly set
	netClient := NewNetworkClient(t)
	if err := netClient.Connect(); err != nil {
		t.Fatalf("connect failed: %v", err)
	}
	defer netClient.Disconnect()

	// Give peer discovery time
	Delay(2000)

	// Verify we're connected (check via gateway status)
	statusReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/status",
	}
	body, status, err := statusReq.Do(ctx)
	if err != nil {
		t.Fatalf("status request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Connection state is advisory during cluster startup, so log only.
	if payload["connected"] != true {
		t.Logf("warning: client not connected to network (cluster may still be initializing)")
	}
}
// TestLibP2P_MultipleClientConnections connects three clients concurrently
// and confirms the gateway still reports a peer list.
func TestLibP2P_MultipleClientConnections(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Create multiple clients
	c1 := NewNetworkClient(t)
	c2 := NewNetworkClient(t)
	c3 := NewNetworkClient(t)
	if err := c1.Connect(); err != nil {
		t.Fatalf("c1 connect failed: %v", err)
	}
	defer c1.Disconnect()
	if err := c2.Connect(); err != nil {
		t.Fatalf("c2 connect failed: %v", err)
	}
	defer c2.Disconnect()
	if err := c3.Connect(); err != nil {
		t.Fatalf("c3 connect failed: %v", err)
	}
	defer c3.Disconnect()
	// Give peer discovery time
	Delay(2000)
	// Verify gateway sees multiple peers
	req := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/peers",
	}
	body, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("peers request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	var resp map[string]interface{}
	if err := DecodeJSON(body, &resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: avoid panicking when "peers" is absent or not an array.
	peers, ok := resp["peers"].([]interface{})
	if !ok {
		t.Fatalf("expected 'peers' to be an array, got %T", resp["peers"])
	}
	if len(peers) < 1 {
		t.Logf("warning: expected at least 1 peer, got %d", len(peers))
	}
}
// TestLibP2P_ReconnectAfterDisconnect cycles a client through
// connect -> disconnect -> reconnect and probes gateway status at each
// stage. Gateway probe failures are warnings, not fatal: the target of
// the test is the client lifecycle itself.
func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	netClient := NewNetworkClient(t)

	// Initial connection.
	if err := netClient.Connect(); err != nil {
		t.Fatalf("connect failed: %v", err)
	}

	// Probe gateway while connected.
	preReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/status",
	}
	_, preStatus, err := preReq.Do(ctx)
	if err != nil || preStatus != http.StatusOK {
		t.Logf("warning: gateway check failed before disconnect: status %d, err %v", preStatus, err)
	}

	// Tear the connection down and let the disconnect propagate.
	if err := netClient.Disconnect(); err != nil {
		t.Logf("warning: disconnect failed: %v", err)
	}
	Delay(500)

	// Reconnect and probe again.
	if err := netClient.Connect(); err != nil {
		t.Fatalf("reconnect failed: %v", err)
	}
	defer netClient.Disconnect()

	postReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/status",
	}
	_, postStatus, err := postReq.Do(ctx)
	if err != nil || postStatus != http.StatusOK {
		t.Logf("warning: gateway check failed after reconnect: status %d, err %v", postStatus, err)
	}
}
// TestLibP2P_PeerDiscovery waits for discovery and then validates that
// discovered peers look like multiaddrs containing /p2p/ or /ipfs/.
func TestLibP2P_PeerDiscovery(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Create client
	c := NewNetworkClient(t)
	if err := c.Connect(); err != nil {
		t.Fatalf("connect failed: %v", err)
	}
	defer c.Disconnect()
	// Give peer discovery time
	Delay(3000)
	// Get peer list
	req := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/peers",
	}
	body, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("peers request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	var resp map[string]interface{}
	if err := DecodeJSON(body, &resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertions: the original unchecked .([]interface{}) and
	// .(string) casts panicked the test on unexpected JSON shapes.
	peers, ok := resp["peers"].([]interface{})
	if !ok {
		t.Fatalf("expected 'peers' to be an array, got %T", resp["peers"])
	}
	if len(peers) == 0 {
		t.Logf("warning: no peers discovered (cluster may not have multiple nodes)")
	} else {
		// Verify peer format (should be multiaddr strings)
		for _, p := range peers {
			peerStr, ok := p.(string)
			if !ok {
				t.Fatalf("expected peer entry to be a string, got %T", p)
			}
			if !strings.Contains(peerStr, "/p2p/") && !strings.Contains(peerStr, "/ipfs/") {
				t.Logf("warning: unexpected peer format: %s", peerStr)
			}
		}
	}
}
// TestLibP2P_PeerAddressFormat checks every reported peer address is a
// multiaddr (i.e. begins with '/').
func TestLibP2P_PeerAddressFormat(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Create client
	c := NewNetworkClient(t)
	if err := c.Connect(); err != nil {
		t.Fatalf("connect failed: %v", err)
	}
	defer c.Disconnect()
	// Get peer list
	req := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/peers",
	}
	body, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("peers request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	var resp map[string]interface{}
	if err := DecodeJSON(body, &resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertions: the original unchecked casts panicked on
	// unexpected JSON shapes instead of failing with a message.
	peers, ok := resp["peers"].([]interface{})
	if !ok {
		t.Fatalf("expected 'peers' to be an array, got %T", resp["peers"])
	}
	for _, p := range peers {
		peerStr, ok := p.(string)
		if !ok {
			t.Fatalf("expected peer entry to be a string, got %T", p)
		}
		// Multiaddrs should start with /
		if !strings.HasPrefix(peerStr, "/") {
			t.Fatalf("expected multiaddr format, got %s", peerStr)
		}
	}
}

223
e2e/network_http_test.go Normal file
View File

@ -0,0 +1,223 @@
//go:build e2e
package e2e
import (
"context"
"net/http"
"testing"
"time"
)
// TestNetwork_Health hits the unauthenticated health endpoint and
// expects a 200 with status "ok".
func TestNetwork_Health(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	healthReq := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/health",
		SkipAuth: true, // health must be reachable without credentials
	}
	body, status, err := healthReq.Do(ctx)
	if err != nil {
		t.Fatalf("health check failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if payload["status"] != "ok" {
		t.Fatalf("expected status 'ok', got %v", payload["status"])
	}
}
// TestNetwork_Status checks the network status endpoint returns 200 and
// includes both the 'connected' and 'peer_count' fields.
func TestNetwork_Status(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	statusReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/status",
	}
	body, status, err := statusReq.Do(ctx)
	if err != nil {
		t.Fatalf("status check failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Only field presence is asserted; values depend on cluster state.
	if _, present := payload["connected"]; !present {
		t.Fatalf("expected 'connected' field in response")
	}
	if _, present := payload["peer_count"]; !present {
		t.Fatalf("expected 'peer_count' field in response")
	}
}
// TestNetwork_Peers checks the peers endpoint returns 200 and carries a
// 'peers' field.
func TestNetwork_Peers(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	peersReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/network/peers",
	}
	body, status, err := peersReq.Do(ctx)
	if err != nil {
		t.Fatalf("peers check failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if _, present := payload["peers"]; !present {
		t.Fatalf("expected 'peers' field in response")
	}
}
// TestNetwork_ProxyAnonSuccess proxies a GET through /v1/proxy/anon and
// checks the wrapped upstream response.
// NOTE(review): depends on the external service httpbin.org being reachable.
func TestNetwork_ProxyAnonSuccess(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	proxyReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/proxy/anon",
		Body: map[string]interface{}{
			"url":     "https://httpbin.org/get",
			"method":  "GET",
			"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"},
		},
	}
	body, status, err := proxyReq.Do(ctx)
	if err != nil {
		t.Fatalf("proxy anon request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d: %s", status, string(body))
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// JSON numbers decode as float64, hence the float comparison.
	if payload["status_code"] != float64(200) {
		t.Fatalf("expected proxy status 200, got %v", payload["status_code"])
	}
	if _, present := payload["body"]; !present {
		t.Fatalf("expected 'body' field in response")
	}
}
// TestNetwork_ProxyAnonBadURL proxies to an unreachable target and
// expects the gateway NOT to report a clean 200.
func TestNetwork_ProxyAnonBadURL(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	proxyReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/proxy/anon",
		Body: map[string]interface{}{
			"url":    "http://localhost:1/nonexistent",
			"method": "GET",
		},
	}
	_, status, err := proxyReq.Do(ctx)
	// Either a transport error or a non-200 gateway status is acceptable.
	if err == nil && status == http.StatusOK {
		t.Fatalf("expected error for bad URL, got status 200")
	}
}
// TestNetwork_ProxyAnonPostRequest proxies a POST with a body through
// /v1/proxy/anon and checks the wrapped upstream status.
// NOTE(review): depends on the external service httpbin.org being reachable.
func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	proxyReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/proxy/anon",
		Body: map[string]interface{}{
			"url":     "https://httpbin.org/post",
			"method":  "POST",
			"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"},
			"body":    "test_data",
		},
	}
	body, status, err := proxyReq.Do(ctx)
	if err != nil {
		t.Fatalf("proxy anon POST failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d: %s", status, string(body))
	}

	var payload map[string]interface{}
	if err := DecodeJSON(body, &payload); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// JSON numbers decode as float64, hence the float comparison.
	if payload["status_code"] != float64(200) {
		t.Fatalf("expected proxy status 200, got %v", payload["status_code"])
	}
}
// TestNetwork_Unauthorized calls a protected endpoint without credentials
// and logs (does not fail) if auth is not enforced on it.
func TestNetwork_Unauthorized(t *testing.T) {
	// Test without API key
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	unauthReq := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/network/status",
		SkipAuth: true, // deliberately omit credentials
	}
	_, status, err := unauthReq.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403, got %d (auth may not be enforced on this endpoint)", status)
	}
}

421
e2e/pubsub_client_test.go Normal file
View File

@ -0,0 +1,421 @@
//go:build e2e
package e2e
import (
"context"
"fmt"
"sync"
"testing"
"time"
)
func newMessageCollector(ctx context.Context, buffer int) (chan []byte, func(string, []byte) error) {
if buffer <= 0 {
buffer = 1
}
ch := make(chan []byte, buffer)
handler := func(_ string, data []byte) error {
copied := append([]byte(nil), data...)
select {
case ch <- copied:
case <-ctx.Done():
}
return nil
}
return ch, handler
}
func waitForMessage(ctx context.Context, ch <-chan []byte) ([]byte, error) {
select {
case msg := <-ch:
return msg, nil
case <-ctx.Done():
return nil, fmt.Errorf("context finished while waiting for pubsub message: %w", ctx.Err())
}
}
// TestPubSub_SubscribePublish publishes from one client and verifies a
// second client's subscription receives the exact payload.
func TestPubSub_SubscribePublish(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Two independent clients: one publishes, the other listens.
	publisher := NewNetworkClient(t)
	listener := NewNetworkClient(t)
	if err := publisher.Connect(); err != nil {
		t.Fatalf("client1 connect failed: %v", err)
	}
	defer publisher.Disconnect()
	if err := listener.Connect(); err != nil {
		t.Fatalf("client2 connect failed: %v", err)
	}
	defer listener.Disconnect()

	topic := GenerateTopic()
	wantPayload := "test-message-from-client1"

	inbox, handler := newMessageCollector(ctx, 1)
	if err := listener.PubSub().Subscribe(ctx, topic, handler); err != nil {
		t.Fatalf("subscribe failed: %v", err)
	}
	defer listener.PubSub().Unsubscribe(ctx, topic)

	// Give subscription time to propagate and mesh to form
	Delay(2000)

	if err := publisher.PubSub().Publish(ctx, topic, []byte(wantPayload)); err != nil {
		t.Fatalf("publish failed: %v", err)
	}

	recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
	defer recvCancel()
	got, err := waitForMessage(recvCtx, inbox)
	if err != nil {
		t.Fatalf("receive failed: %v", err)
	}
	if string(got) != wantPayload {
		t.Fatalf("expected message %q, got %q", wantPayload, string(got))
	}
}
// TestPubSub_MultipleSubscribers verifies fan-out: two subscribers on the
// same topic each receive two sequential messages from one publisher, in
// publish order.
func TestPubSub_MultipleSubscribers(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Create three clients
	clientPub := NewNetworkClient(t)
	clientSub1 := NewNetworkClient(t)
	clientSub2 := NewNetworkClient(t)
	if err := clientPub.Connect(); err != nil {
		t.Fatalf("publisher connect failed: %v", err)
	}
	defer clientPub.Disconnect()
	if err := clientSub1.Connect(); err != nil {
		t.Fatalf("subscriber1 connect failed: %v", err)
	}
	defer clientSub1.Disconnect()
	if err := clientSub2.Connect(); err != nil {
		t.Fatalf("subscriber2 connect failed: %v", err)
	}
	defer clientSub2.Disconnect()
	topic := GenerateTopic()
	message1 := "message-for-sub1"
	message2 := "message-for-sub2"
	// Subscribe on both clients; buffer of 4 gives headroom so neither
	// handler blocks (or drops on ctx cancel) while we assert in order.
	sub1Ch, sub1Handler := newMessageCollector(ctx, 4)
	if err := clientSub1.PubSub().Subscribe(ctx, topic, sub1Handler); err != nil {
		t.Fatalf("subscribe1 failed: %v", err)
	}
	defer clientSub1.PubSub().Unsubscribe(ctx, topic)
	sub2Ch, sub2Handler := newMessageCollector(ctx, 4)
	if err := clientSub2.PubSub().Subscribe(ctx, topic, sub2Handler); err != nil {
		t.Fatalf("subscribe2 failed: %v", err)
	}
	defer clientSub2.PubSub().Unsubscribe(ctx, topic)
	// Give subscriptions time to propagate
	Delay(500)
	// Publish first message
	if err := clientPub.PubSub().Publish(ctx, topic, []byte(message1)); err != nil {
		t.Fatalf("publish1 failed: %v", err)
	}
	// Both subscribers should receive first message
	recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
	defer recvCancel()
	msg1a, err := waitForMessage(recvCtx, sub1Ch)
	if err != nil {
		t.Fatalf("sub1 receive1 failed: %v", err)
	}
	if string(msg1a) != message1 {
		t.Fatalf("sub1: expected %q, got %q", message1, string(msg1a))
	}
	msg1b, err := waitForMessage(recvCtx, sub2Ch)
	if err != nil {
		t.Fatalf("sub2 receive1 failed: %v", err)
	}
	if string(msg1b) != message1 {
		t.Fatalf("sub2: expected %q, got %q", message1, string(msg1b))
	}
	// Publish second message only after both saw the first, so per-topic
	// ordering can be asserted without races.
	if err := clientPub.PubSub().Publish(ctx, topic, []byte(message2)); err != nil {
		t.Fatalf("publish2 failed: %v", err)
	}
	// Both subscribers should receive second message
	recvCtx2, recvCancel2 := context.WithTimeout(ctx, 10*time.Second)
	defer recvCancel2()
	msg2a, err := waitForMessage(recvCtx2, sub1Ch)
	if err != nil {
		t.Fatalf("sub1 receive2 failed: %v", err)
	}
	if string(msg2a) != message2 {
		t.Fatalf("sub1: expected %q, got %q", message2, string(msg2a))
	}
	msg2b, err := waitForMessage(recvCtx2, sub2Ch)
	if err != nil {
		t.Fatalf("sub2 receive2 failed: %v", err)
	}
	if string(msg2b) != message2 {
		t.Fatalf("sub2: expected %q, got %q", message2, string(msg2b))
	}
}
// TestPubSub_Deduplication publishes an identical payload three times and
// asserts that at least one copy is delivered — the subscribe path applies
// no dedup filter, so all three may arrive.
func TestPubSub_Deduplication(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	publisher := NewNetworkClient(t)
	listener := NewNetworkClient(t)
	if err := publisher.Connect(); err != nil {
		t.Fatalf("publisher connect failed: %v", err)
	}
	defer publisher.Disconnect()
	if err := listener.Connect(); err != nil {
		t.Fatalf("subscriber connect failed: %v", err)
	}
	defer listener.Disconnect()

	topic := GenerateTopic()
	payload := "duplicate-test-message"

	inbox, handler := newMessageCollector(ctx, 3)
	if err := listener.PubSub().Subscribe(ctx, topic, handler); err != nil {
		t.Fatalf("subscribe failed: %v", err)
	}
	defer listener.PubSub().Unsubscribe(ctx, topic)

	// Give subscription time to propagate and mesh to form
	Delay(2000)

	// Publish the same message multiple times
	for attempt := 0; attempt < 3; attempt++ {
		if err := publisher.PubSub().Publish(ctx, topic, []byte(payload)); err != nil {
			t.Fatalf("publish %d failed: %v", attempt, err)
		}
	}

	// Drain deliveries until the short window closes or all three arrive.
	recvCtx, recvCancel := context.WithTimeout(ctx, 5*time.Second)
	defer recvCancel()
	delivered := 0
	for delivered < 3 {
		if _, err := waitForMessage(recvCtx, inbox); err != nil {
			break
		}
		delivered++
	}
	if delivered < 1 {
		t.Fatalf("expected to receive at least 1 message, got %d", delivered)
	}
}
// TestPubSub_ConcurrentPublish fires 10 publishes from concurrent
// goroutines at one topic and drains the subscriber, logging (not
// failing) if some messages are dropped.
func TestPubSub_ConcurrentPublish(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Create clients
	clientPub := NewNetworkClient(t)
	clientSub := NewNetworkClient(t)
	if err := clientPub.Connect(); err != nil {
		t.Fatalf("publisher connect failed: %v", err)
	}
	defer clientPub.Disconnect()
	if err := clientSub.Connect(); err != nil {
		t.Fatalf("subscriber connect failed: %v", err)
	}
	defer clientSub.Disconnect()
	topic := GenerateTopic()
	numMessages := 10
	// Buffer sized to the full burst so the collector handler never blocks.
	messageCh, handler := newMessageCollector(ctx, numMessages)
	if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil {
		t.Fatalf("subscribe failed: %v", err)
	}
	defer clientSub.PubSub().Unsubscribe(ctx, topic)
	// Give subscription time to propagate and mesh to form
	Delay(2000)
	// Publish multiple messages concurrently; publish failures are logged
	// from the goroutines (t.Logf is safe for concurrent use), not fatal.
	var wg sync.WaitGroup
	for i := 0; i < numMessages; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			msg := fmt.Sprintf("concurrent-msg-%d", idx)
			if err := clientPub.PubSub().Publish(ctx, topic, []byte(msg)); err != nil {
				t.Logf("publish %d failed: %v", idx, err)
			}
		}(i)
	}
	wg.Wait()
	// Drain deliveries until the window closes or all messages arrive.
	recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
	defer recvCancel()
	receivedCount := 0
	for receivedCount < numMessages {
		if _, err := waitForMessage(recvCtx, messageCh); err != nil {
			break
		}
		receivedCount++
	}
	// Best-effort delivery: a shortfall is informational, not a failure.
	if receivedCount < numMessages {
		t.Logf("expected %d messages, got %d (some may have been dropped)", numMessages, receivedCount)
	}
}
// TestPubSub_TopicIsolation subscribes to one topic, publishes to two,
// and asserts only the subscribed topic's message is delivered.
func TestPubSub_TopicIsolation(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	publisher := NewNetworkClient(t)
	listener := NewNetworkClient(t)
	if err := publisher.Connect(); err != nil {
		t.Fatalf("publisher connect failed: %v", err)
	}
	defer publisher.Disconnect()
	if err := listener.Connect(); err != nil {
		t.Fatalf("subscriber connect failed: %v", err)
	}
	defer listener.Disconnect()

	subscribedTopic := GenerateTopic()
	otherTopic := GenerateTopic()

	inbox, handler := newMessageCollector(ctx, 2)
	if err := listener.PubSub().Subscribe(ctx, subscribedTopic, handler); err != nil {
		t.Fatalf("subscribe1 failed: %v", err)
	}
	defer listener.PubSub().Unsubscribe(ctx, subscribedTopic)

	// Give subscription time to propagate and mesh to form
	Delay(2000)

	// The unrelated topic is published first: if isolation leaked, its
	// message would arrive ahead of the expected one and fail the check.
	msg2 := "message-on-topic2"
	if err := publisher.PubSub().Publish(ctx, otherTopic, []byte(msg2)); err != nil {
		t.Fatalf("publish2 failed: %v", err)
	}
	msg1 := "message-on-topic1"
	if err := publisher.PubSub().Publish(ctx, subscribedTopic, []byte(msg1)); err != nil {
		t.Fatalf("publish1 failed: %v", err)
	}

	recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
	defer recvCancel()
	got, err := waitForMessage(recvCtx, inbox)
	if err != nil {
		t.Fatalf("receive failed: %v", err)
	}
	if string(got) != msg1 {
		t.Fatalf("expected %q, got %q", msg1, string(got))
	}
}
// TestPubSub_EmptyMessage publishes a zero-length payload and asserts it
// is delivered as an empty message rather than dropped or padded.
func TestPubSub_EmptyMessage(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	publisher := NewNetworkClient(t)
	listener := NewNetworkClient(t)
	if err := publisher.Connect(); err != nil {
		t.Fatalf("publisher connect failed: %v", err)
	}
	defer publisher.Disconnect()
	if err := listener.Connect(); err != nil {
		t.Fatalf("subscriber connect failed: %v", err)
	}
	defer listener.Disconnect()

	topic := GenerateTopic()

	inbox, handler := newMessageCollector(ctx, 1)
	if err := listener.PubSub().Subscribe(ctx, topic, handler); err != nil {
		t.Fatalf("subscribe failed: %v", err)
	}
	defer listener.PubSub().Unsubscribe(ctx, topic)

	// Give subscription time to propagate and mesh to form
	Delay(2000)

	if err := publisher.PubSub().Publish(ctx, topic, []byte("")); err != nil {
		t.Fatalf("publish empty failed: %v", err)
	}

	recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
	defer recvCancel()
	got, err := waitForMessage(recvCtx, inbox)
	if err != nil {
		t.Fatalf("receive failed: %v", err)
	}
	if len(got) != 0 {
		t.Fatalf("expected empty message, got %q", string(got))
	}
}

446
e2e/rqlite_http_test.go Normal file
View File

@ -0,0 +1,446 @@
//go:build e2e
package e2e
import (
"context"
"fmt"
"net/http"
"testing"
"time"
)
// TestRQLite_CreateTable exercises the /v1/rqlite/create-table endpoint with
// a freshly generated table name and accepts either 201 Created or 200 OK.
func TestRQLite_CreateTable(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	table := GenerateTableName()
	ddl := fmt.Sprintf(
		"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)",
		table,
	)

	body, status, err := (&HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body:   map[string]interface{}{"schema": ddl},
	}).Do(ctx)
	if err != nil {
		t.Fatalf("create table request failed: %v", err)
	}
	if status != http.StatusCreated && status != http.StatusOK {
		t.Fatalf("expected status 201 or 200, got %d: %s", status, string(body))
	}
}
// TestRQLite_InsertQuery creates a fresh table, inserts two rows in a single
// transaction, then queries them back and verifies the reported row count.
func TestRQLite_InsertQuery(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	table := GenerateTableName()
	schema := fmt.Sprintf(
		"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)",
		table,
	)
	// Create table.
	createReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": schema,
		},
	}
	_, status, err := createReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create table failed: status %d, err %v", status, err)
	}
	// Insert two rows atomically via the transaction endpoint.
	insertReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/transaction",
		Body: map[string]interface{}{
			"statements": []string{
				fmt.Sprintf("INSERT INTO %s(name) VALUES ('alice')", table),
				fmt.Sprintf("INSERT INTO %s(name) VALUES ('bob')", table),
			},
		},
	}
	_, status, err = insertReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("insert failed: status %d, err %v", status, err)
	}
	// Query the rows back.
	queryReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/query",
		Body: map[string]interface{}{
			"sql": fmt.Sprintf("SELECT name FROM %s ORDER BY id", table),
		},
	}
	body, status, err := queryReq.Do(ctx)
	if err != nil {
		t.Fatalf("query failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	var queryResp map[string]interface{}
	if err := DecodeJSON(body, &queryResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail with a clear message instead of panicking when
	// "count" is missing or not numeric in the response.
	count, ok := queryResp["count"].(float64)
	if !ok {
		t.Fatalf("missing or non-numeric 'count' in response: %v", queryResp["count"])
	}
	if count < 2 {
		t.Fatalf("expected at least 2 rows, got %v", count)
	}
}
// TestRQLite_DropTable creates a table, drops it, and then (best-effort)
// checks the schema endpoint no longer lists it.
func TestRQLite_DropTable(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	table := GenerateTableName()
	schema := fmt.Sprintf(
		"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)",
		table,
	)
	// Create table.
	createReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": schema,
		},
	}
	_, status, err := createReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create table failed: status %d, err %v", status, err)
	}
	// Drop table.
	dropReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/drop-table",
		Body: map[string]interface{}{
			"table": table,
		},
	}
	_, status, err = dropReq.Do(ctx)
	if err != nil {
		t.Fatalf("drop table request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	// Verify the table no longer appears in the schema. This part is
	// best-effort: schema retrieval/decoding failures only log a warning.
	schemaReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/rqlite/schema",
	}
	body, status, err := schemaReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Logf("warning: failed to verify schema after drop: status %d, err %v", status, err)
		return
	}
	var schemaResp map[string]interface{}
	if err := DecodeJSON(body, &schemaResp); err != nil {
		t.Logf("warning: failed to decode schema response: %v", err)
		return
	}
	if tables, ok := schemaResp["tables"].([]interface{}); ok {
		for _, tbl := range tables {
			// Checked assertion: skip unexpected entry shapes instead of panicking.
			tblMap, ok := tbl.(map[string]interface{})
			if !ok {
				continue
			}
			if tblMap["name"] == table {
				t.Fatalf("table %s still present after drop", table)
			}
		}
	}
}
// TestRQLite_Schema fetches the database schema and checks that the response
// JSON carries a 'tables' field.
func TestRQLite_Schema(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	payload, code, err := (&HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/rqlite/schema",
	}).Do(ctx)
	if err != nil {
		t.Fatalf("schema request failed: %v", err)
	}
	if code != http.StatusOK {
		t.Fatalf("expected status 200, got %d", code)
	}

	var decoded map[string]interface{}
	if err := DecodeJSON(payload, &decoded); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if _, present := decoded["tables"]; !present {
		t.Fatalf("expected 'tables' field in response")
	}
}
// TestRQLite_MalformedSQL sends syntactically invalid SQL to the query
// endpoint and expects a non-200 response.
func TestRQLite_MalformedSQL(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, code, err := (&HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/query",
		Body: map[string]interface{}{
			"sql": "SELECT * FROM nonexistent_table WHERE invalid syntax",
		},
	}).Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	// The gateway must reject malformed SQL with an error status.
	if code == http.StatusOK {
		t.Fatalf("expected error for malformed SQL, got status 200")
	}
}
// TestRQLite_LargeTransaction inserts 50 rows in one transaction and verifies
// the row count via a COUNT(*) query.
func TestRQLite_LargeTransaction(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	table := GenerateTableName()
	schema := fmt.Sprintf(
		"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
		table,
	)
	// Create table.
	createReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": schema,
		},
	}
	_, status, err := createReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create table failed: status %d, err %v", status, err)
	}
	// Generate large transaction (50 inserts).
	var statements []string
	for i := 0; i < 50; i++ {
		statements = append(statements, fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, i))
	}
	txReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/transaction",
		Body: map[string]interface{}{
			"statements": statements,
		},
	}
	_, status, err = txReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("large transaction failed: status %d, err %v", status, err)
	}
	// Verify all rows were inserted.
	queryReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/query",
		Body: map[string]interface{}{
			"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
		},
	}
	body, status, err := queryReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("count query failed: status %d, err %v", status, err)
	}
	var countResp map[string]interface{}
	if err := DecodeJSON(body, &countResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Extract the count. Previously a missing/empty "rows" field made the
	// verification silently pass; now the test fails if the result is absent
	// or malformed, and type assertions are checked so it cannot panic.
	rows, ok := countResp["rows"].([]interface{})
	if !ok || len(rows) == 0 {
		t.Fatalf("expected non-empty 'rows' in count response, got %v", countResp["rows"])
	}
	row, ok := rows[0].([]interface{})
	if !ok || len(row) == 0 {
		t.Fatalf("unexpected row shape in count response: %v", rows[0])
	}
	got, ok := row[0].(float64)
	if !ok {
		t.Fatalf("non-numeric count value: %v", row[0])
	}
	if got != 50 {
		t.Fatalf("expected 50 rows, got %v", got)
	}
}
// TestRQLite_ForeignKeyMigration performs a SQLite-style table rebuild
// migration (new table + copy + drop + rename) that changes a column type and
// adds a foreign key, then verifies the seeded row survived.
func TestRQLite_ForeignKeyMigration(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	orgsTable := GenerateTableName()
	usersTable := GenerateTableName()
	// Create base tables.
	createOrgsReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": fmt.Sprintf(
				"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)",
				orgsTable,
			),
		},
	}
	_, status, err := createOrgsReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create orgs table failed: status %d, err %v", status, err)
	}
	createUsersReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/create-table",
		Body: map[string]interface{}{
			"schema": fmt.Sprintf(
				"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)",
				usersTable,
			),
		},
	}
	_, status, err = createUsersReq.Do(ctx)
	if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
		t.Fatalf("create users table failed: status %d, err %v", status, err)
	}
	// Seed data.
	seedReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/transaction",
		Body: map[string]interface{}{
			"statements": []string{
				fmt.Sprintf("INSERT INTO %s(id,name) VALUES (1,'org')", orgsTable),
				fmt.Sprintf("INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')", usersTable),
			},
		},
	}
	_, status, err = seedReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("seed transaction failed: status %d, err %v", status, err)
	}
	// Migrate: change age from TEXT to INTEGER and add an FK to orgs.
	// The statement order matters: create new, copy with CAST, drop old, rename.
	migrationReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/transaction",
		Body: map[string]interface{}{
			"statements": []string{
				fmt.Sprintf(
					"CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)",
					usersTable, orgsTable,
				),
				fmt.Sprintf(
					"INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s",
					usersTable, usersTable,
				),
				fmt.Sprintf("DROP TABLE %s", usersTable),
				fmt.Sprintf("ALTER TABLE %s_new RENAME TO %s", usersTable, usersTable),
			},
		},
	}
	_, status, err = migrationReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("migration transaction failed: status %d, err %v", status, err)
	}
	// Verify data is intact.
	queryReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/query",
		Body: map[string]interface{}{
			"sql": fmt.Sprintf("SELECT name, org_id, age FROM %s", usersTable),
		},
	}
	body, status, err := queryReq.Do(ctx)
	if err != nil || status != http.StatusOK {
		t.Fatalf("query after migration failed: status %d, err %v", status, err)
	}
	var queryResp map[string]interface{}
	if err := DecodeJSON(body, &queryResp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	// Checked assertion: fail cleanly instead of panicking if "count" is
	// missing or non-numeric.
	count, ok := queryResp["count"].(float64)
	if !ok {
		t.Fatalf("missing or non-numeric 'count' in response: %v", queryResp["count"])
	}
	if count != 1 {
		t.Fatalf("expected 1 row after migration, got %v", count)
	}
}
// TestRQLite_DropNonexistentTable drops a table name that was never created.
// An error status is expected; an unexpected 200 is logged as a warning only.
func TestRQLite_DropNonexistentTable(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	missing := fmt.Sprintf("nonexistent_table_xyz_%d", time.Now().UnixNano())
	_, code, err := (&HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/rqlite/drop-table",
		Body:   map[string]interface{}{"table": missing},
	}).Do(ctx)
	if err != nil {
		t.Logf("warning: drop nonexistent table request failed: %v", err)
		return
	}
	// Expect a 400/404-style error; tolerate other behavior with a warning.
	if code == http.StatusOK {
		t.Logf("warning: expected error for dropping nonexistent table, got status 200")
	}
}

550
e2e/storage_http_test.go Normal file
View File

@ -0,0 +1,550 @@
//go:build e2e
package e2e
import (
"bytes"
"context"
"io"
"mime/multipart"
"net/http"
"testing"
"time"
)
// uploadFile uploads content to /v1/storage/upload as a multipart form file
// named filename and returns the CID reported by the gateway. Any failure
// aborts the calling test. A configured JWT takes precedence over an API key
// for the Authorization header.
func uploadFile(t *testing.T, ctx context.Context, content []byte, filename string) string {
	t.Helper()
	// Build the multipart form body.
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile("file", filename)
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}
	// Create request.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	// Prefer JWT auth; fall back to an API key.
	if jwt := GetJWT(); jwt != "" {
		req.Header.Set("Authorization", "Bearer "+jwt)
	} else if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	client := NewHTTPClient(5 * time.Minute)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("upload request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
	}
	result, err := DecodeJSONFromReader(resp.Body)
	if err != nil {
		t.Fatalf("failed to decode upload response: %v", err)
	}
	// Checked assertion: fail with a clear message instead of panicking when
	// the response has no string "cid".
	cid, ok := result["cid"].(string)
	if !ok || cid == "" {
		t.Fatalf("upload response missing 'cid': %v", result)
	}
	return cid
}
// DecodeJSONFromReader drains rc to EOF, closes it, and unmarshals the bytes
// into a generic map via DecodeJSON.
func DecodeJSONFromReader(rc io.ReadCloser) (map[string]interface{}, error) {
	defer rc.Close()
	data, readErr := io.ReadAll(rc)
	if readErr != nil {
		return nil, readErr
	}
	var decoded map[string]interface{}
	decodeErr := DecodeJSON(data, &decoded)
	return decoded, decodeErr
}
// TestStorage_UploadText uploads a small text file and validates the cid,
// name, and size fields of the upload response.
func TestStorage_UploadText(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	content := []byte("Hello, IPFS!")
	filename := "test.txt"
	// Create multipart form.
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile("file", filename)
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}
	// Create request.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	client := NewHTTPClient(5 * time.Minute)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("upload request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
	}
	// Previously the ReadAll error was discarded; surface it explicitly.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("failed to read response body: %v", err)
	}
	var result map[string]interface{}
	if err := DecodeJSON(body, &result); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if result["cid"] == nil {
		t.Fatalf("expected cid in response")
	}
	if result["name"] != filename {
		t.Fatalf("expected name %q, got %v", filename, result["name"])
	}
	// Checked assertion: avoid a panic when "size" is missing or non-numeric.
	size, ok := result["size"].(float64)
	if !ok || size <= 0 {
		t.Fatalf("expected positive size, got %v", result["size"])
	}
}
// TestStorage_UploadBinary uploads a small binary payload (the PNG file
// signature) and verifies the gateway returns a content identifier.
func TestStorage_UploadBinary(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	payload := []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a} // PNG header
	const fileName = "test.png"

	// Assemble the multipart request body.
	var form bytes.Buffer
	mw := multipart.NewWriter(&form)
	filePart, err := mw.CreateFormFile("file", fileName)
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(filePart, bytes.NewReader(payload)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := mw.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &form)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}

	resp, err := NewHTTPClient(5 * time.Minute).Do(req)
	if err != nil {
		t.Fatalf("upload request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		raw, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(raw))
	}

	raw, _ := io.ReadAll(resp.Body)
	var result map[string]interface{}
	if err := DecodeJSON(raw, &result); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if result["cid"] == nil {
		t.Fatalf("expected cid in response")
	}
}
// TestStorage_UploadLarge uploads a 1 MiB file and checks the gateway reports
// the exact byte size back.
func TestStorage_UploadLarge(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	const fileName = "large.bin"
	payload := bytes.Repeat([]byte("x"), 1024*1024) // 1 MiB of 'x'

	// Assemble the multipart request body.
	var form bytes.Buffer
	mw := multipart.NewWriter(&form)
	filePart, err := mw.CreateFormFile("file", fileName)
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(filePart, bytes.NewReader(payload)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := mw.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &form)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}

	resp, err := NewHTTPClient(5 * time.Minute).Do(req)
	if err != nil {
		t.Fatalf("upload request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		raw, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(raw))
	}

	raw, _ := io.ReadAll(resp.Body)
	var result map[string]interface{}
	if err := DecodeJSON(raw, &result); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if result["size"] != float64(1024*1024) {
		t.Fatalf("expected size %d, got %v", 1024*1024, result["size"])
	}
}
// TestStorage_PinUnpin uploads a file, pins it by CID, then unpins it,
// verifying each step returns 200.
func TestStorage_PinUnpin(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	content := []byte("test content for pinning")
	// Upload file first.
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile("file", "pin-test.txt")
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}
	// Create upload request.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	client := NewHTTPClient(5 * time.Minute)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("upload failed: %v", err)
	}
	defer resp.Body.Close()
	// Previously the status code was never checked before decoding, so a
	// failed upload produced a confusing decode/panic error instead.
	if resp.StatusCode != http.StatusOK {
		errBody, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(errBody))
	}
	var uploadResult map[string]interface{}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("failed to read upload response body: %v", err)
	}
	if err := DecodeJSON(body, &uploadResult); err != nil {
		t.Fatalf("failed to decode upload response: %v", err)
	}
	// Checked assertion: fail cleanly instead of panicking on a missing cid.
	cid, ok := uploadResult["cid"].(string)
	if !ok || cid == "" {
		t.Fatalf("upload response missing 'cid': %v", uploadResult)
	}
	// Pin the file.
	pinReq := &HTTPRequest{
		Method: http.MethodPost,
		URL:    GetGatewayURL() + "/v1/storage/pin",
		Body: map[string]interface{}{
			"cid":  cid,
			"name": "pinned-file",
		},
	}
	body2, status, err := pinReq.Do(ctx)
	if err != nil {
		t.Fatalf("pin failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d: %s", status, string(body2))
	}
	var pinResult map[string]interface{}
	if err := DecodeJSON(body2, &pinResult); err != nil {
		t.Fatalf("failed to decode pin response: %v", err)
	}
	if pinResult["cid"] != cid {
		t.Fatalf("expected cid %s, got %v", cid, pinResult["cid"])
	}
	// Unpin the file.
	unpinReq := &HTTPRequest{
		Method: http.MethodDelete,
		URL:    GetGatewayURL() + "/v1/storage/unpin/" + cid,
	}
	body3, status, err := unpinReq.Do(ctx)
	if err != nil {
		t.Fatalf("unpin failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d: %s", status, string(body3))
	}
}
// TestStorage_Status uploads a file and verifies the status endpoint echoes
// the same CID back.
func TestStorage_Status(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	content := []byte("test content for status")
	// Upload file first.
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile("file", "status-test.txt")
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}
	// Create upload request.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	client := NewHTTPClient(5 * time.Minute)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("upload failed: %v", err)
	}
	defer resp.Body.Close()
	// Previously the status code was never checked before decoding, so a
	// failed upload produced a confusing decode/panic error instead.
	if resp.StatusCode != http.StatusOK {
		errBody, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(errBody))
	}
	var uploadResult map[string]interface{}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("failed to read upload response body: %v", err)
	}
	if err := DecodeJSON(body, &uploadResult); err != nil {
		t.Fatalf("failed to decode upload response: %v", err)
	}
	// Checked assertion: fail cleanly instead of panicking on a missing cid.
	cid, ok := uploadResult["cid"].(string)
	if !ok || cid == "" {
		t.Fatalf("upload response missing 'cid': %v", uploadResult)
	}
	// Get status.
	statusReq := &HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/storage/status/" + cid,
	}
	statusBody, status, err := statusReq.Do(ctx)
	if err != nil {
		t.Fatalf("status request failed: %v", err)
	}
	if status != http.StatusOK {
		t.Fatalf("expected status 200, got %d", status)
	}
	var statusResult map[string]interface{}
	if err := DecodeJSON(statusBody, &statusResult); err != nil {
		t.Fatalf("failed to decode status response: %v", err)
	}
	if statusResult["cid"] != cid {
		t.Fatalf("expected cid %s, got %v", cid, statusResult["cid"])
	}
}
// TestStorage_InvalidCID asks for the status of a CID that cannot exist and
// expects a 404; a different status is only logged as a warning.
func TestStorage_InvalidCID(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, code, err := (&HTTPRequest{
		Method: http.MethodGet,
		URL:    GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789",
	}).Do(ctx)
	if err != nil {
		t.Fatalf("status request failed: %v", err)
	}
	if code != http.StatusNotFound {
		t.Logf("warning: expected status 404 for invalid CID, got %d", code)
	}
}
// TestStorage_GetByteRange uploads a file and retrieves it via the get
// endpoint, verifying the content round-trips byte-for-byte.
// NOTE(review): despite the name, no Range header is sent — this only checks
// full-content retrieval; confirm whether a partial-range case was intended.
func TestStorage_GetByteRange(t *testing.T) {
	SkipIfMissingGateway(t)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	content := []byte("0123456789abcdefghijklmnopqrstuvwxyz")
	// Upload file first.
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile("file", "range-test.txt")
	if err != nil {
		t.Fatalf("failed to create form file: %v", err)
	}
	if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
		t.Fatalf("failed to copy data: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("failed to close writer: %v", err)
	}
	// Create upload request.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	if apiKey := GetAPIKey(); apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	client := NewHTTPClient(5 * time.Minute)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("upload failed: %v", err)
	}
	defer resp.Body.Close()
	// Previously the status code was never checked before decoding, so a
	// failed upload produced a confusing decode/panic error instead.
	if resp.StatusCode != http.StatusOK {
		errBody, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(errBody))
	}
	var uploadResult map[string]interface{}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("failed to read upload response body: %v", err)
	}
	if err := DecodeJSON(body, &uploadResult); err != nil {
		t.Fatalf("failed to decode upload response: %v", err)
	}
	// Checked assertion: fail cleanly instead of panicking on a missing cid.
	cid, ok := uploadResult["cid"].(string)
	if !ok || cid == "" {
		t.Fatalf("upload response missing 'cid': %v", uploadResult)
	}
	// Get full content.
	getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/storage/get/"+cid, nil)
	if err != nil {
		t.Fatalf("failed to create get request: %v", err)
	}
	if apiKey := GetAPIKey(); apiKey != "" {
		getReq.Header.Set("Authorization", "Bearer "+apiKey)
	}
	resp, err = client.Do(getReq)
	if err != nil {
		t.Fatalf("get request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected status 200, got %d", resp.StatusCode)
	}
	retrievedContent, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("failed to read response body: %v", err)
	}
	if !bytes.Equal(retrievedContent, content) {
		t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent))
	}
}

28
go.mod
View File

@ -11,21 +11,28 @@ require (
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/mackerelio/go-osstat v0.2.6
github.com/multiformats/go-multiaddr v0.15.0
github.com/olric-data/olric v0.7.0
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.40.0
golang.org/x/net v0.42.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buraksezer/consistent v0.10.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/flynn/noise v1.1.0 // indirect
@ -33,10 +40,20 @@ require (
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/memberlist v0.5.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
@ -55,11 +72,13 @@ require (
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-sqlite3 v1.14.32 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
@ -101,14 +120,20 @@ require (
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/redis/go-redis/v9 v9.8.0 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/tidwall/btree v1.7.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/redcon v1.6.2 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.23.0 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.40.0 // indirect
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
golang.org/x/mod v0.26.0 // indirect
golang.org/x/sync v0.16.0 // indirect
@ -116,5 +141,6 @@ require (
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

163
go.sum
View File

@ -8,22 +8,45 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=
github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=
github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buraksezer/consistent v0.10.0 h1:hqBgz1PvNLC5rkWcEBVAL9dFMBWz6I0VgUCW25rrZlU=
github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
@ -43,6 +66,8 @@ github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@ -61,8 +86,15 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@ -79,13 +111,29 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@ -101,8 +149,33 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
github.com/hashicorp/go-msgpack/v2 v2.1.3 h1:cB1w4Zrk0O3jQBTcFMKqYQWRFfsSQ/TYKNyUUVyCP2c=
github.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+MCUn39TxymNR5ZU=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=
github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
@ -116,8 +189,14 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@ -125,8 +204,11 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@ -164,6 +246,8 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
@ -178,11 +262,15 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
@ -207,8 +295,12 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/olric-data/olric v0.7.0 h1:EKN2T6ZTtdu8Un0jV0KOWVxWm9odptJpefmDivfZdjE=
github.com/olric-data/olric v0.7.0/go.mod h1:+ZnPpgc8JkNkza8rETCKGn0P/QPF6HhZY0EbCKAOslo=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
@ -217,6 +309,8 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
@ -261,21 +355,38 @@ github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q=
github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
@ -286,12 +397,16 @@ github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
@ -316,16 +431,22 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
@ -333,9 +454,21 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/redcon v1.6.2 h1:5qfvrrybgtO85jnhSravmkZyC0D+7WstbfCs3MmPhow=
github.com/tidwall/redcon v1.6.2/go.mod h1:p5Wbsgeyi2VSTBWOcA5vRXrOb9arFTcU2+ZzFjqV75Y=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
@ -357,6 +490,7 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -390,12 +524,15 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@ -419,6 +556,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -426,16 +564,26 @@ golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -456,6 +604,7 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
@ -502,15 +651,29 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -19,7 +19,7 @@ var disabled bool
func SetDisabled(v bool) { disabled = v }
// Enabled reports whether Anyone proxy routing is active.
// Defaults to true, using SOCKS5 at 127.0.0.1:9050, unless explicitly disabled
// Defaults to true, using SOCKS5 at localhost:9050, unless explicitly disabled
// via SetDisabled(true) or environment variable ANYONE_DISABLE=1.
// ANYONE_SOCKS5 may override the proxy address.
func Enabled() bool {
@ -31,7 +31,7 @@ func Enabled() bool {
// socksAddr returns the SOCKS5 address to use for proxying (host:port).
func socksAddr() string {
return "127.0.0.1:9050"
return "localhost:9050"
}
// socksContextDialer implements tcp.ContextDialer over a SOCKS5 proxy.
@ -57,7 +57,7 @@ func (d *socksContextDialer) DialContext(ctx context.Context, network, address s
// DialerForAddr returns a tcp.DialerForAddr that routes through the Anyone SOCKS5 proxy.
// It automatically BYPASSES the proxy for loopback, private, and link-local addresses
// to allow local/dev networking (e.g. 127.0.0.1, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10).
// to allow local/dev networking (e.g. localhost, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10).
func DialerForAddr() tcp.DialerForAddr {
return func(raddr ma.Multiaddr) (tcp.ContextDialer, error) {
// Prefer direct dialing for local/private targets

116
pkg/auth/simple_auth.go Normal file
View File

@ -0,0 +1,116 @@
package auth
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
)
// PerformSimpleAuthentication performs a simple authentication flow where the user
// provides a wallet address and receives an API key without signature verification.
//
// The flow is interactive: it reads the wallet address and an optional
// namespace from stdin, normalizes the wallet (prefixes "0x" when missing),
// asks the gateway for an API key, and returns the assembled Credentials.
//
// Parameters:
//   gatewayURL - base URL of the gateway that issues API keys
//
// Returns the populated *Credentials on success, or an error if input
// reading, validation, or the gateway request fails.
//
// SECURITY NOTE: no proof of wallet ownership is performed here — the user
// simply types an address; trust is delegated entirely to the gateway.
func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) {
	reader := bufio.NewReader(os.Stdin)

	fmt.Println("\n🔐 Simple Wallet Authentication")
	fmt.Println("================================")

	// Read wallet address
	fmt.Print("Enter your wallet address (0x...): ")
	walletInput, err := reader.ReadString('\n')
	if err != nil {
		return nil, fmt.Errorf("failed to read wallet address: %w", err)
	}
	wallet := strings.TrimSpace(walletInput)
	if wallet == "" {
		return nil, fmt.Errorf("wallet address cannot be empty")
	}

	// Validate wallet format (basic check). Note: an address already starting
	// with "0X" is left as-is; only a bare address gets the "0x" prefix.
	if !strings.HasPrefix(wallet, "0x") && !strings.HasPrefix(wallet, "0X") {
		wallet = "0x" + wallet
	}
	// ValidateWalletAddress is defined elsewhere in this package — presumably
	// a 0x-hex format check; TODO confirm its exact rules.
	if !ValidateWalletAddress(wallet) {
		return nil, fmt.Errorf("invalid wallet address format")
	}

	// Read namespace (optional); empty input falls back to "default".
	fmt.Print("Enter namespace (press Enter for 'default'): ")
	nsInput, err := reader.ReadString('\n')
	if err != nil {
		return nil, fmt.Errorf("failed to read namespace: %w", err)
	}
	namespace := strings.TrimSpace(nsInput)
	if namespace == "" {
		namespace = "default"
	}

	fmt.Printf("\n✅ Wallet: %s\n", wallet)
	fmt.Printf("✅ Namespace: %s\n", namespace)
	fmt.Println("⏳ Requesting API key from gateway...")

	// Request API key from gateway
	apiKey, err := requestAPIKeyFromGateway(gatewayURL, wallet, namespace)
	if err != nil {
		return nil, fmt.Errorf("failed to request API key: %w", err)
	}

	// Create credentials; UserID and Wallet are both set to the wallet address.
	creds := &Credentials{
		APIKey:    apiKey,
		Namespace: namespace,
		UserID:    wallet,
		Wallet:    wallet,
		IssuedAt:  time.Now(),
	}

	// NOTE: the API key is echoed to stdout here — visible in terminal history.
	fmt.Printf("\n🎉 Authentication successful!\n")
	fmt.Printf("📝 API Key: %s\n", creds.APIKey)

	return creds, nil
}
// requestAPIKeyFromGateway calls the gateway's simple-key endpoint to generate an API key
func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, error) {
reqBody := map[string]string{
"wallet": wallet,
"namespace": namespace,
}
payload, err := json.Marshal(reqBody)
if err != nil {
return "", fmt.Errorf("failed to marshal request: %w", err)
}
endpoint := gatewayURL + "/v1/auth/simple-key"
resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload))
if err != nil {
return "", fmt.Errorf("failed to call gateway: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))
}
var respBody map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&respBody); err != nil {
return "", fmt.Errorf("failed to decode response: %w", err)
}
apiKey, ok := respBody["api_key"].(string)
if !ok || apiKey == "" {
return "", fmt.Errorf("no api_key in response")
}
return apiKey, nil
}

View File

@ -33,29 +33,34 @@ func HandleAuthCommand(args []string) {
func showAuthHelp() {
fmt.Printf("🔐 Authentication Commands\n\n")
fmt.Printf("Usage: network-cli auth <subcommand>\n\n")
fmt.Printf("Usage: dbn auth <subcommand>\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" login - Authenticate with wallet\n")
fmt.Printf(" login - Authenticate by providing your wallet address\n")
fmt.Printf(" logout - Clear stored credentials\n")
fmt.Printf(" whoami - Show current authentication status\n")
fmt.Printf(" status - Show detailed authentication info\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" network-cli auth login\n")
fmt.Printf(" network-cli auth whoami\n")
fmt.Printf(" network-cli auth status\n")
fmt.Printf(" network-cli auth logout\n\n")
fmt.Printf(" dbn auth login # Enter wallet address interactively\n")
fmt.Printf(" dbn auth whoami # Check who you're logged in as\n")
fmt.Printf(" dbn auth status # View detailed authentication info\n")
fmt.Printf(" dbn auth logout # Clear all stored credentials\n\n")
fmt.Printf("Environment Variables:\n")
fmt.Printf(" DEBROS_GATEWAY_URL - Gateway URL (overrides environment config)\n\n")
fmt.Printf("Authentication Flow:\n")
fmt.Printf(" 1. Run 'dbn auth login'\n")
fmt.Printf(" 2. Enter your wallet address when prompted\n")
fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n")
fmt.Printf(" 4. An API key will be generated and saved to ~/.debros/credentials.json\n\n")
fmt.Printf("Note: Authentication uses the currently active environment.\n")
fmt.Printf(" Use 'network-cli env current' to see your active environment.\n")
fmt.Printf(" Use 'dbn env current' to see your active environment.\n")
}
func handleAuthLogin() {
gatewayURL := getGatewayURL()
fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL)
// Use the wallet authentication flow
creds, err := auth.PerformWalletAuthentication(gatewayURL)
// Use the simple authentication flow
creds, err := auth.PerformSimpleAuthentication(gatewayURL)
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Authentication failed: %v\n", err)
os.Exit(1)
@ -72,6 +77,7 @@ func handleAuthLogin() {
fmt.Printf("📁 Credentials saved to: %s\n", credsPath)
fmt.Printf("🎯 Wallet: %s\n", creds.Wallet)
fmt.Printf("🏢 Namespace: %s\n", creds.Namespace)
fmt.Printf("🔑 API Key: %s\n", creds.APIKey)
}
func handleAuthLogout() {
@ -93,7 +99,7 @@ func handleAuthWhoami() {
creds, exists := store.GetCredentialsForGateway(gatewayURL)
if !exists || !creds.IsValid() {
fmt.Println("❌ Not authenticated - run 'network-cli auth login' to authenticate")
fmt.Println("❌ Not authenticated - run 'dbn auth login' to authenticate")
os.Exit(1)
}

View File

@ -158,7 +158,7 @@ func HandlePeerIDCommand(format string, timeout time.Duration) {
// HandlePubSubCommand handles pubsub commands
func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub <publish|subscribe|topics> [args...]\n")
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub <publish|subscribe|topics> [args...]\n")
os.Exit(1)
}
@ -179,7 +179,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
switch subcommand {
case "publish":
if len(args) < 3 {
fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub publish <topic> <message>\n")
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub publish <topic> <message>\n")
os.Exit(1)
}
err := cli.PubSub().Publish(ctx, args[1], []byte(args[2]))
@ -191,7 +191,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
case "subscribe":
if len(args) < 2 {
fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub subscribe <topic> [duration]\n")
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub subscribe <topic> [duration]\n")
os.Exit(1)
}
duration := 30 * time.Second
@ -243,14 +243,26 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
// Helper functions
func createClient() (client.NetworkClient, error) {
config := client.DefaultClientConfig("network-cli")
config := client.DefaultClientConfig("dbn")
// Use active environment's gateway URL
gatewayURL := getGatewayURL()
config.GatewayURL = gatewayURL
// Try to get bootstrap peers from active environment
// For now, we'll use the default bootstrap peers from config
// In the future, environments could specify their own bootstrap peers
env, err := GetActiveEnvironment()
if err == nil && env != nil {
// Environment loaded successfully - gateway URL already set above
// Bootstrap peers could be added to Environment struct in the future
_ = env // Use env if we add bootstrap peers to it
}
// Check for existing credentials using enhanced authentication
creds, err := auth.GetValidEnhancedCredentials()
if err != nil {
// No valid credentials found, use the enhanced authentication flow
gatewayURL := getGatewayURL()
newCreds, authErr := auth.GetOrPromptForCredentials(gatewayURL)
if authErr != nil {
return nil, fmt.Errorf("authentication failed: %w", authErr)

View File

@ -1,519 +0,0 @@
package cli
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/encryption"
)
// HandleConfigCommand handles config management commands
func HandleConfigCommand(args []string) {
	if len(args) == 0 {
		showConfigHelp()
		return
	}

	name, rest := args[0], args[1:]

	// "help" is answered inline; everything else routes through the table.
	if name == "help" {
		showConfigHelp()
		return
	}

	routes := map[string]func([]string){
		"init":     handleConfigInit,
		"validate": handleConfigValidate,
	}
	if run, known := routes[name]; known {
		run(rest)
		return
	}

	fmt.Fprintf(os.Stderr, "Unknown config subcommand: %s\n", name)
	showConfigHelp()
	os.Exit(1)
}
// showConfigHelp prints usage for the `config` command group: `init`
// (full local stack, or a single file when --type is given) and `validate`.
func showConfigHelp() {
	fmt.Printf("Config Management Commands\n\n")
	fmt.Printf("Usage: network-cli config <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" init - Generate full network stack in ~/.debros (bootstrap + 2 nodes + gateway)\n")
	fmt.Printf(" validate --name <file> - Validate a config file\n\n")
	fmt.Printf("Init Default Behavior (no --type):\n")
	fmt.Printf(" Generates bootstrap.yaml, node2.yaml, node3.yaml, gateway.yaml with:\n")
	fmt.Printf(" - Auto-generated identities for bootstrap, node2, node3\n")
	fmt.Printf(" - Correct bootstrap_peers and join addresses\n")
	fmt.Printf(" - Default ports: P2P 4001-4003, HTTP 5001-5003, Raft 7001-7003\n\n")
	fmt.Printf("Init Options:\n")
	fmt.Printf(" --type <type> - Single config type: node, bootstrap, gateway (skips stack generation)\n")
	fmt.Printf(" --name <file> - Output filename (default: depends on --type or 'stack' for full stack)\n")
	fmt.Printf(" --force - Overwrite existing config/stack files\n\n")
	fmt.Printf("Single Config Options (with --type):\n")
	fmt.Printf(" --id <id> - Node ID for bootstrap peers\n")
	fmt.Printf(" --listen-port <port> - LibP2P listen port (default: 4001)\n")
	fmt.Printf(" --rqlite-http-port <port> - RQLite HTTP port (default: 5001)\n")
	fmt.Printf(" --rqlite-raft-port <port> - RQLite Raft port (default: 7001)\n")
	fmt.Printf(" --join <host:port> - RQLite address to join (required for non-bootstrap)\n")
	fmt.Printf(" --bootstrap-peers <peers> - Comma-separated bootstrap peer multiaddrs\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" network-cli config init # Generate full stack\n")
	fmt.Printf(" network-cli config init --force # Overwrite existing stack\n")
	fmt.Printf(" network-cli config init --type bootstrap # Single bootstrap config (legacy)\n")
	fmt.Printf(" network-cli config validate --name node.yaml\n")
}
// handleConfigInit implements `config init`.
//
// With no --type flag it delegates to initFullStack, generating the complete
// local stack (bootstrap + node2 + node3 + gateway). With --type it writes a
// single config file of that type into the config directory.
//
// Flag parsing notes: a flag whose value is missing (last argument) is
// silently ignored, and unrecognized arguments are skipped — the switch has
// no default arm.
func handleConfigInit(args []string) {
	// Parse flags
	var (
		cfgType        = ""
		name           = "" // Will be set based on type if not provided
		id             string
		listenPort     = 4001 // LibP2P listen port default
		rqliteHTTPPort = 5001
		rqliteRaftPort = 7001
		joinAddr       string
		bootstrapPeers string
		force          bool
	)

	for i := 0; i < len(args); i++ {
		switch args[i] {
		case "--type":
			if i+1 < len(args) {
				cfgType = args[i+1]
				i++ // consume the flag's value
			}
		case "--name":
			if i+1 < len(args) {
				name = args[i+1]
				i++
			}
		case "--id":
			if i+1 < len(args) {
				id = args[i+1]
				i++
			}
		case "--listen-port":
			if i+1 < len(args) {
				// Non-numeric values are ignored and the default is kept.
				if p, err := strconv.Atoi(args[i+1]); err == nil {
					listenPort = p
				}
				i++
			}
		case "--rqlite-http-port":
			if i+1 < len(args) {
				if p, err := strconv.Atoi(args[i+1]); err == nil {
					rqliteHTTPPort = p
				}
				i++
			}
		case "--rqlite-raft-port":
			if i+1 < len(args) {
				if p, err := strconv.Atoi(args[i+1]); err == nil {
					rqliteRaftPort = p
				}
				i++
			}
		case "--join":
			if i+1 < len(args) {
				joinAddr = args[i+1]
				i++
			}
		case "--bootstrap-peers":
			if i+1 < len(args) {
				bootstrapPeers = args[i+1]
				i++
			}
		case "--force":
			force = true
		}
	}

	// If --type is not specified, generate full stack
	if cfgType == "" {
		initFullStack(force)
		return
	}

	// Otherwise, continue with single-file generation

	// Validate type
	if cfgType != "node" && cfgType != "bootstrap" && cfgType != "gateway" {
		fmt.Fprintf(os.Stderr, "Invalid --type: %s (expected: node, bootstrap, or gateway)\n", cfgType)
		os.Exit(1)
	}

	// Set default name based on type if not provided
	if name == "" {
		switch cfgType {
		case "bootstrap":
			name = "bootstrap.yaml"
		case "gateway":
			name = "gateway.yaml"
		default:
			name = "node.yaml"
		}
	}

	// Ensure config directory exists
	configDir, err := config.EnsureConfigDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to ensure config directory: %v\n", err)
		os.Exit(1)
	}

	configPath := filepath.Join(configDir, name)

	// Check if file exists; refuse to clobber unless --force was given.
	if !force {
		if _, err := os.Stat(configPath); err == nil {
			fmt.Fprintf(os.Stderr, "Config file already exists at %s (use --force to overwrite)\n", configPath)
			os.Exit(1)
		}
	}

	// Generate config based on type
	var configContent string
	switch cfgType {
	case "node":
		configContent = GenerateNodeConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort, joinAddr, bootstrapPeers)
	case "bootstrap":
		configContent = GenerateBootstrapConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort)
	case "gateway":
		configContent = GenerateGatewayConfig(bootstrapPeers)
	}

	// Write config file
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write config file: %v\n", err)
		os.Exit(1)
	}

	fmt.Printf("✅ Configuration file created: %s\n", configPath)
	fmt.Printf(" Type: %s\n", cfgType)
	fmt.Printf("\nYou can now start the %s using the generated config.\n", cfgType)
}
// handleConfigValidate implements `config validate --name <file>`.
//
// It opens the named file in the config directory, decodes it via
// config.DecodeStrict (presumably rejecting unknown fields — TODO confirm in
// pkg/config), runs cfg.Validate, and prints every validation error before
// exiting non-zero. Exits 0 and prints a success line when the file is valid.
func handleConfigValidate(args []string) {
	// Only --name is recognized; anything else is ignored.
	var name string
	for i := 0; i < len(args); i++ {
		if args[i] == "--name" && i+1 < len(args) {
			name = args[i+1]
			i++
		}
	}
	if name == "" {
		fmt.Fprintf(os.Stderr, "Missing --name flag\n")
		showConfigHelp()
		os.Exit(1)
	}

	configDir, err := config.ConfigDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get config directory: %v\n", err)
		os.Exit(1)
	}
	configPath := filepath.Join(configDir, name)

	file, err := os.Open(configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to open config file: %v\n", err)
		os.Exit(1)
	}
	defer file.Close()

	var cfg config.Config
	if err := config.DecodeStrict(file, &cfg); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to parse config: %v\n", err)
		os.Exit(1)
	}

	// Run validation; all errors are reported at once, not just the first.
	errs := cfg.Validate()
	if len(errs) > 0 {
		fmt.Fprintf(os.Stderr, "\n❌ Configuration errors (%d):\n", len(errs))
		for _, err := range errs {
			fmt.Fprintf(os.Stderr, " - %s\n", err)
		}
		os.Exit(1)
	}

	fmt.Printf("✅ Config is valid: %s\n", configPath)
}
// failIfExists aborts with a "use --force" message when path already exists
// and force was not requested.
func failIfExists(path, label string, force bool) {
	if force {
		return
	}
	if _, err := os.Stat(path); err == nil {
		fmt.Fprintf(os.Stderr, "%s already exists at %s (use --force to overwrite)\n", label, path)
		os.Exit(1)
	}
}

// writeStackConfig refuses to overwrite an existing config (unless force),
// writes content with 0644 permissions, and reports the generated file.
// label is capitalized ("Node2 config"); error/success lines lowercase it.
func writeStackConfig(path, label, content string, force bool) {
	failIfExists(path, label, force)
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write %s: %v\n", strings.ToLower(label), err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated %s: %s\n", strings.ToLower(label), path)
}

// initFullStack generates a complete local development stack under ~/.debros:
// a bootstrap identity plus bootstrap.yaml, node2.yaml, node3.yaml and
// gateway.yaml, wired together with matching ports and bootstrap peers
// (P2P 4001-4003, HTTP 5001-5003, Raft 7001-7003).
func initFullStack(force bool) {
	fmt.Printf("🚀 Initializing full network stack...\n")

	// Ensure ~/.debros directory exists
	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")
	if err := os.MkdirAll(debrosDir, 0755); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create ~/.debros directory: %v\n", err)
		os.Exit(1)
	}

	// Step 1: Generate and persist the bootstrap identity.
	bootstrapIdentityDir := filepath.Join(debrosDir, "bootstrap")
	bootstrapIdentityPath := filepath.Join(bootstrapIdentityDir, "identity.key")
	failIfExists(bootstrapIdentityPath, "Bootstrap identity", force)

	bootstrapInfo, err := encryption.GenerateIdentity()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to generate bootstrap identity: %v\n", err)
		os.Exit(1)
	}
	if err := os.MkdirAll(bootstrapIdentityDir, 0755); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create bootstrap data directory: %v\n", err)
		os.Exit(1)
	}
	if err := encryption.SaveIdentity(bootstrapInfo, bootstrapIdentityPath); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to save bootstrap identity: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✅ Generated bootstrap identity: %s (Peer ID: %s)\n", bootstrapIdentityPath, bootstrapInfo.PeerID.String())

	// Every generated config references this multiaddr as its bootstrap peer.
	bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
	fmt.Printf(" Bootstrap multiaddr: %s\n", bootstrapMultiaddr)

	// Steps 2-5: generate the four config files.
	bootstrapPath := filepath.Join(debrosDir, "bootstrap.yaml")
	writeStackConfig(bootstrapPath, "Bootstrap config",
		GenerateBootstrapConfig("bootstrap.yaml", "", 4001, 5001, 7001), force)

	node2Path := filepath.Join(debrosDir, "node2.yaml")
	writeStackConfig(node2Path, "Node2 config",
		GenerateNodeConfig("node2.yaml", "", 4002, 5002, 7002, "localhost:5001", bootstrapMultiaddr), force)

	node3Path := filepath.Join(debrosDir, "node3.yaml")
	writeStackConfig(node3Path, "Node3 config",
		GenerateNodeConfig("node3.yaml", "", 4003, 5003, 7003, "localhost:5001", bootstrapMultiaddr), force)

	gatewayPath := filepath.Join(debrosDir, "gateway.yaml")
	writeStackConfig(gatewayPath, "Gateway config",
		GenerateGatewayConfig(bootstrapMultiaddr), force)

	fmt.Printf("\n" + strings.Repeat("=", 60) + "\n")
	fmt.Printf("✅ Full network stack initialized successfully!\n")
	fmt.Printf(strings.Repeat("=", 60) + "\n")
	fmt.Printf("\nBootstrap Peer ID: %s\n", bootstrapInfo.PeerID.String())
	fmt.Printf("Bootstrap Multiaddr: %s\n", bootstrapMultiaddr)
	fmt.Printf("\nGenerated configs:\n")
	fmt.Printf(" - %s\n", bootstrapPath)
	fmt.Printf(" - %s\n", node2Path)
	fmt.Printf(" - %s\n", node3Path)
	fmt.Printf(" - %s\n", gatewayPath)
	fmt.Printf("\nStart the network with: make dev\n")
}
// GenerateNodeConfig generates a node configuration
func GenerateNodeConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int, joinAddr, bootstrapPeers string) string {
nodeID := id
if nodeID == "" {
nodeID = fmt.Sprintf("node-%d", time.Now().Unix())
}
// Parse bootstrap peers
var peers []string
if bootstrapPeers != "" {
for _, p := range strings.Split(bootstrapPeers, ",") {
if p = strings.TrimSpace(p); p != "" {
peers = append(peers, p)
}
}
}
// Construct data_dir from name stem (remove .yaml)
dataDir := strings.TrimSuffix(name, ".yaml")
dataDir = filepath.Join(os.ExpandEnv("~"), ".debros", dataDir)
var peersYAML strings.Builder
if len(peers) == 0 {
peersYAML.WriteString(" bootstrap_peers: []")
} else {
peersYAML.WriteString(" bootstrap_peers:\n")
for _, p := range peers {
fmt.Fprintf(&peersYAML, " - \"%s\"\n", p)
}
}
if joinAddr == "" {
joinAddr = "localhost:5001"
}
return fmt.Sprintf(`node:
id: "%s"
type: "node"
listen_addresses:
- "/ip4/0.0.0.0/tcp/%d"
data_dir: "%s"
max_connections: 50
database:
data_dir: "%s/rqlite"
replication_factor: 3
shard_count: 16
max_database_size: 1073741824
backup_interval: "24h"
rqlite_port: %d
rqlite_raft_port: %d
rqlite_join_address: "%s"
cluster_sync_interval: "30s"
peer_inactivity_limit: "24h"
min_cluster_size: 1
discovery:
%s
discovery_interval: "15s"
bootstrap_port: %d
http_adv_address: "127.0.0.1:%d"
raft_adv_address: "127.0.0.1:%d"
node_namespace: "default"
security:
enable_tls: false
logging:
level: "info"
format: "console"
`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, joinAddr, peersYAML.String(), 4001, rqliteHTTPPort, rqliteRaftPort)
}
// GenerateBootstrapConfig generates a bootstrap configuration
func GenerateBootstrapConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int) string {
nodeID := id
if nodeID == "" {
nodeID = "bootstrap"
}
dataDir := filepath.Join(os.ExpandEnv("~"), ".debros", "bootstrap")
return fmt.Sprintf(`node:
id: "%s"
type: "bootstrap"
listen_addresses:
- "/ip4/0.0.0.0/tcp/%d"
data_dir: "%s"
max_connections: 50
database:
data_dir: "%s/rqlite"
replication_factor: 3
shard_count: 16
max_database_size: 1073741824
backup_interval: "24h"
rqlite_port: %d
rqlite_raft_port: %d
rqlite_join_address: ""
cluster_sync_interval: "30s"
peer_inactivity_limit: "24h"
min_cluster_size: 1
discovery:
bootstrap_peers: []
discovery_interval: "15s"
bootstrap_port: %d
http_adv_address: "127.0.0.1:%d"
raft_adv_address: "127.0.0.1:%d"
node_namespace: "default"
security:
enable_tls: false
logging:
level: "info"
format: "console"
`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, 4001, rqliteHTTPPort, rqliteRaftPort)
}
// GenerateGatewayConfig generates a gateway configuration
func GenerateGatewayConfig(bootstrapPeers string) string {
var peers []string
if bootstrapPeers != "" {
for _, p := range strings.Split(bootstrapPeers, ",") {
if p = strings.TrimSpace(p); p != "" {
peers = append(peers, p)
}
}
}
var peersYAML strings.Builder
if len(peers) == 0 {
peersYAML.WriteString("bootstrap_peers: []")
} else {
peersYAML.WriteString("bootstrap_peers:\n")
for _, p := range peers {
fmt.Fprintf(&peersYAML, " - \"%s\"\n", p)
}
}
return fmt.Sprintf(`listen_addr: ":6001"
client_namespace: "default"
rqlite_dsn: ""
%s
`, peersYAML.String())
}

194
pkg/cli/dev_commands.go Normal file
View File

@ -0,0 +1,194 @@
package cli
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/DeBrosOfficial/network/pkg/environments/development"
)
// HandleDevCommand handles the dev command group
func HandleDevCommand(args []string) {
	if len(args) == 0 {
		showDevHelp()
		return
	}

	name, rest := args[0], args[1:]

	// "help" is answered inline; the rest go through a dispatch table.
	if name == "help" {
		showDevHelp()
		return
	}

	dispatch := map[string]func([]string){
		"up":     handleDevUp,
		"down":   handleDevDown,
		"status": handleDevStatus,
		"logs":   handleDevLogs,
	}
	if handler, ok := dispatch[name]; ok {
		handler(rest)
		return
	}

	fmt.Fprintf(os.Stderr, "Unknown dev subcommand: %s\n", name)
	showDevHelp()
	os.Exit(1)
}
// showDevHelp prints usage, subcommands, and examples for `dbn dev`.
func showDevHelp() {
	fmt.Printf("🚀 Development Environment Commands\n\n")
	fmt.Printf("Usage: dbn dev <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" up - Start development environment (2 bootstraps + 3 nodes + gateway)\n")
	fmt.Printf(" down - Stop all development services\n")
	fmt.Printf(" status - Show status of running services\n")
	fmt.Printf(" logs <component> - Tail logs for a component\n")
	fmt.Printf(" help - Show this help\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" dbn dev up\n")
	fmt.Printf(" dbn dev down\n")
	fmt.Printf(" dbn dev status\n")
	fmt.Printf(" dbn dev logs bootstrap --follow\n")
}
// handleDevUp brings up the local development environment: it verifies
// required dependencies and port availability, materializes config files
// under ~/.debros, starts all services via the process manager, and prints
// the key endpoints. Exits non-zero as soon as any step fails.
func handleDevUp(args []string) {
	ctx := context.Background()

	// Get home directory and .debros path
	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")

	// Step 1: Check dependencies (external tools — see development pkg).
	fmt.Printf("📋 Checking dependencies...\n\n")
	checker := development.NewDependencyChecker()
	if _, err := checker.CheckAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("✓ All required dependencies available\n\n")

	// Step 2: Check ports; on conflict, print the full port → service map.
	fmt.Printf("🔌 Checking port availability...\n\n")
	portChecker := development.NewPortChecker()
	if _, err := portChecker.CheckAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ %v\n\n", err)
		fmt.Fprintf(os.Stderr, "Port mapping:\n")
		for port, service := range development.PortMap() {
			fmt.Fprintf(os.Stderr, " %d - %s\n", port, service)
		}
		fmt.Fprintf(os.Stderr, "\n")
		os.Exit(1)
	}
	fmt.Printf("✓ All required ports available\n\n")

	// Step 3: Ensure configs (EnsureAll presumably generates any missing
	// config files under debrosDir — TODO confirm in development pkg).
	fmt.Printf("⚙️ Preparing configuration files...\n\n")
	ensurer := development.NewConfigEnsurer(debrosDir)
	if err := ensurer.EnsureAll(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("\n")

	// Step 4: Start services
	pm := development.NewProcessManager(debrosDir, os.Stdout)
	if err := pm.StartAll(ctx); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
		os.Exit(1)
	}

	// Step 5: Show summary of endpoints and follow-up commands.
	fmt.Printf("🎉 Development environment is running!\n\n")
	fmt.Printf("Key endpoints:\n")
	fmt.Printf(" Gateway: http://localhost:6001\n")
	fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
	fmt.Printf(" Bootstrap2 IPFS: http://localhost:4511\n")
	fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
	fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
	fmt.Printf(" Node4 IPFS: http://localhost:4504\n")
	fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
	fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
	fmt.Printf("Useful commands:\n")
	fmt.Printf(" dbn dev status - Show status\n")
	fmt.Printf(" dbn dev logs bootstrap - Bootstrap logs\n")
	fmt.Printf(" dbn dev logs bootstrap2 - Bootstrap2 logs\n")
	fmt.Printf(" dbn dev down - Stop all services\n\n")
	fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
}
// handleDevDown stops every service started by `dbn dev up`.
// A stop failure is reported as a warning, not a fatal error.
func handleDevDown(args []string) {
	home, homeErr := os.UserHomeDir()
	if homeErr != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", homeErr)
		os.Exit(1)
	}

	manager := development.NewProcessManager(filepath.Join(home, ".debros"), os.Stdout)
	if stopErr := manager.StopAll(context.Background()); stopErr != nil {
		fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", stopErr)
	}
}
// handleDevStatus reports the current state of the dev-environment services.
func handleDevStatus(args []string) {
	home, homeErr := os.UserHomeDir()
	if homeErr != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", homeErr)
		os.Exit(1)
	}

	manager := development.NewProcessManager(filepath.Join(home, ".debros"), os.Stdout)
	manager.Status(context.Background())
}
// handleDevLogs prints (or, with --follow, tails) the log file for one dev
// component. The component name maps to ~/.debros/logs/<component>.log.
//
// Usage: dbn dev logs <component> [--follow]
func handleDevLogs(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: dbn dev logs <component> [--follow]\n")
		fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, bootstrap2, node2, node3, node4, gateway, ipfs-bootstrap, ipfs-bootstrap2, ipfs-node2, ipfs-node3, ipfs-node4, olric, anon\n")
		os.Exit(1)
	}

	component := args[0]
	follow := len(args) > 1 && args[1] == "--follow"

	homeDir, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
		os.Exit(1)
	}
	debrosDir := filepath.Join(homeDir, ".debros")
	logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))

	if _, err := os.Stat(logPath); os.IsNotExist(err) {
		fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
		os.Exit(1)
	}

	if follow {
		fmt.Printf("Following %s (press Ctrl+C to stop)...\n\n", logPath)
		// Invoke tail directly rather than via `sh -c "tail -f <path>"`:
		// interpolating the path into a shell string broke on paths
		// containing spaces and let shell metacharacters be interpreted.
		cmd := exec.Command("tail", "-f", logPath)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Stdin = os.Stdin
		// Ctrl+C interrupts both tail and this process; a signal-induced
		// exit error from tail is expected here, so it is ignored.
		_ = cmd.Run()
	} else {
		data, err := os.ReadFile(logPath)
		if err != nil {
			// Previously this error was discarded and nothing was printed.
			fmt.Fprintf(os.Stderr, "❌ Failed to read log file: %v\n", err)
			os.Exit(1)
		}
		fmt.Print(string(data))
	}
}

View File

@ -35,7 +35,7 @@ func HandleEnvCommand(args []string) {
func showEnvHelp() {
fmt.Printf("🌍 Environment Management Commands\n\n")
fmt.Printf("Usage: network-cli env <subcommand>\n\n")
fmt.Printf("Usage: dbn env <subcommand>\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" list - List all available environments\n")
fmt.Printf(" current - Show current active environment\n")
@ -46,12 +46,12 @@ func showEnvHelp() {
fmt.Printf(" devnet - Development network (https://devnet.debros.network)\n")
fmt.Printf(" testnet - Test network (https://testnet.debros.network)\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" network-cli env list\n")
fmt.Printf(" network-cli env current\n")
fmt.Printf(" network-cli env switch devnet\n")
fmt.Printf(" network-cli env enable testnet\n")
fmt.Printf(" network-cli devnet enable # Shorthand for switch to devnet\n")
fmt.Printf(" network-cli testnet enable # Shorthand for switch to testnet\n")
fmt.Printf(" dbn env list\n")
fmt.Printf(" dbn env current\n")
fmt.Printf(" dbn env switch devnet\n")
fmt.Printf(" dbn env enable testnet\n")
fmt.Printf(" dbn devnet enable # Shorthand for switch to devnet\n")
fmt.Printf(" dbn testnet enable # Shorthand for switch to testnet\n")
}
func handleEnvList() {
@ -99,7 +99,7 @@ func handleEnvCurrent() {
func handleEnvSwitch(args []string) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: network-cli env switch <environment>\n")
fmt.Fprintf(os.Stderr, "Usage: dbn env switch <environment>\n")
fmt.Fprintf(os.Stderr, "Available: local, devnet, testnet\n")
os.Exit(1)
}

1378
pkg/cli/prod_commands.go Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,81 @@
package cli
import (
"testing"
)
// TestProdCommandFlagParsing verifies that prod command flags are parsed
// correctly. Each case replays the manual scan used for install-flag parsing
// (--bootstrap, --vps-ip, --peers, --bootstrap-join) over a sample argv and
// checks the extracted values.
func TestProdCommandFlagParsing(t *testing.T) {
	tests := []struct {
		name                string
		args                []string
		expectBootstrap     bool
		expectVPSIP         string
		expectBootstrapJoin string
		expectPeers         string
	}{
		{
			name:            "bootstrap node",
			args:            []string{"install", "--bootstrap"},
			expectBootstrap: true,
		},
		{
			name:        "non-bootstrap with vps-ip",
			args:        []string{"install", "--vps-ip", "10.0.0.2", "--peers", "multiaddr1,multiaddr2"},
			expectVPSIP: "10.0.0.2",
			expectPeers: "multiaddr1,multiaddr2",
		},
		{
			name:                "secondary bootstrap",
			args:                []string{"install", "--bootstrap", "--vps-ip", "10.0.0.3", "--bootstrap-join", "10.0.0.1:7001"},
			expectBootstrap:     true,
			expectVPSIP:         "10.0.0.3",
			expectBootstrapJoin: "10.0.0.1:7001",
		},
		{
			name:            "with domain",
			args:            []string{"install", "--bootstrap", "--domain", "example.com"},
			expectBootstrap: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Extract flags manually to verify parsing logic.
			// Value-taking flags read the next argv entry when present.
			isBootstrap := false
			var vpsIP, peersStr, bootstrapJoin string
			for i, arg := range tt.args {
				switch arg {
				case "--bootstrap":
					isBootstrap = true
				case "--peers":
					if i+1 < len(tt.args) {
						peersStr = tt.args[i+1]
					}
				case "--vps-ip":
					if i+1 < len(tt.args) {
						vpsIP = tt.args[i+1]
					}
				case "--bootstrap-join":
					if i+1 < len(tt.args) {
						bootstrapJoin = tt.args[i+1]
					}
				}
			}
			if isBootstrap != tt.expectBootstrap {
				t.Errorf("expected bootstrap=%v, got %v", tt.expectBootstrap, isBootstrap)
			}
			if vpsIP != tt.expectVPSIP {
				t.Errorf("expected vpsIP=%q, got %q", tt.expectVPSIP, vpsIP)
			}
			if peersStr != tt.expectPeers {
				t.Errorf("expected peers=%q, got %q", tt.expectPeers, peersStr)
			}
			if bootstrapJoin != tt.expectBootstrapJoin {
				t.Errorf("expected bootstrapJoin=%q, got %q", tt.expectBootstrapJoin, bootstrapJoin)
			}
		})
	}
}

View File

@ -1,327 +0,0 @@
package cli
import (
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"github.com/DeBrosOfficial/network/pkg/config"
"gopkg.in/yaml.v3"
)
// HandleRQLiteCommand dispatches "rqlite" subcommands. With no arguments it
// prints help; all real work is Linux-only.
func HandleRQLiteCommand(args []string) {
	if len(args) == 0 {
		showRQLiteHelp()
		return
	}
	if runtime.GOOS != "linux" {
		fmt.Fprintf(os.Stderr, "❌ RQLite commands are only supported on Linux\n")
		os.Exit(1)
	}
	sub, rest := args[0], args[1:]
	switch sub {
	case "fix":
		handleRQLiteFix(rest)
	case "help":
		showRQLiteHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown rqlite subcommand: %s\n", sub)
		showRQLiteHelp()
		os.Exit(1)
	}
}
// showRQLiteHelp prints usage, a description of the "fix" subcommand, its
// requirements, and examples for the "rqlite" command group.
func showRQLiteHelp() {
	fmt.Printf("🗄️ RQLite Commands\n\n")
	fmt.Printf("Usage: network-cli rqlite <subcommand> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" fix - Fix misconfigured join address and clean stale raft state\n\n")
	fmt.Printf("Description:\n")
	fmt.Printf(" The 'fix' command automatically repairs common rqlite cluster issues:\n")
	fmt.Printf(" - Corrects join address from HTTP port (5001) to Raft port (7001) if misconfigured\n")
	fmt.Printf(" - Cleans stale raft state that prevents proper cluster formation\n")
	fmt.Printf(" - Restarts the node service with corrected configuration\n\n")
	fmt.Printf("Requirements:\n")
	fmt.Printf(" - Must be run as root (use sudo)\n")
	fmt.Printf(" - Only works on non-bootstrap nodes (nodes with join_address configured)\n")
	fmt.Printf(" - Stops and restarts the debros-node service\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" sudo network-cli rqlite fix\n")
}
// handleRQLiteFix repairs a follower node's rqlite cluster membership:
// it corrects a join address that points at the HTTP port instead of the
// Raft port, wipes stale raft state, and restarts the debros-node service.
// Supports --dry-run/-n to preview without making changes. Requires root.
// Bootstrap nodes (no join address) are left untouched.
//
// The step order matters: the service is stopped before the config is
// rewritten and raft state is cleaned, and only restarted afterwards.
func handleRQLiteFix(args []string) {
	requireRoot()
	// Parse optional flags
	dryRun := false
	for _, arg := range args {
		if arg == "--dry-run" || arg == "-n" {
			dryRun = true
		}
	}
	if dryRun {
		fmt.Printf("🔍 Dry-run mode - no changes will be made\n\n")
	}
	fmt.Printf("🔧 RQLite Cluster Repair\n\n")
	// Load config
	configPath, err := config.DefaultPath("node.yaml")
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to determine config path: %v\n", err)
		os.Exit(1)
	}
	cfg, err := loadConfigForRepair(configPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to load config: %v\n", err)
		os.Exit(1)
	}
	// Check if this is a bootstrap node — nothing to repair there.
	if cfg.Node.Type == "bootstrap" || cfg.Database.RQLiteJoinAddress == "" {
		fmt.Printf(" This is a bootstrap node (no join address configured)\n")
		fmt.Printf(" Bootstrap nodes don't need repair - they are the cluster leader\n")
		fmt.Printf(" Run this command on follower nodes instead\n")
		return
	}
	joinAddr := cfg.Database.RQLiteJoinAddress
	// Check if join address needs fixing (HTTP port used where Raft port belongs)
	needsConfigFix := needsFix(joinAddr, cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort)
	var fixedAddr string
	if needsConfigFix {
		fmt.Printf("⚠️ Detected misconfigured join address: %s\n", joinAddr)
		fmt.Printf(" Expected Raft port (%d) but found HTTP port (%d)\n", cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort)
		// Extract host from join address
		host, _, err := parseJoinAddress(joinAddr)
		if err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to parse join address: %v\n", err)
			os.Exit(1)
		}
		// Fix the join address - rqlite expects Raft port for -join
		fixedAddr = fmt.Sprintf("%s:%d", host, cfg.Database.RQLiteRaftPort)
		fmt.Printf(" Corrected address: %s\n\n", fixedAddr)
	} else {
		fmt.Printf("✅ Join address looks correct: %s\n", joinAddr)
		fmt.Printf(" Will clean stale raft state to ensure proper cluster formation\n\n")
		fixedAddr = joinAddr // No change needed
	}
	if dryRun {
		// Preview only: report what would happen, then bail out before
		// touching the service, config, or raft state.
		fmt.Printf("🔍 Dry-run: Would clean raft state")
		if needsConfigFix {
			fmt.Printf(" and fix config")
		}
		fmt.Printf("\n")
		return
	}
	// Stop the service
	fmt.Printf("⏹️ Stopping debros-node service...\n")
	if err := stopService("debros-node"); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to stop service: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf(" ✓ Service stopped\n\n")
	// Update config file if needed
	if needsConfigFix {
		fmt.Printf("📝 Updating configuration file...\n")
		if err := updateConfigJoinAddress(configPath, fixedAddr); err != nil {
			// Service is already stopped at this point; tell the operator.
			fmt.Fprintf(os.Stderr, "❌ Failed to update config: %v\n", err)
			fmt.Fprintf(os.Stderr, " Service is stopped - please fix manually and restart\n")
			os.Exit(1)
		}
		fmt.Printf(" ✓ Config updated: %s\n\n", configPath)
	}
	// Clean raft state
	fmt.Printf("🧹 Cleaning stale raft state...\n")
	dataDir := expandDataDir(cfg.Node.DataDir)
	raftDir := filepath.Join(dataDir, "rqlite", "raft")
	if err := cleanRaftState(raftDir); err != nil {
		// Best-effort: a leftover raft state is reported but not fatal.
		fmt.Fprintf(os.Stderr, "⚠️ Failed to clean raft state: %v\n", err)
		fmt.Fprintf(os.Stderr, " Continuing anyway - raft state may still exist\n")
	} else {
		fmt.Printf(" ✓ Raft state cleaned\n\n")
	}
	// Restart the service
	fmt.Printf("🚀 Restarting debros-node service...\n")
	if err := startService("debros-node"); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to start service: %v\n", err)
		fmt.Fprintf(os.Stderr, " Config has been fixed - please restart manually:\n")
		fmt.Fprintf(os.Stderr, " sudo systemctl start debros-node\n")
		os.Exit(1)
	}
	fmt.Printf(" ✓ Service started\n\n")
	fmt.Printf("✅ Repair complete!\n\n")
	fmt.Printf("The node should now join the cluster correctly.\n")
	fmt.Printf("Monitor logs with: sudo network-cli service logs node --follow\n")
}
// loadConfigForRepair reads and strictly decodes the node configuration file
// at path (strict decoding rejects unknown keys).
func loadConfigForRepair(path string) (*config.Config, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open config file: %w", err)
	}
	defer f.Close()
	cfg := &config.Config{}
	if err := config.DecodeStrict(f, cfg); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}
	return cfg, nil
}
// needsFix reports whether the configured join address points at the rqlite
// HTTP port instead of the Raft port. rqlite's -join flag expects the Raft
// port, so an address ending in the HTTP port must be rewritten. Empty,
// unparseable, or unknown-port addresses are left alone (returns false).
func needsFix(joinAddr string, raftPort int, httpPort int) bool {
	if joinAddr == "" {
		return false
	}
	// Strip a scheme prefix if present (an address carries at most one).
	stripped := strings.TrimPrefix(strings.TrimPrefix(joinAddr, "https://"), "http://")
	_, port, err := net.SplitHostPort(stripped)
	if err != nil {
		// Can't parse host:port — assume it's fine.
		return false
	}
	// Only the HTTP port counts as misconfigured; the Raft port (or any
	// other port) is accepted as-is.
	return port == fmt.Sprintf("%d", httpPort)
}
// parseJoinAddress splits a join address of the form [http[s]://]host:port
// into its host and port components.
func parseJoinAddress(joinAddr string) (host, port string, err error) {
	// Drop a scheme prefix if present (only one can match).
	trimmed := strings.TrimPrefix(strings.TrimPrefix(joinAddr, "https://"), "http://")
	if host, port, err = net.SplitHostPort(trimmed); err != nil {
		return "", "", fmt.Errorf("invalid join address format: %w", err)
	}
	return host, port, nil
}
// updateConfigJoinAddress rewrites database.rqlite_join_address in the YAML
// config file at configPath. The whole document is round-tripped through a
// generic map so unrelated keys survive the edit.
func updateConfigJoinAddress(configPath string, newJoinAddr string) error {
	raw, err := os.ReadFile(configPath)
	if err != nil {
		return fmt.Errorf("failed to read config file: %w", err)
	}
	var doc map[string]interface{}
	if err := yaml.Unmarshal(raw, &doc); err != nil {
		return fmt.Errorf("failed to parse YAML: %w", err)
	}
	dbSection, ok := doc["database"].(map[string]interface{})
	if !ok {
		return fmt.Errorf("database section not found in config")
	}
	dbSection["rqlite_join_address"] = newJoinAddr
	out, err := yaml.Marshal(doc)
	if err != nil {
		return fmt.Errorf("failed to marshal YAML: %w", err)
	}
	if err := os.WriteFile(configPath, out, 0644); err != nil {
		return fmt.Errorf("failed to write config file: %w", err)
	}
	return nil
}
func expandDataDir(dataDir string) string {
expanded := os.ExpandEnv(dataDir)
if strings.HasPrefix(expanded, "~") {
home, err := os.UserHomeDir()
if err != nil {
return expanded // Fallback to original
}
expanded = filepath.Join(home, expanded[1:])
}
return expanded
}
func cleanRaftState(raftDir string) error {
if _, err := os.Stat(raftDir); os.IsNotExist(err) {
return nil // Directory doesn't exist, nothing to clean
}
// Remove raft state files
filesToRemove := []string{
"peers.json",
"peers.json.backup",
"peers.info",
"raft.db",
}
for _, file := range filesToRemove {
filePath := filepath.Join(raftDir, file)
if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove %s: %w", filePath, err)
}
}
return nil
}
// stopService stops the named systemd unit via `systemctl stop`.
func stopService(serviceName string) error {
	if err := exec.Command("systemctl", "stop", serviceName).Run(); err != nil {
		return fmt.Errorf("systemctl stop failed: %w", err)
	}
	return nil
}
// startService starts the named systemd unit via `systemctl start`.
func startService(serviceName string) error {
	if err := exec.Command("systemctl", "start", serviceName).Run(); err != nil {
		return fmt.Errorf("systemctl start failed: %w", err)
	}
	return nil
}

View File

@ -1,243 +0,0 @@
package cli
import (
"fmt"
"os"
"os/exec"
"runtime"
"strings"
)
// HandleServiceCommand dispatches systemd service management subcommands
// (start/stop/restart/status/logs/help). With no arguments it prints help;
// real work is only supported on Linux with systemd.
func HandleServiceCommand(args []string) {
	if len(args) == 0 {
		showServiceHelp()
		return
	}
	if runtime.GOOS != "linux" {
		fmt.Fprintf(os.Stderr, "❌ Service commands are only supported on Linux with systemd\n")
		os.Exit(1)
	}
	sub, rest := args[0], args[1:]
	switch sub {
	case "start":
		handleServiceStart(rest)
	case "stop":
		handleServiceStop(rest)
	case "restart":
		handleServiceRestart(rest)
	case "status":
		handleServiceStatus(rest)
	case "logs":
		handleServiceLogs(rest)
	case "help":
		showServiceHelp()
	default:
		fmt.Fprintf(os.Stderr, "Unknown service subcommand: %s\n", sub)
		showServiceHelp()
		os.Exit(1)
	}
}
// showServiceHelp prints usage, subcommands, valid targets, log options, and
// examples for the "service" command group.
func showServiceHelp() {
	fmt.Printf("🔧 Service Management Commands\n\n")
	fmt.Printf("Usage: network-cli service <subcommand> <target> [options]\n\n")
	fmt.Printf("Subcommands:\n")
	fmt.Printf(" start <target> - Start services\n")
	fmt.Printf(" stop <target> - Stop services\n")
	fmt.Printf(" restart <target> - Restart services\n")
	fmt.Printf(" status <target> - Show service status\n")
	fmt.Printf(" logs <target> - View service logs\n\n")
	fmt.Printf("Targets:\n")
	fmt.Printf(" node - DeBros node service\n")
	fmt.Printf(" gateway - DeBros gateway service\n")
	fmt.Printf(" all - All DeBros services\n\n")
	fmt.Printf("Logs Options:\n")
	fmt.Printf(" --follow - Follow logs in real-time (-f)\n")
	fmt.Printf(" --since=<time> - Show logs since time (e.g., '1h', '30m', '2d')\n")
	fmt.Printf(" -n <lines> - Show last N lines\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" network-cli service start node\n")
	fmt.Printf(" network-cli service status all\n")
	fmt.Printf(" network-cli service restart gateway\n")
	fmt.Printf(" network-cli service logs node --follow\n")
	fmt.Printf(" network-cli service logs gateway --since=1h\n")
	fmt.Printf(" network-cli service logs node -n 100\n")
}
// getServices maps a CLI target name ("node", "gateway", "all") to the
// systemd unit name(s) it controls. An unknown target prints an error and
// terminates the process.
func getServices(target string) []string {
	units := map[string][]string{
		"node":    {"debros-node"},
		"gateway": {"debros-gateway"},
		"all":     {"debros-node", "debros-gateway"},
	}
	if svc, ok := units[target]; ok {
		return svc
	}
	fmt.Fprintf(os.Stderr, "❌ Invalid target: %s (use: node, gateway, or all)\n", target)
	os.Exit(1)
	return nil
}
// requireRoot aborts the process unless it runs with effective UID 0.
func requireRoot() {
	if os.Geteuid() == 0 {
		return
	}
	fmt.Fprintf(os.Stderr, "❌ This command requires root privileges\n")
	fmt.Fprintf(os.Stderr, " Run with: sudo network-cli service ...\n")
	os.Exit(1)
}
// runServiceAction implements the shared start/stop/restart flow: validate
// args, require root, resolve the target to its systemd units, then apply
// the systemctl action to each unit. "action" is the systemctl verb (also
// shown in usage/error messages), "banner" is printed before acting, and
// "done" labels each successful unit (e.g. "Started").
//
// Extracted because the three handlers were identical apart from the verb
// strings; their public signatures are unchanged.
func runServiceAction(args []string, action, banner, done string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli service %s <node|gateway|all>\n", action)
		os.Exit(1)
	}
	requireRoot()
	target := args[0]
	services := getServices(target)
	fmt.Printf("%s\n", banner)
	for _, service := range services {
		// Failures are reported per-unit; remaining units are still handled.
		if err := exec.Command("systemctl", action, service).Run(); err != nil {
			fmt.Fprintf(os.Stderr, "❌ Failed to %s %s: %v\n", action, service, err)
			continue
		}
		fmt.Printf(" ✓ %s %s\n", done, service)
	}
}

// handleServiceStart starts the systemd unit(s) for the given target.
func handleServiceStart(args []string) {
	runServiceAction(args, "start", "🚀 Starting services...", "Started")
}

// handleServiceStop stops the systemd unit(s) for the given target.
func handleServiceStop(args []string) {
	runServiceAction(args, "stop", "⏹️ Stopping services...", "Stopped")
}

// handleServiceRestart restarts the systemd unit(s) for the given target.
func handleServiceRestart(args []string) {
	runServiceAction(args, "restart", "🔄 Restarting services...", "Restarted")
}
// handleServiceStatus prints a one-line active/inactive summary followed by
// the full `systemctl status` output for each unit of the target. With no
// argument the target defaults to "all".
func handleServiceStatus(args []string) {
	target := "all"
	if len(args) > 0 {
		target = args[0]
	}
	fmt.Printf("📊 Service Status:\n\n")
	for _, service := range getServices(target) {
		// `systemctl is-active` yields a single-word state on stdout.
		out, _ := exec.Command("systemctl", "is-active", service).Output()
		state := strings.TrimSpace(string(out))
		icon := "❌"
		switch state {
		case "active":
			icon = "✅"
		case "inactive":
			icon = "⚪"
		}
		fmt.Printf("%s %s: %s\n", icon, service, state)
		// Follow up with the detailed status block.
		detail := exec.Command("systemctl", "status", service, "--no-pager", "-l")
		detail.Stdout = os.Stdout
		detail.Stderr = os.Stderr
		detail.Run()
		fmt.Println()
	}
}
// handleServiceLogs streams journalctl output for a single service target.
// Supported options: --follow/-f, --since=<time>, and -n <lines>. The "all"
// target is rejected because journalctl is invoked for one unit at a time.
func handleServiceLogs(args []string) {
	if len(args) == 0 {
		fmt.Fprintf(os.Stderr, "Usage: network-cli service logs <node|gateway> [--follow] [--since=<time>] [-n <lines>]\n")
		os.Exit(1)
	}
	target := args[0]
	if target == "all" {
		fmt.Fprintf(os.Stderr, "❌ Cannot show logs for 'all' - specify 'node' or 'gateway'\n")
		os.Exit(1)
	}
	services := getServices(target)
	if len(services) == 0 {
		os.Exit(1)
	}
	unit := services[0]
	// Translate CLI options into journalctl flags.
	jArgs := []string{"-u", unit, "--no-pager"}
	for i := 1; i < len(args); i++ {
		switch {
		case args[i] == "--follow" || args[i] == "-f":
			jArgs = append(jArgs, "-f")
		case strings.HasPrefix(args[i], "--since="):
			jArgs = append(jArgs, "--since="+strings.TrimPrefix(args[i], "--since="))
		case args[i] == "-n":
			if i+1 < len(args) {
				jArgs = append(jArgs, "-n", args[i+1])
				i++ // consume the line-count value
			}
		}
	}
	fmt.Printf("📜 Logs for %s:\n\n", unit)
	cmd := exec.Command("journalctl", jArgs...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "❌ Failed to show logs: %v\n", err)
		os.Exit(1)
	}
}

File diff suppressed because it is too large Load Diff

View File

@ -35,6 +35,7 @@ type Client struct {
database *DatabaseClientImpl
network *NetworkInfoImpl
pubsub *pubSubBridge
storage *StorageClientImpl
// State
connected bool
@ -70,6 +71,7 @@ func NewClient(config *ClientConfig) (NetworkClient, error) {
// Initialize components (will be configured when connected)
client.database = &DatabaseClientImpl{client: client}
client.network = &NetworkInfoImpl{client: client}
client.storage = &StorageClientImpl{client: client}
return client, nil
}
@ -89,6 +91,11 @@ func (c *Client) Network() NetworkInfo {
return c.network
}
// Storage returns the storage client used for IPFS operations via the
// gateway (see StorageClient).
func (c *Client) Storage() StorageClient {
	return c.storage
}
// Config returns a snapshot copy of the client's configuration
func (c *Client) Config() *ClientConfig {
c.mu.RLock()

View File

@ -50,7 +50,10 @@ func TestNormalizeEndpoints(t *testing.T) {
}
func TestEndpointFromMultiaddr(t *testing.T) {
ma, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
ma, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
if err != nil {
t.Fatalf("failed to create multiaddr: %v", err)
}
if ep := endpointFromMultiaddr(ma, 5001); ep != "http://127.0.0.1:5001" {
t.Fatalf("unexpected endpoint: %s", ep)
}

View File

@ -3,6 +3,7 @@ package client
import (
"context"
"fmt"
"io"
"time"
)
@ -17,6 +18,9 @@ type NetworkClient interface {
// Network information
Network() NetworkInfo
// Storage operations (IPFS)
Storage() StorageClient
// Lifecycle
Connect() error
Disconnect() error
@ -51,6 +55,24 @@ type NetworkInfo interface {
DisconnectFromPeer(ctx context.Context, peerID string) error
}
// StorageClient provides IPFS storage operations, proxied through the
// gateway's HTTP API.
type StorageClient interface {
	// Upload uploads content read from reader to IPFS under the given name
	// and pins it, returning the resulting CID metadata.
	Upload(ctx context.Context, reader io.Reader, name string) (*StorageUploadResult, error)
	// Pin pins an existing CID, optionally associating a display name.
	Pin(ctx context.Context, cid string, name string) (*StoragePinResult, error)
	// Status gets the pin status for a CID.
	Status(ctx context.Context, cid string) (*StorageStatus, error)
	// Get retrieves content from IPFS by CID. The caller must close the
	// returned reader.
	Get(ctx context.Context, cid string) (io.ReadCloser, error)
	// Unpin removes a pin from a CID.
	Unpin(ctx context.Context, cid string) error
}
// MessageHandler is called when a pub/sub message is received
type MessageHandler func(topic string, data []byte) error
@ -107,12 +129,38 @@ type HealthStatus struct {
ResponseTime time.Duration `json:"response_time"`
}
// StorageUploadResult represents the result of uploading content to IPFS.
type StorageUploadResult struct {
	Cid  string `json:"cid"`  // content identifier assigned to the upload
	Name string `json:"name"` // name supplied at upload time
	Size int64  `json:"size"` // size reported by the gateway (presumably bytes — TODO confirm)
}

// StoragePinResult represents the result of pinning a CID.
type StoragePinResult struct {
	Cid  string `json:"cid"`
	Name string `json:"name"`
}

// StorageStatus represents the status of a pinned CID as reported by the
// gateway.
type StorageStatus struct {
	Cid               string   `json:"cid"`
	Name              string   `json:"name"`
	Status            string   `json:"status"` // "pinned", "pinning", "queued", "unpinned", "error"
	ReplicationMin    int      `json:"replication_min"`
	ReplicationMax    int      `json:"replication_max"`
	ReplicationFactor int      `json:"replication_factor"`
	Peers             []string `json:"peers"`
	Error             string   `json:"error,omitempty"` // error detail; omitted when empty
}
// ClientConfig represents configuration for network clients
type ClientConfig struct {
AppName string `json:"app_name"`
DatabaseName string `json:"database_name"`
BootstrapPeers []string `json:"bootstrap_peers"`
DatabaseEndpoints []string `json:"database_endpoints"`
GatewayURL string `json:"gateway_url"` // Gateway URL for HTTP API access (e.g., "http://localhost:6001")
ConnectTimeout time.Duration `json:"connect_timeout"`
RetryAttempts int `json:"retry_attempts"`
RetryDelay time.Duration `json:"retry_delay"`
@ -132,6 +180,7 @@ func DefaultClientConfig(appName string) *ClientConfig {
DatabaseName: fmt.Sprintf("%s_db", appName),
BootstrapPeers: peers,
DatabaseEndpoints: endpoints,
GatewayURL: "http://localhost:6001",
ConnectTimeout: time.Second * 30,
RetryAttempts: 3,
RetryDelay: time.Second * 5,

View File

@ -0,0 +1,245 @@
package client
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strings"
"time"
)
// StorageClientImpl implements StorageClient by issuing HTTP requests to the
// gateway's /v1/storage endpoints.
type StorageClientImpl struct {
	client *Client // parent client; supplies configuration and access checks
}
// Upload streams reader's content to the gateway's /v1/storage/upload
// endpoint as a multipart form under the given name, pins it, and returns
// the resulting CID metadata. Requires valid client credentials.
func (s *StorageClientImpl) Upload(ctx context.Context, reader io.Reader, name string) (*StorageUploadResult, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}
	// Assemble the multipart body in memory.
	var body bytes.Buffer
	form := multipart.NewWriter(&body)
	filePart, err := form.CreateFormFile("file", name)
	if err != nil {
		return nil, fmt.Errorf("failed to create form file: %w", err)
	}
	if _, err = io.Copy(filePart, reader); err != nil {
		return nil, fmt.Errorf("failed to copy data: %w", err)
	}
	if err = form.Close(); err != nil {
		return nil, fmt.Errorf("failed to close writer: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", s.getGatewayURL()+"/v1/storage/upload", &body)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", form.FormDataContentType())
	s.addAuthHeaders(req)
	// Generous timeout: uploads may be large.
	httpClient := &http.Client{Timeout: 5 * time.Minute}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(msg))
	}
	out := &StorageUploadResult{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return out, nil
}
// Pin asks the gateway to pin an existing CID; an optional non-empty name is
// included in the request body. Requires valid client credentials.
func (s *StorageClientImpl) Pin(ctx context.Context, cid string, name string) (*StoragePinResult, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}
	payload := map[string]interface{}{"cid": cid}
	if name != "" {
		payload["name"] = name
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", s.getGatewayURL()+"/v1/storage/pin", bytes.NewReader(encoded))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	s.addAuthHeaders(req)
	httpClient := &http.Client{Timeout: 60 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("pin failed with status %d: %s", resp.StatusCode, string(msg))
	}
	out := &StoragePinResult{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return out, nil
}
// Status fetches the pin status for cid from the gateway. Requires valid
// client credentials.
func (s *StorageClientImpl) Status(ctx context.Context, cid string) (*StorageStatus, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "GET", s.getGatewayURL()+"/v1/storage/status/"+cid, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	s.addAuthHeaders(req)
	httpClient := &http.Client{Timeout: 30 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("status failed with status %d: %s", resp.StatusCode, string(msg))
	}
	out := &StorageStatus{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return out, nil
}
// Get downloads the content stored under cid from the gateway. The returned
// ReadCloser is the HTTP response body; the caller is responsible for
// closing it. Requires valid client credentials.
func (s *StorageClientImpl) Get(ctx context.Context, cid string) (io.ReadCloser, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "GET", s.getGatewayURL()+"/v1/storage/get/"+cid, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	s.addAuthHeaders(req)
	// Generous timeout: downloads may be large.
	httpClient := &http.Client{Timeout: 5 * time.Minute}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		// Non-OK: close the body ourselves since it is not handed back.
		resp.Body.Close()
		return nil, fmt.Errorf("get failed with status %d", resp.StatusCode)
	}
	return resp.Body, nil
}
// Unpin removes the pin for cid via the gateway. Requires valid client
// credentials.
func (s *StorageClientImpl) Unpin(ctx context.Context, cid string) error {
	if err := s.client.requireAccess(ctx); err != nil {
		return fmt.Errorf("authentication required: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "DELETE", s.getGatewayURL()+"/v1/storage/unpin/"+cid, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	s.addAuthHeaders(req)
	httpClient := &http.Client{Timeout: 30 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unpin failed with status %d: %s", resp.StatusCode, string(msg))
	}
	return nil
}
// getGatewayURL returns the configured gateway base URL without a trailing
// slash, defaulting to http://localhost:6001 when unset.
func (s *StorageClientImpl) getGatewayURL() string {
	if cfg := s.client.Config(); cfg != nil && cfg.GatewayURL != "" {
		return strings.TrimSuffix(cfg.GatewayURL, "/")
	}
	return "http://localhost:6001"
}
// addAuthHeaders attaches credentials from the client config: a JWT bearer
// token when present, otherwise the API key (sent as both an Authorization
// bearer and an X-API-Key header). With no config, the request is untouched.
func (s *StorageClientImpl) addAuthHeaders(req *http.Request) {
	cfg := s.client.Config()
	switch {
	case cfg == nil:
		return
	case cfg.JWT != "":
		// JWT takes precedence over the API key.
		req.Header.Set("Authorization", "Bearer "+cfg.JWT)
	case cfg.APIKey != "":
		req.Header.Set("Authorization", "Bearer "+cfg.APIKey)
		req.Header.Set("X-API-Key", cfg.APIKey)
	}
}

View File

@ -0,0 +1,378 @@
package client
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestStorageClientImpl_Upload exercises Upload against an httptest server:
// the happy path (well-formed multipart form, decoded response), a gateway
// 500 response, and a config with no credentials.
func TestStorageClientImpl_Upload(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmUpload123"
		expectedName := "test.txt"
		expectedSize := int64(100)
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/v1/storage/upload" {
				t.Errorf("Expected path '/v1/storage/upload', got %s", r.URL.Path)
			}
			// Verify multipart form
			if err := r.ParseMultipartForm(32 << 20); err != nil {
				t.Errorf("Failed to parse multipart form: %v", err)
				return
			}
			file, header, err := r.FormFile("file")
			if err != nil {
				t.Errorf("Failed to get file: %v", err)
				return
			}
			defer file.Close()
			if header.Filename != expectedName {
				t.Errorf("Expected filename %s, got %s", expectedName, header.Filename)
			}
			response := StorageUploadResult{
				Cid:  expectedCID,
				Name: expectedName,
				Size: expectedSize,
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader := strings.NewReader("test content")
		result, err := storage.Upload(context.Background(), reader, expectedName)
		if err != nil {
			t.Fatalf("Failed to upload: %v", err)
		}
		if result.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, result.Cid)
		}
		if result.Name != expectedName {
			t.Errorf("Expected name %s, got %s", expectedName, result.Name)
		}
		if result.Size != expectedSize {
			t.Errorf("Expected size %d, got %d", expectedSize, result.Size)
		}
	})
	t.Run("server_error", func(t *testing.T) {
		// Gateway replies 500: Upload must surface an error to the caller.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("internal error"))
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader := strings.NewReader("test")
		_, err := storage.Upload(context.Background(), reader, "test.txt")
		if err == nil {
			t.Error("Expected error for server error")
		}
	})
	t.Run("missing_credentials", func(t *testing.T) {
		// No credentials configured at all: the access check should reject
		// the call before any HTTP request is made.
		cfg := &ClientConfig{
			GatewayURL: "http://localhost:6001",
			// No AppName, JWT, or APIKey
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader := strings.NewReader("test")
		_, err := storage.Upload(context.Background(), reader, "test.txt")
		if err == nil {
			t.Error("Expected error for missing credentials")
		}
	})
}
// TestStorageClientImpl_Pin verifies the happy path of Pin against an
// httptest server: the request hits /v1/storage/pin with the CID in the JSON
// body, and the decoded response is returned to the caller.
func TestStorageClientImpl_Pin(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmPin123"
		expectedName := "pinned-file"
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/v1/storage/pin" {
				t.Errorf("Expected path '/v1/storage/pin', got %s", r.URL.Path)
			}
			var reqBody map[string]interface{}
			if err := json.NewDecoder(r.Body).Decode(&reqBody); err != nil {
				t.Errorf("Failed to decode request: %v", err)
				return
			}
			if reqBody["cid"] != expectedCID {
				t.Errorf("Expected CID %s, got %v", expectedCID, reqBody["cid"])
			}
			response := StoragePinResult{
				Cid:  expectedCID,
				Name: expectedName,
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		result, err := storage.Pin(context.Background(), expectedCID, expectedName)
		if err != nil {
			t.Fatalf("Failed to pin: %v", err)
		}
		if result.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, result.Cid)
		}
		if result.Name != expectedName {
			t.Errorf("Expected name %s, got %s", expectedName, result.Name)
		}
	})
}
// TestStorageClientImpl_Status verifies that Status queries
// /v1/storage/status/<cid> and decodes the StorageStatus payload.
func TestStorageClientImpl_Status(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		const wantCID = "QmStatus123"
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/v1/storage/status/") {
				t.Errorf("Expected path '/v1/storage/status/', got %s", r.URL.Path)
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(StorageStatus{
				Cid:               wantCID,
				Name:              "test-file",
				Status:            "pinned",
				ReplicationMin:    3,
				ReplicationMax:    3,
				ReplicationFactor: 3,
				Peers:             []string{"peer1", "peer2", "peer3"},
			})
		}))
		defer srv.Close()
		storage := &StorageClientImpl{client: &Client{config: &ClientConfig{
			GatewayURL: srv.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}}}
		status, err := storage.Status(context.Background(), wantCID)
		if err != nil {
			t.Fatalf("Failed to get status: %v", err)
		}
		if status.Cid != wantCID {
			t.Errorf("Expected CID %s, got %s", wantCID, status.Cid)
		}
		if status.Status != "pinned" {
			t.Errorf("Expected status 'pinned', got %s", status.Status)
		}
	})
}
func TestStorageClientImpl_Get(t *testing.T) {
t.Run("success", func(t *testing.T) {
expectedCID := "QmGet123"
expectedContent := "test content"
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.HasPrefix(r.URL.Path, "/v1/storage/get/") {
t.Errorf("Expected path '/v1/storage/get/', got %s", r.URL.Path)
}
w.Write([]byte(expectedContent))
}))
defer server.Close()
cfg := &ClientConfig{
GatewayURL: server.URL,
AppName: "test-app",
APIKey: "ak_test:test-app", // Required for requireAccess check
}
client := &Client{config: cfg}
storage := &StorageClientImpl{client: client}
reader, err := storage.Get(context.Background(), expectedCID)
if err != nil {
t.Fatalf("Failed to get content: %v", err)
}
defer reader.Close()
data, err := io.ReadAll(reader)
if err != nil {
t.Fatalf("Failed to read content: %v", err)
}
if string(data) != expectedContent {
t.Errorf("Expected content %s, got %s", expectedContent, string(data))
}
})
}
func TestStorageClientImpl_Unpin(t *testing.T) {
t.Run("success", func(t *testing.T) {
expectedCID := "QmUnpin123"
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.HasPrefix(r.URL.Path, "/v1/storage/unpin/") {
t.Errorf("Expected path '/v1/storage/unpin/', got %s", r.URL.Path)
}
if r.Method != "DELETE" {
t.Errorf("Expected method DELETE, got %s", r.Method)
}
w.WriteHeader(http.StatusOK)
}))
defer server.Close()
cfg := &ClientConfig{
GatewayURL: server.URL,
AppName: "test-app",
APIKey: "ak_test:test-app", // Required for requireAccess check
}
client := &Client{config: cfg}
storage := &StorageClientImpl{client: client}
err := storage.Unpin(context.Background(), expectedCID)
if err != nil {
t.Fatalf("Failed to unpin: %v", err)
}
})
}
// TestStorageClientImpl_getGatewayURL verifies the gateway-URL resolution
// order: explicit config value, then the http://localhost:6001 default when
// the config is empty or nil entirely.
func TestStorageClientImpl_getGatewayURL(t *testing.T) {
	storage := &StorageClientImpl{}
	cases := []struct {
		name string
		cfg  *ClientConfig
		want string
	}{
		{"from_config", &ClientConfig{GatewayURL: "http://custom:6001"}, "http://custom:6001"},
		{"default", &ClientConfig{}, "http://localhost:6001"},
		{"nil_config", nil, "http://localhost:6001"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			storage.client = &Client{config: tc.cfg}
			if got := storage.getGatewayURL(); got != tc.want {
				t.Errorf("Expected '%s', got %s", tc.want, got)
			}
		})
	}
}
// TestStorageClientImpl_addAuthHeaders covers the credential-selection rules:
// a JWT wins over an API key, an API key alone is sent as both a Bearer token
// and an X-API-Key header, and no headers are set when credentials (or the
// whole config) are absent.
func TestStorageClientImpl_addAuthHeaders(t *testing.T) {
	newStorage := func(cfg *ClientConfig) *StorageClientImpl {
		return &StorageClientImpl{client: &Client{config: cfg}}
	}
	t.Run("jwt_preferred", func(t *testing.T) {
		req := httptest.NewRequest("POST", "/test", nil)
		newStorage(&ClientConfig{JWT: "test-jwt-token", APIKey: "test-api-key"}).addAuthHeaders(req)
		if auth := req.Header.Get("Authorization"); auth != "Bearer test-jwt-token" {
			t.Errorf("Expected JWT in Authorization header, got %s", auth)
		}
	})
	t.Run("apikey_fallback", func(t *testing.T) {
		req := httptest.NewRequest("POST", "/test", nil)
		newStorage(&ClientConfig{APIKey: "test-api-key"}).addAuthHeaders(req)
		if auth := req.Header.Get("Authorization"); auth != "Bearer test-api-key" {
			t.Errorf("Expected API key in Authorization header, got %s", auth)
		}
		if apiKey := req.Header.Get("X-API-Key"); apiKey != "test-api-key" {
			t.Errorf("Expected API key in X-API-Key header, got %s", apiKey)
		}
	})
	t.Run("no_auth", func(t *testing.T) {
		req := httptest.NewRequest("POST", "/test", nil)
		newStorage(&ClientConfig{}).addAuthHeaders(req)
		if auth := req.Header.Get("Authorization"); auth != "" {
			t.Errorf("Expected no Authorization header, got %s", auth)
		}
	})
	t.Run("nil_config", func(t *testing.T) {
		req := httptest.NewRequest("POST", "/test", nil)
		newStorage(nil).addAuthHeaders(req)
		if auth := req.Header.Get("Authorization"); auth != "" {
			t.Errorf("Expected no Authorization header, got %s", auth)
		}
	})
}

View File

@ -36,11 +36,41 @@ type DatabaseConfig struct {
RQLitePort int `yaml:"rqlite_port"` // RQLite HTTP API port
RQLiteRaftPort int `yaml:"rqlite_raft_port"` // RQLite Raft consensus port
RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster
// Dynamic discovery configuration (always enabled)
ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s
PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h
MinClusterSize int `yaml:"min_cluster_size"` // default: 1
// Olric cache configuration
OlricHTTPPort int `yaml:"olric_http_port"` // Olric HTTP API port (default: 3320)
OlricMemberlistPort int `yaml:"olric_memberlist_port"` // Olric memberlist port (default: 3322)
// IPFS storage configuration
IPFS IPFSConfig `yaml:"ipfs"`
}
// IPFSConfig contains IPFS storage configuration.
// ClusterAPIURL acts as the on/off switch: leaving it empty disables IPFS
// storage for the node; the other fields have defaults applied elsewhere.
type IPFSConfig struct {
// ClusterAPIURL is the IPFS Cluster HTTP API URL (e.g., "http://localhost:9094")
// If empty, IPFS storage is disabled for this node
ClusterAPIURL string `yaml:"cluster_api_url"`
// APIURL is the IPFS HTTP API URL for content retrieval (e.g., "http://localhost:5001")
// If empty, defaults to "http://localhost:5001"
APIURL string `yaml:"api_url"`
// Timeout for IPFS operations
// If zero, defaults to 60 seconds
Timeout time.Duration `yaml:"timeout"`
// ReplicationFactor is the replication factor for pinned content
// If zero, defaults to 3
ReplicationFactor int `yaml:"replication_factor"`
// EnableEncryption enables client-side encryption before upload
// Defaults to true
// NOTE(review): the Go zero value for bool is false, so a YAML file that
// omits this key yields false unless a default is applied after unmarshal —
// confirm the config-loading path preserves the documented default.
EnableEncryption bool `yaml:"enable_encryption"`
}
// DiscoveryConfig contains peer discovery configuration
@ -111,11 +141,24 @@ func DefaultConfig() *Config {
RQLitePort: 5001,
RQLiteRaftPort: 7001,
RQLiteJoinAddress: "", // Empty for bootstrap node
// Dynamic discovery (always enabled)
ClusterSyncInterval: 30 * time.Second,
PeerInactivityLimit: 24 * time.Hour,
MinClusterSize: 1,
// Olric cache configuration
OlricHTTPPort: 3320,
OlricMemberlistPort: 3322,
// IPFS storage configuration
IPFS: IPFSConfig{
ClusterAPIURL: "", // Empty = disabled
APIURL: "http://localhost:5001",
Timeout: 60 * time.Second,
ReplicationFactor: 3,
EnableEncryption: true,
},
},
Discovery: DiscoveryConfig{
BootstrapPeers: []string{},

View File

@ -29,10 +29,49 @@ func EnsureConfigDir() (string, error) {
// DefaultPath returns the path to the config file for the given component name.
// component should be e.g., "node.yaml", "bootstrap.yaml", "gateway.yaml"
// It checks ~/.debros/data/, ~/.debros/configs/, and ~/.debros/ for backward compatibility.
// If component is already an absolute path, it returns it as-is.
func DefaultPath(component string) (string, error) {
// If component is already an absolute path, return it directly
if filepath.IsAbs(component) {
return component, nil
}
dir, err := ConfigDir()
if err != nil {
return "", err
}
return filepath.Join(dir, component), nil
var gatewayDefault string
// For gateway.yaml, check data/ directory first (production location)
if component == "gateway.yaml" {
dataPath := filepath.Join(dir, "data", component)
if _, err := os.Stat(dataPath); err == nil {
return dataPath, nil
}
// Remember the preferred default so we can still fall back to legacy paths
gatewayDefault = dataPath
}
// First check in ~/.debros/configs/ (production installer location)
configsPath := filepath.Join(dir, "configs", component)
if _, err := os.Stat(configsPath); err == nil {
return configsPath, nil
}
// Fallback to ~/.debros/ (legacy/development location)
legacyPath := filepath.Join(dir, component)
if _, err := os.Stat(legacyPath); err == nil {
return legacyPath, nil
}
if gatewayDefault != "" {
// If we preferred the data path (gateway.yaml) but didn't find it anywhere else,
// return the data path so error messages point to the production location.
return gatewayDefault, nil
}
// Return configs path as default (even if it doesn't exist yet)
// This allows the error message to show the expected production location
return configsPath, nil
}

View File

@ -235,11 +235,16 @@ func (c *Config) validateDatabase() []error {
}
}
} else if c.Node.Type == "bootstrap" {
// Bootstrap nodes can optionally join another bootstrap's RQLite cluster
// This allows secondary bootstraps to synchronize with the primary
if dc.RQLiteJoinAddress != "" {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "must be empty for bootstrap type",
})
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: err.Error(),
Hint: "expected format: host:port",
})
}
}
}
@ -488,12 +493,7 @@ func (c *Config) validateCrossFields() []error {
}
// Cross-check rqlite_join_address vs node type
if c.Node.Type == "bootstrap" && c.Database.RQLiteJoinAddress != "" {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "must be empty for bootstrap node type",
})
}
// Note: Bootstrap nodes can optionally join another bootstrap's cluster
if c.Node.Type == "node" && c.Database.RQLiteJoinAddress == "" {
errs = append(errs, ValidationError{

View File

@ -30,8 +30,8 @@ func validConfigForType(nodeType string) *Config {
BootstrapPeers: []string{validPeer},
DiscoveryInterval: 15 * time.Second,
BootstrapPort: 4001,
HttpAdvAddress: "127.0.0.1:5001",
RaftAdvAddress: "127.0.0.1:7001",
HttpAdvAddress: "localhost:5001",
RaftAdvAddress: "localhost:7001",
NodeNamespace: "default",
},
Logging: LoggingConfig{
@ -183,7 +183,7 @@ func TestValidateRQLiteJoinAddress(t *testing.T) {
}{
{"node with join", "node", "localhost:5001", false},
{"node without join", "node", "", true},
{"bootstrap with join", "bootstrap", "localhost:5001", true},
{"bootstrap with join", "bootstrap", "localhost:5001", false},
{"bootstrap without join", "bootstrap", "", false},
{"invalid join format", "node", "localhost", true},
{"invalid join port", "node", "localhost:99999", true},
@ -392,7 +392,7 @@ func TestValidateCompleteConfig(t *testing.T) {
BackupInterval: 24 * time.Hour,
RQLitePort: 5002,
RQLiteRaftPort: 7002,
RQLiteJoinAddress: "127.0.0.1:7001",
RQLiteJoinAddress: "localhost:7001",
MinClusterSize: 1,
},
Discovery: DiscoveryConfig{
@ -401,8 +401,8 @@ func TestValidateCompleteConfig(t *testing.T) {
},
DiscoveryInterval: 15 * time.Second,
BootstrapPort: 4001,
HttpAdvAddress: "127.0.0.1:5001",
RaftAdvAddress: "127.0.0.1:7001",
HttpAdvAddress: "localhost:5001",
RaftAdvAddress: "localhost:7001",
NodeNamespace: "default",
},
Security: SecurityConfig{

View File

@ -115,35 +115,34 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) {
continue
}
// Filter addresses to only include configured listen addresses, not ephemeral ports
// Ephemeral ports are typically > 32768, so we filter those out
// Filter addresses to only include port 4001 (standard libp2p port)
// This prevents including non-libp2p service ports (like RQLite ports) in peer exchange
const libp2pPort = 4001
filteredAddrs := make([]multiaddr.Multiaddr, 0)
filteredCount := 0
for _, addr := range addrs {
// Extract TCP port from multiaddr
port, err := addr.ValueForProtocol(multiaddr.P_TCP)
if err == nil {
portNum, err := strconv.Atoi(port)
if err == nil {
// Only include ports that are reasonable (not ephemeral ports > 32768)
// Common LibP2P ports are typically < 10000
if portNum > 0 && portNum <= 32767 {
// Only include addresses with port 4001
if portNum == libp2pPort {
filteredAddrs = append(filteredAddrs, addr)
} else {
filteredCount++
}
} else {
// If we can't parse port, include it anyway (might be non-TCP)
filteredAddrs = append(filteredAddrs, addr)
}
// Skip addresses with unparseable ports
} else {
// If no TCP port found, include it anyway (might be non-TCP)
filteredAddrs = append(filteredAddrs, addr)
// Skip non-TCP addresses (libp2p uses TCP)
filteredCount++
}
}
// If no addresses remain after filtering, skip this peer
// (Filtering is routine - no need to log every occurrence)
if len(filteredAddrs) == 0 {
d.logger.Debug("No valid addresses after filtering ephemeral ports",
zap.String("peer_id", pid.String()[:8]+"..."),
zap.Int("original_count", len(addrs)))
continue
}
@ -177,9 +176,7 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) {
return
}
d.logger.Debug("Sent peer exchange response",
zap.Int("peer_count", len(resp.Peers)),
zap.Bool("has_rqlite_metadata", resp.RQLiteMetadata != nil))
// Response sent - routine operation, no need to log
}
// Start begins periodic peer discovery
@ -222,9 +219,6 @@ func (d *Manager) discoverPeers(ctx context.Context, config Config) {
connectedPeers := d.host.Network().Peers()
initialCount := len(connectedPeers)
d.logger.Debug("Starting peer discovery",
zap.Int("current_peers", initialCount))
newConnections := 0
// Strategy 1: Try to connect to peers learned from the host's peerstore
@ -237,11 +231,12 @@ func (d *Manager) discoverPeers(ctx context.Context, config Config) {
finalPeerCount := len(d.host.Network().Peers())
// Summary log: only log if there were changes or new connections
if newConnections > 0 || finalPeerCount != initialCount {
d.logger.Debug("Peer discovery completed",
zap.Int("new_connections", newConnections),
zap.Int("initial_peers", initialCount),
zap.Int("final_peers", finalPeerCount))
d.logger.Debug("Discovery summary",
zap.Int("connected", finalPeerCount),
zap.Int("new", newConnections),
zap.Int("was", initialCount))
}
}
@ -256,7 +251,10 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
// Iterate over peerstore known peers
peers := d.host.Peerstore().Peers()
d.logger.Debug("Peerstore contains peers", zap.Int("count", len(peers)))
// Only connect to peers on our standard LibP2P port to avoid cross-connecting
// with IPFS/IPFS Cluster instances that use different ports
const libp2pPort = 4001
for _, pid := range peers {
if connected >= maxConnections {
@ -271,6 +269,24 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
continue
}
// Filter peers to only include those with addresses on our port (4001)
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096)
peerInfo := d.host.Peerstore().PeerInfo(pid)
hasValidPort := false
for _, addr := range peerInfo.Addrs {
if port, err := addr.ValueForProtocol(multiaddr.P_TCP); err == nil {
if portNum, err := strconv.Atoi(port); err == nil && portNum == libp2pPort {
hasValidPort = true
break
}
}
}
// Skip peers without valid port 4001 addresses
if !hasValidPort {
continue
}
// Try to connect
if err := d.connectToPeer(ctx, pid); err == nil {
connected++
@ -293,8 +309,8 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
return 0
}
d.logger.Debug("Starting peer exchange with connected peers",
zap.Int("num_peers", len(connectedPeers)))
exchangedPeers := 0
metadataCollected := 0
for _, peerID := range connectedPeers {
if connected >= maxConnections {
@ -307,9 +323,13 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
continue
}
d.logger.Debug("Received peer list from peer",
zap.String("from_peer", peerID.String()[:8]+"..."),
zap.Int("peer_count", len(peers)))
exchangedPeers++
// Check if we got RQLite metadata
if val, err := d.host.Peerstore().Get(peerID, "rqlite_metadata"); err == nil {
if _, ok := val.([]byte); ok {
metadataCollected++
}
}
// Try to connect to discovered peers
for _, peerInfo := range peers {
@ -334,7 +354,8 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
continue
}
// Parse addresses
// Parse and filter addresses to only include port 4001 (standard libp2p port)
const libp2pPort = 4001
addrs := make([]multiaddr.Multiaddr, 0, len(peerInfo.Addrs))
for _, addrStr := range peerInfo.Addrs {
ma, err := multiaddr.NewMultiaddr(addrStr)
@ -342,14 +363,24 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
d.logger.Debug("Failed to parse multiaddr", zap.Error(err))
continue
}
addrs = append(addrs, ma)
// Only include addresses with port 4001
port, err := ma.ValueForProtocol(multiaddr.P_TCP)
if err == nil {
portNum, err := strconv.Atoi(port)
if err == nil && portNum == libp2pPort {
addrs = append(addrs, ma)
}
// Skip addresses with wrong ports
}
// Skip non-TCP addresses
}
if len(addrs) == 0 {
// Skip peers without valid addresses - no need to log every occurrence
continue
}
// Add to peerstore
// Add to peerstore (only valid addresses with port 4001)
d.host.Peerstore().AddAddrs(parsedID, addrs, time.Hour*24)
// Try to connect
@ -358,20 +389,29 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
if err := d.host.Connect(connectCtx, peerAddrInfo); err != nil {
cancel()
d.logger.Debug("Failed to connect to discovered peer",
zap.String("peer_id", parsedID.String()[:8]+"..."),
// Only log connection failures for debugging - errors are still useful
d.logger.Debug("Connect failed",
zap.String("peer", parsedID.String()[:8]+"..."),
zap.Error(err))
continue
}
cancel()
d.logger.Info("Successfully connected to discovered peer",
zap.String("peer_id", parsedID.String()[:8]+"..."),
zap.String("discovered_from", peerID.String()[:8]+"..."))
d.logger.Info("Connected",
zap.String("peer", parsedID.String()[:8]+"..."),
zap.String("from", peerID.String()[:8]+"..."))
connected++
}
}
// Summary log for peer exchange
if exchangedPeers > 0 {
d.logger.Debug("Exchange summary",
zap.Int("exchanged_with", exchangedPeers),
zap.Int("metadata_collected", metadataCollected),
zap.Int("new_connections", connected))
}
return connected
}
@ -424,9 +464,10 @@ func (d *Manager) requestPeersFromPeer(ctx context.Context, peerID peer.ID, limi
metadataJSON, err := json.Marshal(resp.RQLiteMetadata)
if err == nil {
_ = d.host.Peerstore().Put(peerID, "rqlite_metadata", metadataJSON)
d.logger.Debug("Stored RQLite metadata from peer",
zap.String("peer_id", peerID.String()[:8]+"..."),
zap.String("node_id", resp.RQLiteMetadata.NodeID))
// Only log when new metadata is stored (useful for debugging)
d.logger.Debug("Metadata stored",
zap.String("peer", peerID.String()[:8]+"..."),
zap.String("node", resp.RQLiteMetadata.NodeID))
}
}
@ -442,9 +483,6 @@ func (d *Manager) TriggerPeerExchange(ctx context.Context) int {
return 0
}
d.logger.Info("Manually triggering peer exchange",
zap.Int("connected_peers", len(connectedPeers)))
metadataCollected := 0
for _, peerID := range connectedPeers {
// Request peer list from this peer (which includes their RQLite metadata)
@ -458,9 +496,9 @@ func (d *Manager) TriggerPeerExchange(ctx context.Context) int {
}
}
d.logger.Info("Peer exchange completed",
zap.Int("peers_with_metadata", metadataCollected),
zap.Int("total_peers", len(connectedPeers)))
d.logger.Info("Exchange completed",
zap.Int("peers", len(connectedPeers)),
zap.Int("with_metadata", metadataCollected))
return metadataCollected
}
@ -480,8 +518,7 @@ func (d *Manager) connectToPeer(ctx context.Context, peerID peer.ID) error {
return err
}
d.logger.Debug("Successfully connected to peer",
zap.String("peer_id", peerID.String()[:8]+"..."))
// Connection success logged at higher level - no need for duplicate DEBUG log
return nil
}

View File

@ -0,0 +1,136 @@
package development
import (
"fmt"
"net"
"os/exec"
"strings"
)
// Dependency represents an external binary dependency of the development
// environment, located on PATH via exec.LookPath.
type Dependency struct {
	// Name is the human-readable name used in error reports.
	Name string
	// Command is the executable name looked up on PATH.
	Command string
	MinVersion string // Optional: if set, try to check version
	// InstallHint is a one-line installation instruction shown when Command
	// is missing.
	InstallHint string
}
// DependencyChecker handles dependency validation for the local development
// environment.
type DependencyChecker struct {
	// dependencies is the full list of external binaries to verify.
	dependencies []Dependency
}
// NewDependencyChecker creates a new dependency checker pre-populated with
// every external binary the dev environment needs (IPFS, IPFS Cluster,
// RQLite, Olric, npm, OpenSSL). MinVersion is informational only here:
// CheckAll verifies presence on PATH, not versions.
func NewDependencyChecker() *DependencyChecker {
	return &DependencyChecker{
		dependencies: []Dependency{
			{
				Name:        "IPFS",
				Command:     "ipfs",
				MinVersion:  "0.25.0",
				InstallHint: "Install with: brew install ipfs (macOS) or https://docs.ipfs.tech/install/command-line/",
			},
			{
				Name:        "IPFS Cluster Service",
				Command:     "ipfs-cluster-service",
				MinVersion:  "1.0.0",
				InstallHint: "Install with: go install github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest",
			},
			{
				Name:        "RQLite",
				Command:     "rqlited",
				InstallHint: "Install with: brew install rqlite (macOS) or https://github.com/rqlite/rqlite/releases",
			},
			{
				Name:        "Olric Server",
				Command:     "olric-server",
				InstallHint: "Install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0",
			},
			{
				Name:        "npm (for Anyone)",
				Command:     "npm",
				InstallHint: "Install Node.js with: brew install node (macOS) or https://nodejs.org/",
			},
			{
				Name:        "OpenSSL",
				Command:     "openssl",
				InstallHint: "Install with: brew install openssl (macOS) - usually pre-installed on Linux",
			},
		},
	}
}
// CheckAll performs all dependency checks and returns a report.
//
// It returns the names of all missing dependencies plus a descriptive error
// listing install hints, or (nil, nil) when every binary is present on PATH.
func (dc *DependencyChecker) CheckAll() ([]string, error) {
	var missing []string
	var hints []string
	for _, dep := range dc.dependencies {
		if _, err := exec.LookPath(dep.Command); err != nil {
			missing = append(missing, dep.Name)
			hints = append(hints, fmt.Sprintf(" %s: %s", dep.Name, dep.InstallHint))
		}
	}
	if len(missing) == 0 {
		return nil, nil // All OK
	}
	errMsg := fmt.Sprintf("Missing %d required dependencies:\n%s\n\nInstall them with:\n%s",
		len(missing), strings.Join(missing, ", "), strings.Join(hints, "\n"))
	// Use a constant "%s" format so any '%' inside names or install hints
	// cannot be misread as a formatting verb (go vet flags fmt.Errorf with a
	// non-constant format string).
	return missing, fmt.Errorf("%s", errMsg)
}
// PortChecker validates that required ports are available before the dev
// environment is started.
type PortChecker struct {
	// ports is the list of TCP ports probed on 127.0.0.1.
	ports []int
}
// RequiredPorts defines all ports needed for dev environment
// Computed from DefaultTopology
// NOTE(review): evaluated once at package init — assumes DefaultTopology()
// is deterministic and side-effect free; confirm if the topology ever
// becomes configurable at runtime.
var RequiredPorts = DefaultTopology().AllPorts()
// NewPortChecker creates a new port checker with required ports
// (the package-level RequiredPorts slice derived from DefaultTopology).
func NewPortChecker() *PortChecker {
	return &PortChecker{
		ports: RequiredPorts,
	}
}
// CheckAll verifies all required ports are available.
//
// It returns the list of ports that could not be bound plus a descriptive
// error, or (nil, nil) when every port is free.
func (pc *PortChecker) CheckAll() ([]int, error) {
	var unavailable []int
	for _, port := range pc.ports {
		if !isPortAvailable(port) {
			unavailable = append(unavailable, port)
		}
	}
	if len(unavailable) == 0 {
		return nil, nil // All OK
	}
	errMsg := fmt.Sprintf("The following ports are unavailable: %v\n\nFree them or stop conflicting services and try again",
		unavailable)
	// Constant "%s" format avoids fmt.Errorf treating the pre-formatted
	// message as a format string (go vet printf check).
	return unavailable, fmt.Errorf("%s", errMsg)
}
// isPortAvailable checks if a TCP port is available for binding
func isPortAvailable(port int) bool {
// Port 0 is reserved and means "assign any available port"
if port == 0 {
return false
}
ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
if err != nil {
return false
}
ln.Close()
return true
}
// PortMap provides a human-readable mapping of ports to services,
// delegating to DefaultTopology so the map stays in sync with RequiredPorts.
func PortMap() map[int]string {
	return DefaultTopology().PortMap()
}

View File

@ -0,0 +1,91 @@
package development
import (
"testing"
)
// TestPortChecker verifies the checker is constructed with the full set of
// ports derived from DefaultTopology.
func TestPortChecker(t *testing.T) {
	pc := NewPortChecker()
	if pc == nil {
		t.Fatal("NewPortChecker returned nil")
	}
	if len(pc.ports) == 0 {
		t.Fatal("No ports defined in checker")
	}
	// The current topology yields exactly this many ports.
	const expectedPortCount = 44 // Based on RequiredPorts
	if got := len(pc.ports); got != expectedPortCount {
		t.Errorf("Expected %d ports, got %d", expectedPortCount, got)
	}
}
// TestPortMap verifies that PortMap is non-empty, covers the well-known dev
// ports, and that every entry carries a description.
func TestPortMap(t *testing.T) {
	pm := PortMap()
	if len(pm) == 0 {
		t.Fatal("PortMap returned empty map")
	}
	for _, port := range []int{4001, 5001, 7001, 6001, 3320, 9050, 9094} {
		if _, ok := pm[port]; !ok {
			t.Errorf("Expected port %d not found in PortMap", port)
		}
	}
	for port, desc := range pm {
		if desc == "" {
			t.Errorf("Port %d has empty description", port)
		}
	}
}
// TestDependencyChecker verifies the checker declares the core dev binaries
// and that each declared dependency carries an install hint.
func TestDependencyChecker(t *testing.T) {
	dc := NewDependencyChecker()
	if dc == nil {
		t.Fatal("NewDependencyChecker returned nil")
	}
	if len(dc.dependencies) == 0 {
		t.Fatal("No dependencies defined in checker")
	}
	// Index by command for direct lookup instead of a nested scan.
	byCommand := make(map[string]Dependency, len(dc.dependencies))
	for _, dep := range dc.dependencies {
		byCommand[dep.Command] = dep
	}
	for _, expected := range []string{"ipfs", "rqlited", "olric-server", "npm"} {
		dep, found := byCommand[expected]
		if !found {
			t.Errorf("Expected dependency %s not found", expected)
			continue
		}
		if dep.InstallHint == "" {
			t.Errorf("Dependency %s has no install hint", expected)
		}
	}
}
// TestIsPortAvailable checks the reserved-port rule (0 is never available)
// and probes a high port without failing the run if it happens to be busy.
func TestIsPortAvailable(t *testing.T) {
	const highPort = 65432
	if !isPortAvailable(highPort) {
		t.Logf("Port %d may be in use (this is non-fatal for testing)", highPort)
	}
	if isPortAvailable(0) {
		t.Error("Port 0 should not be available")
	}
}

View File

@ -0,0 +1,257 @@
package development
import (
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/DeBrosOfficial/network/pkg/encryption"
"github.com/DeBrosOfficial/network/pkg/environments/templates"
)
// ConfigEnsurer handles all config file creation and validation for the
// local development environment.
type ConfigEnsurer struct {
	// debrosDir is the base directory (typically ~/.debros) under which all
	// secrets, identities, and YAML configs are written.
	debrosDir string
}
// NewConfigEnsurer creates a new config ensurer rooted at debrosDir.
func NewConfigEnsurer(debrosDir string) *ConfigEnsurer {
	ensurer := &ConfigEnsurer{debrosDir: debrosDir}
	return ensurer
}
// EnsureAll ensures all necessary config files and secrets exist.
//
// Steps, in order: create the base and logs directories, generate shared
// secrets (cluster secret + IPFS swarm key), create/load identities for all
// bootstrap nodes (collecting their multiaddrs), render configs for every
// node in the topology, then render the gateway and Olric configs. Each
// failure is wrapped with the step that failed. Identities and secrets are
// reused when present; YAML configs are re-rendered on every call.
func (ce *ConfigEnsurer) EnsureAll() error {
	// Create directories
	if err := os.MkdirAll(ce.debrosDir, 0755); err != nil {
		return fmt.Errorf("failed to create .debros directory: %w", err)
	}
	if err := os.MkdirAll(filepath.Join(ce.debrosDir, "logs"), 0755); err != nil {
		return fmt.Errorf("failed to create logs directory: %w", err)
	}
	// Ensure shared secrets
	if err := ce.ensureSharedSecrets(); err != nil {
		return fmt.Errorf("failed to ensure shared secrets: %w", err)
	}
	// Load topology
	topology := DefaultTopology()
	// Generate identities for all bootstrap nodes and collect multiaddrs;
	// these addresses seed every node's bootstrap peer list below.
	bootstrapAddrs := []string{}
	for _, nodeSpec := range topology.GetBootstrapNodes() {
		addr, err := ce.ensureNodeIdentity(nodeSpec)
		if err != nil {
			return fmt.Errorf("failed to ensure identity for %s: %w", nodeSpec.Name, err)
		}
		bootstrapAddrs = append(bootstrapAddrs, addr)
	}
	// Ensure configs for all bootstrap and regular nodes
	for _, nodeSpec := range topology.Nodes {
		if err := ce.ensureNodeConfig(nodeSpec, bootstrapAddrs); err != nil {
			return fmt.Errorf("failed to ensure config for %s: %w", nodeSpec.Name, err)
		}
	}
	// Ensure gateway config
	if err := ce.ensureGateway(bootstrapAddrs); err != nil {
		return fmt.Errorf("failed to ensure gateway: %w", err)
	}
	// Ensure Olric config
	if err := ce.ensureOlric(); err != nil {
		return fmt.Errorf("failed to ensure olric: %w", err)
	}
	return nil
}
// ensureSharedSecrets creates the cluster secret and IPFS swarm key if they
// don't already exist; existing files are left untouched.
//
// Both files are written with mode 0600 since they are secrets. Unlike the
// previous version, a Stat failure other than "not exist" (e.g. permission
// denied) is now reported instead of being silently treated as "file
// already present".
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
	secretPath := filepath.Join(ce.debrosDir, "cluster-secret")
	if _, err := os.Stat(secretPath); os.IsNotExist(err) {
		secret := generateRandomHex(64) // 64 hex chars = 32 bytes
		if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
			return fmt.Errorf("failed to write cluster secret: %w", err)
		}
		fmt.Printf("✓ Generated cluster secret\n")
	} else if err != nil {
		return fmt.Errorf("failed to stat cluster secret: %w", err)
	}
	swarmKeyPath := filepath.Join(ce.debrosDir, "swarm.key")
	if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
		// IPFS private-swarm key file: protocol line, encoding line, hex key.
		keyHex := strings.ToUpper(generateRandomHex(64))
		content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
		if err := os.WriteFile(swarmKeyPath, []byte(content), 0600); err != nil {
			return fmt.Errorf("failed to write swarm key: %w", err)
		}
		fmt.Printf("✓ Generated IPFS swarm key\n")
	} else if err != nil {
		return fmt.Errorf("failed to stat swarm key: %w", err)
	}
	return nil
}
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr.
//
// If <debrosDir>/<DataDir>/identity.key is missing, a fresh identity is
// generated and saved; otherwise the existing key is loaded. The returned
// multiaddr is always loopback: /ip4/127.0.0.1/tcp/<P2PPort>/p2p/<peerID>.
// NOTE(review): a Stat error other than "not exist" falls through to the
// load branch and will surface as a LoadIdentity failure — confirm that is
// the intended behavior.
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
	nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
	identityPath := filepath.Join(nodeDir, "identity.key")
	// Create identity if missing
	var peerID string
	if _, err := os.Stat(identityPath); os.IsNotExist(err) {
		if err := os.MkdirAll(nodeDir, 0755); err != nil {
			return "", fmt.Errorf("failed to create node directory: %w", err)
		}
		info, err := encryption.GenerateIdentity()
		if err != nil {
			return "", fmt.Errorf("failed to generate identity: %w", err)
		}
		if err := encryption.SaveIdentity(info, identityPath); err != nil {
			return "", fmt.Errorf("failed to save identity: %w", err)
		}
		peerID = info.PeerID.String()
		fmt.Printf("✓ Generated %s identity (Peer ID: %s)\n", nodeSpec.Name, peerID)
	} else {
		info, err := encryption.LoadIdentity(identityPath)
		if err != nil {
			return "", fmt.Errorf("failed to load identity: %w", err)
		}
		peerID = info.PeerID.String()
	}
	// Return multiaddr
	return fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", nodeSpec.P2PPort, peerID), nil
}
// ensureNodeConfig creates or updates a node configuration YAML file.
//
// The file is rendered and written unconditionally (no existence check), so
// template changes propagate to existing dev environments on the next run.
// Bootstrap and regular nodes use different templates; the two branches are
// otherwise structurally identical render-and-write flows.
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, bootstrapAddrs []string) error {
	nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
	configPath := filepath.Join(ce.debrosDir, nodeSpec.ConfigFilename)
	if err := os.MkdirAll(nodeDir, 0755); err != nil {
		return fmt.Errorf("failed to create node directory: %w", err)
	}
	if nodeSpec.Role == "bootstrap" {
		// Generate bootstrap config
		data := templates.BootstrapConfigData{
			NodeID:            nodeSpec.Name,
			P2PPort:           nodeSpec.P2PPort,
			DataDir:           nodeDir,
			RQLiteHTTPPort:    nodeSpec.RQLiteHTTPPort,
			RQLiteRaftPort:    nodeSpec.RQLiteRaftPort,
			ClusterAPIPort:    nodeSpec.ClusterAPIPort,
			IPFSAPIPort:       nodeSpec.IPFSAPIPort,
			BootstrapPeers:    bootstrapAddrs,
			RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
		}
		config, err := templates.RenderBootstrapConfig(data)
		if err != nil {
			return fmt.Errorf("failed to render bootstrap config: %w", err)
		}
		if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
			return fmt.Errorf("failed to write bootstrap config: %w", err)
		}
		fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
	} else {
		// Generate regular node config
		data := templates.NodeConfigData{
			NodeID:            nodeSpec.Name,
			P2PPort:           nodeSpec.P2PPort,
			DataDir:           nodeDir,
			RQLiteHTTPPort:    nodeSpec.RQLiteHTTPPort,
			RQLiteRaftPort:    nodeSpec.RQLiteRaftPort,
			RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
			BootstrapPeers:    bootstrapAddrs,
			ClusterAPIPort:    nodeSpec.ClusterAPIPort,
			IPFSAPIPort:       nodeSpec.IPFSAPIPort,
		}
		config, err := templates.RenderNodeConfig(data)
		if err != nil {
			return fmt.Errorf("failed to render node config: %w", err)
		}
		if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
			return fmt.Errorf("failed to write node config: %w", err)
		}
		fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
	}
	return nil
}
// ensureGateway renders and writes gateway.yaml (overwritten on every call).
//
// The gateway is pointed at the first bootstrap node's IPFS Cluster / IPFS
// API ports and the topology's Olric HTTP endpoint.
func (ce *ConfigEnsurer) ensureGateway(bootstrapAddrs []string) error {
	configPath := filepath.Join(ce.debrosDir, "gateway.yaml")
	// Get first bootstrap's cluster API port for default
	topology := DefaultTopology()
	bootstraps := topology.GetBootstrapNodes()
	if len(bootstraps) == 0 {
		// Guard: the previous code indexed [0] unconditionally and would
		// panic on a topology without bootstrap nodes.
		return fmt.Errorf("topology defines no bootstrap nodes; cannot render gateway config")
	}
	firstBootstrap := bootstraps[0]
	data := templates.GatewayConfigData{
		ListenPort:     topology.GatewayPort,
		BootstrapPeers: bootstrapAddrs,
		OlricServers:   []string{fmt.Sprintf("127.0.0.1:%d", topology.OlricHTTPPort)},
		ClusterAPIPort: firstBootstrap.ClusterAPIPort,
		IPFSAPIPort:    firstBootstrap.IPFSAPIPort,
	}
	config, err := templates.RenderGatewayConfig(data)
	if err != nil {
		return fmt.Errorf("failed to render gateway config: %w", err)
	}
	if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
		return fmt.Errorf("failed to write gateway config: %w", err)
	}
	fmt.Printf("✓ Generated gateway.yaml\n")
	return nil
}
// ensureOlric renders olric-config.yaml into the .debros root.
//
// Olric binds to loopback using the topology's HTTP and memberlist ports.
func (ce *ConfigEnsurer) ensureOlric() error {
	topology := DefaultTopology()
	rendered, err := templates.RenderOlricConfig(templates.OlricConfigData{
		BindAddr:       "127.0.0.1",
		HTTPPort:       topology.OlricHTTPPort,
		MemberlistPort: topology.OlricMemberPort,
	})
	if err != nil {
		return fmt.Errorf("failed to render olric config: %w", err)
	}
	target := filepath.Join(ce.debrosDir, "olric-config.yaml")
	if err := os.WriteFile(target, []byte(rendered), 0644); err != nil {
		return fmt.Errorf("failed to write olric config: %w", err)
	}
	fmt.Printf("✓ Generated olric-config.yaml\n")
	return nil
}
// generateRandomHex generates a random hex string of specified length
func generateRandomHex(length int) string {
bytes := make([]byte, length/2)
if _, err := rand.Read(bytes); err != nil {
panic(fmt.Sprintf("failed to generate random bytes: %v", err))
}
return hex.EncodeToString(bytes)
}

View File

@ -0,0 +1,214 @@
package development
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os/exec"
"strings"
"time"
)
// HealthCheckResult represents the result of a health check.
type HealthCheckResult struct {
	Name    string // human-readable name of the check (e.g. "IPFS Peers")
	Healthy bool   // true when the check's success criteria were met
	Details string // accumulated per-node diagnostics, "; "-separated
}
// IPFSHealthCheck verifies IPFS peer connectivity for every dev node.
//
// For each node it shells out to `ipfs swarm peers` against that node's repo
// and counts non-empty output lines (one per connected peer). A node passes
// when it sees at least 3 peers; the overall check passes only when every
// node passes.
func (pm *ProcessManager) IPFSHealthCheck(ctx context.Context, nodes []ipfsNodeInfo) HealthCheckResult {
	result := HealthCheckResult{Name: "IPFS Peers"}
	healthyCount := 0
	for _, node := range nodes {
		// NOTE(review): relies on the ipfs CLI accepting a --repo-dir flag;
		// confirm the installed kubo version supports it (older releases only
		// honor the IPFS_PATH environment variable).
		cmd := exec.CommandContext(ctx, "ipfs", "swarm", "peers", "--repo-dir="+node.ipfsPath)
		output, err := cmd.CombinedOutput()
		if err != nil {
			result.Details += fmt.Sprintf("%s: error getting peers (%v); ", node.name, err)
			continue
		}
		// Split by newlines and count only non-empty lines.
		peerLines := strings.Split(strings.TrimSpace(string(output)), "\n")
		peerCount := 0
		for _, line := range peerLines {
			if strings.TrimSpace(line) != "" {
				peerCount++
			}
		}
		// With 5 nodes, expect each node to see at least 3 other peers.
		if peerCount < 3 {
			result.Details += fmt.Sprintf("%s: only %d peers (want 3+); ", node.name, peerCount)
		} else {
			result.Details += fmt.Sprintf("%s: %d peers; ", node.name, peerCount)
			healthyCount++
		}
	}
	// Require all nodes to have healthy peer counts.
	result.Healthy = healthyCount == len(nodes)
	return result
}
// RQLiteHealthCheck verifies RQLite cluster formation across the topology.
//
// Every node's /status endpoint is probed; the cluster is considered healthy
// when at least 3 of the nodes report cluster membership (Raft quorum).
func (pm *ProcessManager) RQLiteHealthCheck(ctx context.Context) HealthCheckResult {
	overall := HealthCheckResult{Name: "RQLite Cluster"}
	healthy := 0
	for _, spec := range DefaultTopology().Nodes {
		probe := pm.checkRQLiteNode(ctx, spec.Name, spec.RQLiteHTTPPort)
		if probe.Healthy {
			healthy++
		}
		overall.Details += fmt.Sprintf("%s: %s; ", spec.Name, probe.Details)
	}
	// Require at least 3 out of 5 nodes to be healthy for quorum.
	overall.Healthy = healthy >= 3
	return overall
}
// checkRQLiteNode queries a single RQLite node's /status endpoint and decides
// whether it is a healthy cluster member.
//
// A node is healthy when its Raft section (status.store.raft, RQLite 8
// layout) reports a non-empty leader address, or the node's own state is
// Leader or Follower. All failure modes return Healthy=false with a short
// diagnostic in Details.
//
// Fix: the ctx parameter was previously accepted but ignored (client.Get);
// the request is now bound to ctx so cancelling the surrounding health check
// aborts the probe.
func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, httpPort int) HealthCheckResult {
	result := HealthCheckResult{Name: fmt.Sprintf("RQLite-%s", name)}
	urlStr := fmt.Sprintf("http://localhost:%d/status", httpPort)
	client := &http.Client{Timeout: 2 * time.Second}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
	if err != nil {
		result.Details = fmt.Sprintf("connection failed: %v", err)
		return result
	}
	resp, err := client.Do(req)
	if err != nil {
		result.Details = fmt.Sprintf("connection failed: %v", err)
		return result
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		result.Details = fmt.Sprintf("HTTP %d", resp.StatusCode)
		return result
	}
	var status map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		result.Details = fmt.Sprintf("decode error: %v", err)
		return result
	}
	// Check the store.raft structure (RQLite 8 format).
	store, ok := status["store"].(map[string]interface{})
	if !ok {
		result.Details = "store data not found"
		return result
	}
	raft, ok := store["raft"].(map[string]interface{})
	if !ok {
		result.Details = "raft data not found"
		return result
	}
	// A known leader address means this node is a functioning member.
	leader, hasLeader := raft["leader"].(string)
	if hasLeader && leader != "" {
		result.Healthy = true
		result.Details = "cluster member with leader elected"
		return result
	}
	// Otherwise fall back to the node's own state - accept Leader or Follower.
	if state, ok := raft["state"].(string); ok {
		if state == "Leader" {
			result.Healthy = true
			result.Details = "this node is leader"
			return result
		}
		if state == "Follower" {
			result.Healthy = true
			result.Details = "this node is follower in cluster"
			return result
		}
		result.Details = fmt.Sprintf("state: %s", state)
		return result
	}
	result.Details = "not yet connected"
	return result
}
// LibP2PHealthCheck verifies that network nodes have peer connections.
//
// Connectivity is approximated via RQLite cluster membership: a node that has
// joined the Raft cluster must be able to reach its peers. The check passes
// when at least 3 nodes report membership.
func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResult {
	overall := HealthCheckResult{Name: "LibP2P/Node Peers"}
	connected := 0
	for _, spec := range DefaultTopology().Nodes {
		probe := pm.checkRQLiteNode(ctx, spec.Name, spec.RQLiteHTTPPort)
		if probe.Healthy {
			connected++
			overall.Details += fmt.Sprintf("%s: connected; ", spec.Name)
		} else {
			overall.Details += fmt.Sprintf("%s: %s; ", spec.Name, probe.Details)
		}
	}
	// Healthy if at least 3 nodes report connectivity.
	overall.Healthy = connected >= 3
	return overall
}
// HealthCheckWithRetry performs all cluster health checks with retry logic.
//
// It runs the IPFS, RQLite, and LibP2P checks up to `retries` times, waiting
// retryInterval between attempts and bounding the whole loop with `timeout`.
// Returns true as soon as every check passes in the same attempt; false on
// exhaustion or when the deadline fires while waiting between attempts.
//
// Progress is written to pm.logWriter, but detailed results are only printed
// on the first attempt, the last attempt, and every third attempt, to keep
// the log readable.
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
	fmt.Fprintf(pm.logWriter, "\n⚕ Validating cluster health...\n")
	deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	for attempt := 1; attempt <= retries; attempt++ {
		// Perform all checks on every attempt.
		ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
		rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
		libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)
		// Log results only on selected attempts (first, last, every 3rd).
		if attempt == 1 || attempt == retries || (attempt%3 == 0) {
			fmt.Fprintf(pm.logWriter, " Attempt %d/%d:\n", attempt, retries)
			pm.logHealthCheckResult(pm.logWriter, " ", ipfsResult)
			pm.logHealthCheckResult(pm.logWriter, " ", rqliteResult)
			pm.logHealthCheckResult(pm.logWriter, " ", libp2pResult)
		}
		// Success requires every check to pass within the same attempt.
		if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
			fmt.Fprintf(pm.logWriter, "\n✓ All health checks passed!\n")
			return true
		}
		// Wait before the next attempt unless the overall deadline fires first.
		if attempt < retries {
			select {
			case <-time.After(retryInterval):
				continue
			case <-deadlineCtx.Done():
				fmt.Fprintf(pm.logWriter, "\n❌ Health check timeout reached\n")
				return false
			}
		}
	}
	fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed after %d attempts\n", retries)
	return false
}
// logHealthCheckResult writes a single health check result to w, prefixed by
// indent and a pass/fail marker.
func (pm *ProcessManager) logHealthCheckResult(w io.Writer, indent string, result HealthCheckResult) {
	marker := "❌"
	if result.Healthy {
		marker = "✓"
	}
	fmt.Fprintf(w, "%s%s %s: %s\n", indent, marker, result.Name, result.Details)
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,206 @@
package development
import "fmt"
// NodeSpec defines configuration for a single dev environment node.
// Each node gets its own set of P2P, IPFS, RQLite and IPFS Cluster ports.
type NodeSpec struct {
	Name              string // bootstrap, bootstrap2, node2, node3, node4
	Role              string // "bootstrap" or "node"
	ConfigFilename    string // bootstrap.yaml, bootstrap2.yaml, node2.yaml, etc.
	DataDir           string // relative path from .debros root
	P2PPort           int    // LibP2P listen port
	IPFSAPIPort       int    // IPFS API port
	IPFSSwarmPort     int    // IPFS Swarm port
	IPFSGatewayPort   int    // IPFS HTTP Gateway port
	RQLiteHTTPPort    int    // RQLite HTTP API port
	RQLiteRaftPort    int    // RQLite Raft consensus port
	ClusterAPIPort    int    // IPFS Cluster REST API port
	ClusterPort       int    // IPFS Cluster P2P port
	RQLiteJoinTarget  string // which bootstrap RQLite port to join (empty for bootstraps that lead)
	ClusterJoinTarget string // which bootstrap cluster to join (empty for the bootstrap that leads)
}
// Topology defines the complete development environment topology:
// the node set plus ports for the shared (non-per-node) services.
type Topology struct {
	Nodes           []NodeSpec // dev nodes (see DefaultTopology: two bootstraps + three regular)
	GatewayPort     int        // HTTP gateway listen port
	OlricHTTPPort   int        // Olric HTTP API port
	OlricMemberPort int        // Olric memberlist port
	AnonSOCKSPort   int        // Anon SOCKS proxy port
}
// DefaultTopology returns the default five-node dev environment topology:
// two bootstrap nodes ("bootstrap", "bootstrap2") and three regular nodes
// ("node2".."node4"). All regular nodes and bootstrap2 join the first
// bootstrap's RQLite (localhost:7001) and IPFS Cluster (localhost:9096).
// All ports are distinct; see AllPorts/PortMap for the full listing.
func DefaultTopology() *Topology {
	return &Topology{
		Nodes: []NodeSpec{
			{
				Name:              "bootstrap",
				Role:              "bootstrap",
				ConfigFilename:    "bootstrap.yaml",
				DataDir:           "bootstrap",
				P2PPort:           4001,
				IPFSAPIPort:       4501,
				IPFSSwarmPort:     4101,
				IPFSGatewayPort:   7501,
				RQLiteHTTPPort:    5001,
				RQLiteRaftPort:    7001,
				ClusterAPIPort:    9094,
				ClusterPort:       9096,
				// Leads both the RQLite and IPFS Cluster groups, so no join targets.
				RQLiteJoinTarget:  "",
				ClusterJoinTarget: "",
			},
			{
				Name:              "bootstrap2",
				Role:              "bootstrap",
				ConfigFilename:    "bootstrap2.yaml",
				DataDir:           "bootstrap2",
				P2PPort:           4011,
				IPFSAPIPort:       4511,
				IPFSSwarmPort:     4111,
				IPFSGatewayPort:   7511,
				RQLiteHTTPPort:    5011,
				RQLiteRaftPort:    7011,
				ClusterAPIPort:    9104,
				ClusterPort:       9106,
				RQLiteJoinTarget:  "localhost:7001",
				ClusterJoinTarget: "localhost:9096",
			},
			{
				Name:              "node2",
				Role:              "node",
				ConfigFilename:    "node2.yaml",
				DataDir:           "node2",
				P2PPort:           4002,
				IPFSAPIPort:       4502,
				IPFSSwarmPort:     4102,
				IPFSGatewayPort:   7502,
				RQLiteHTTPPort:    5002,
				RQLiteRaftPort:    7002,
				ClusterAPIPort:    9114,
				ClusterPort:       9116,
				RQLiteJoinTarget:  "localhost:7001",
				ClusterJoinTarget: "localhost:9096",
			},
			{
				Name:              "node3",
				Role:              "node",
				ConfigFilename:    "node3.yaml",
				DataDir:           "node3",
				P2PPort:           4003,
				IPFSAPIPort:       4503,
				IPFSSwarmPort:     4103,
				IPFSGatewayPort:   7503,
				RQLiteHTTPPort:    5003,
				RQLiteRaftPort:    7003,
				ClusterAPIPort:    9124,
				ClusterPort:       9126,
				RQLiteJoinTarget:  "localhost:7001",
				ClusterJoinTarget: "localhost:9096",
			},
			{
				Name:              "node4",
				Role:              "node",
				ConfigFilename:    "node4.yaml",
				DataDir:           "node4",
				P2PPort:           4004,
				IPFSAPIPort:       4504,
				IPFSSwarmPort:     4104,
				IPFSGatewayPort:   7504,
				RQLiteHTTPPort:    5004,
				RQLiteRaftPort:    7004,
				ClusterAPIPort:    9134,
				ClusterPort:       9136,
				RQLiteJoinTarget:  "localhost:7001",
				ClusterJoinTarget: "localhost:9096",
			},
		},
		GatewayPort:     6001,
		OlricHTTPPort:   3320,
		OlricMemberPort: 3322,
		AnonSOCKSPort:   9050,
	}
}
// AllPorts returns every port used in the topology: the eight per-node ports
// for each node, followed by the shared service ports.
func (t *Topology) AllPorts() []int {
	out := make([]int, 0, len(t.Nodes)*8+4)
	for _, n := range t.Nodes {
		out = append(out,
			n.P2PPort,
			n.IPFSAPIPort,
			n.IPFSSwarmPort,
			n.IPFSGatewayPort,
			n.RQLiteHTTPPort,
			n.RQLiteRaftPort,
			n.ClusterAPIPort,
			n.ClusterPort,
		)
	}
	return append(out,
		t.GatewayPort,
		t.OlricHTTPPort,
		t.OlricMemberPort,
		t.AnonSOCKSPort,
	)
}
// PortMap returns a human-readable mapping of every port to the service that
// owns it. Per-node entries are written first, shared services last.
func (t *Topology) PortMap() map[int]string {
	m := make(map[int]string)
	for _, n := range t.Nodes {
		labels := map[int]string{
			n.P2PPort:         fmt.Sprintf("%s P2P", n.Name),
			n.IPFSAPIPort:     fmt.Sprintf("%s IPFS API", n.Name),
			n.IPFSSwarmPort:   fmt.Sprintf("%s IPFS Swarm", n.Name),
			n.IPFSGatewayPort: fmt.Sprintf("%s IPFS Gateway", n.Name),
			n.RQLiteHTTPPort:  fmt.Sprintf("%s RQLite HTTP", n.Name),
			n.RQLiteRaftPort:  fmt.Sprintf("%s RQLite Raft", n.Name),
			n.ClusterAPIPort:  fmt.Sprintf("%s IPFS Cluster API", n.Name),
			n.ClusterPort:     fmt.Sprintf("%s IPFS Cluster P2P", n.Name),
		}
		for port, label := range labels {
			m[port] = label
		}
	}
	m[t.GatewayPort] = "Gateway"
	m[t.OlricHTTPPort] = "Olric HTTP API"
	m[t.OlricMemberPort] = "Olric Memberlist"
	m[t.AnonSOCKSPort] = "Anon SOCKS Proxy"
	return m
}
// GetBootstrapNodes returns the subset of nodes whose role is "bootstrap",
// preserving topology order.
func (t *Topology) GetBootstrapNodes() []NodeSpec {
	var out []NodeSpec
	for _, n := range t.Nodes {
		if n.Role != "bootstrap" {
			continue
		}
		out = append(out, n)
	}
	return out
}
// GetRegularNodes returns the subset of nodes whose role is "node"
// (non-bootstrap), preserving topology order.
func (t *Topology) GetRegularNodes() []NodeSpec {
	var out []NodeSpec
	for _, n := range t.Nodes {
		if n.Role != "node" {
			continue
		}
		out = append(out, n)
	}
	return out
}
// GetNodeByName returns a pointer into t.Nodes for the node with the given
// name, or nil when no node matches.
func (t *Topology) GetNodeByName(name string) *NodeSpec {
	for i := range t.Nodes {
		if t.Nodes[i].Name == name {
			return &t.Nodes[i]
		}
	}
	return nil
}

View File

@ -0,0 +1,299 @@
package production
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
)
// OSInfo contains detected operating system information, as parsed from
// /etc/os-release by OSDetector.Detect.
type OSInfo struct {
	ID      string // distro ID: ubuntu, debian, etc.
	Version string // VERSION_ID: 22.04, 24.04, 12, etc.
	Name    string // full name, e.g. "ubuntu 24.04"
}

// PrivilegeChecker validates root access and user context.
type PrivilegeChecker struct{}
// CheckRoot verifies the process is running with effective UID 0 (root).
func (pc *PrivilegeChecker) CheckRoot() error {
	if euid := os.Geteuid(); euid != 0 {
		return fmt.Errorf("this command must be run as root (use sudo)")
	}
	return nil
}
// CheckLinuxOS verifies the process is running on Linux.
func (pc *PrivilegeChecker) CheckLinuxOS() error {
	if goos := runtime.GOOS; goos != "linux" {
		return fmt.Errorf("production setup is only supported on Linux (detected: %s)", goos)
	}
	return nil
}
// OSDetector detects the Linux distribution.
type OSDetector struct{}

// Detect returns information about the detected OS.
//
// It parses /etc/os-release, reading the ID and VERSION_ID fields (double
// quotes stripped). Fails when the file is unreadable or contains no ID
// entry; VERSION_ID is optional.
func (od *OSDetector) Detect() (*OSInfo, error) {
	data, err := os.ReadFile("/etc/os-release")
	if err != nil {
		return nil, fmt.Errorf("cannot detect operating system: %w", err)
	}
	lines := strings.Split(string(data), "\n")
	var id, version string
	for _, line := range lines {
		line = strings.TrimSpace(line)
		// Values may be bare (ID=ubuntu) or quoted (VERSION_ID="24.04").
		if strings.HasPrefix(line, "ID=") {
			id = strings.Trim(strings.TrimPrefix(line, "ID="), "\"")
		}
		if strings.HasPrefix(line, "VERSION_ID=") {
			version = strings.Trim(strings.TrimPrefix(line, "VERSION_ID="), "\"")
		}
	}
	if id == "" {
		return nil, fmt.Errorf("could not detect OS ID from /etc/os-release")
	}
	// Human-readable name, e.g. "ubuntu 24.04"; version may be absent.
	name := id
	if version != "" {
		name = fmt.Sprintf("%s %s", id, version)
	}
	return &OSInfo{
		ID:      id,
		Version: version,
		Name:    name,
	}, nil
}
// IsSupportedOS reports whether the detected OS/version pair is supported
// for production deployment (Ubuntu 22.04/24.04/25.04, Debian 12).
func (od *OSDetector) IsSupportedOS(info *OSInfo) bool {
	supported := map[string][]string{
		"ubuntu": {"22.04", "24.04", "25.04"},
		"debian": {"12"},
	}
	// An unknown distro ID yields a nil slice, so the loop simply never matches.
	for _, v := range supported[info.ID] {
		if v == info.Version {
			return true
		}
	}
	return false
}
// ArchitectureDetector detects the system architecture
type ArchitectureDetector struct{}
// Detect returns the detected architecture as a string usable for downloads
func (ad *ArchitectureDetector) Detect() (string, error) {
arch := runtime.GOARCH
switch arch {
case "amd64":
return "amd64", nil
case "arm64":
return "arm64", nil
case "arm":
return "arm", nil
default:
return "", fmt.Errorf("unsupported architecture: %s", arch)
}
}
// DependencyChecker validates external tool availability.
type DependencyChecker struct {
	skipOptional bool // when true, missing optional dependencies are not reported
}

// NewDependencyChecker creates a new checker.
// skipOptional controls whether missing optional dependencies are ignored
// by CheckAll.
func NewDependencyChecker(skipOptional bool) *DependencyChecker {
	return &DependencyChecker{
		skipOptional: skipOptional,
	}
}

// Dependency represents an external binary dependency.
type Dependency struct {
	Name        string // display name
	Command     string // binary name looked up on PATH
	Optional    bool   // optional deps may be skipped (see DependencyChecker.skipOptional)
	InstallHint string // human-readable installation instructions
}
// CheckAll validates all required dependencies (curl, git, make). It returns
// the missing ones together with an aggregated error that lists install
// hints, or (nil, nil) when everything is present.
func (dc *DependencyChecker) CheckAll() ([]Dependency, error) {
	required := []Dependency{
		{
			Name:        "curl",
			Command:     "curl",
			Optional:    false,
			InstallHint: "Usually pre-installed; if missing: apt-get install curl",
		},
		{
			Name:        "git",
			Command:     "git",
			Optional:    false,
			InstallHint: "Install with: apt-get install git",
		},
		{
			Name:        "make",
			Command:     "make",
			Optional:    false,
			InstallHint: "Install with: apt-get install make",
		},
	}
	var missing []Dependency
	for _, dep := range required {
		if _, err := exec.LookPath(dep.Command); err == nil {
			continue
		}
		// Optional dependencies are only reported when not skipped.
		if !dep.Optional || !dc.skipOptional {
			missing = append(missing, dep)
		}
	}
	if len(missing) == 0 {
		return nil, nil
	}
	errMsg := "missing required dependencies:\n"
	for _, dep := range missing {
		errMsg += fmt.Sprintf(" - %s (%s): %s\n", dep.Name, dep.Command, dep.InstallHint)
	}
	return missing, fmt.Errorf("%s", errMsg)
}
// ExternalToolChecker validates external tool versions and availability
type ExternalToolChecker struct{}
// CheckIPFSAvailable checks if IPFS is available in PATH
func (etc *ExternalToolChecker) CheckIPFSAvailable() bool {
_, err := exec.LookPath("ipfs")
return err == nil
}
// CheckIPFSClusterAvailable checks if IPFS Cluster Service is available
func (etc *ExternalToolChecker) CheckIPFSClusterAvailable() bool {
_, err := exec.LookPath("ipfs-cluster-service")
return err == nil
}
// CheckRQLiteAvailable checks if RQLite is available
func (etc *ExternalToolChecker) CheckRQLiteAvailable() bool {
_, err := exec.LookPath("rqlited")
return err == nil
}
// CheckOlricAvailable checks if Olric Server is available
func (etc *ExternalToolChecker) CheckOlricAvailable() bool {
_, err := exec.LookPath("olric-server")
return err == nil
}
// CheckAnonAvailable checks if Anon is available (optional)
func (etc *ExternalToolChecker) CheckAnonAvailable() bool {
_, err := exec.LookPath("anon")
return err == nil
}
// CheckGoAvailable checks if Go is installed
func (etc *ExternalToolChecker) CheckGoAvailable() bool {
_, err := exec.LookPath("go")
return err == nil
}
// ResourceChecker validates system resources for production deployment.
type ResourceChecker struct{}

// NewResourceChecker creates a new resource checker.
func NewResourceChecker() *ResourceChecker {
	return new(ResourceChecker)
}
// CheckDiskSpace validates sufficient disk space (minimum 10GB free).
//
// If path does not exist yet, its nearest existing ancestor is checked
// instead, so the check works before install directories are created.
func (rc *ResourceChecker) CheckDiskSpace(path string) error {
	probe := path
	for probe != "/" {
		if _, err := os.Stat(probe); err == nil {
			break
		}
		probe = filepath.Dir(probe)
	}
	var fs syscall.Statfs_t
	if err := syscall.Statfs(probe, &fs); err != nil {
		return fmt.Errorf("failed to check disk space: %w", err)
	}
	// Free space available to unprivileged processes, in bytes.
	free := fs.Bavail * uint64(fs.Bsize)
	const minRequired = uint64(10 * 1024 * 1024 * 1024) // 10GB
	if free < minRequired {
		freeGB := float64(free) / (1024 * 1024 * 1024)
		return fmt.Errorf("insufficient disk space: %.1fGB available, minimum 10GB required", freeGB)
	}
	return nil
}
// CheckRAM validates sufficient RAM (minimum 2GB total).
//
// Total memory is read from the MemTotal line of /proc/meminfo (value in kB).
func (rc *ResourceChecker) CheckRAM() error {
	data, err := os.ReadFile("/proc/meminfo")
	if err != nil {
		return fmt.Errorf("failed to read memory info: %w", err)
	}
	var totalKB uint64
	for _, line := range strings.Split(string(data), "\n") {
		if !strings.HasPrefix(line, "MemTotal:") {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) >= 2 {
			if kb, err := strconv.ParseUint(fields[1], 10, 64); err == nil {
				totalKB = kb
				break
			}
		}
	}
	if totalKB == 0 {
		return fmt.Errorf("could not determine total RAM")
	}
	const minRequiredKB = uint64(2 * 1024 * 1024) // 2GB in kB
	if totalKB < minRequiredKB {
		totalGB := float64(totalKB) / (1024 * 1024)
		return fmt.Errorf("insufficient RAM: %.1fGB total, minimum 2GB required", totalGB)
	}
	return nil
}
// CheckCPU validates sufficient CPU cores (minimum 2 logical cores).
func (rc *ResourceChecker) CheckCPU() error {
	if n := runtime.NumCPU(); n < 2 {
		return fmt.Errorf("insufficient CPU cores: %d available, minimum 2 required", n)
	}
	return nil
}

View File

@ -0,0 +1,440 @@
package production
import (
"crypto/rand"
"encoding/hex"
"fmt"
"net"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"github.com/DeBrosOfficial/network/pkg/environments/templates"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
)
// ConfigGenerator manages generation of node, gateway, and service configs
// for production deployments.
type ConfigGenerator struct {
	debrosDir string // root directory for DeBros state (configs, data, secrets)
}

// NewConfigGenerator creates a new config generator rooted at debrosDir.
func NewConfigGenerator(debrosDir string) *ConfigGenerator {
	return &ConfigGenerator{
		debrosDir: debrosDir,
	}
}
// extractIPFromMultiaddr extracts the IP address from a bootstrap peer
// multiaddr. Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols: a direct
// IP component wins; otherwise the last DNS name seen is resolved, preferring
// IPv4 results. Returns "" when nothing can be extracted or resolved.
func extractIPFromMultiaddr(multiaddrStr string) string {
	addr, err := multiaddr.NewMultiaddr(multiaddrStr)
	if err != nil {
		return ""
	}
	var (
		parsedIP net.IP
		hostname string
	)
	multiaddr.ForEach(addr, func(c multiaddr.Component) bool {
		code := c.Protocol().Code
		if code == multiaddr.P_IP4 || code == multiaddr.P_IP6 {
			parsedIP = net.ParseIP(c.Value())
			return false // stop - a direct IP component takes precedence
		}
		if code == multiaddr.P_DNS4 || code == multiaddr.P_DNS6 || code == multiaddr.P_DNSADDR {
			hostname = c.Value() // remember as fallback, keep scanning for an IP
		}
		return true
	})
	if parsedIP != nil {
		return parsedIP.String()
	}
	if hostname == "" {
		return ""
	}
	resolved, err := net.LookupIP(hostname)
	if err != nil || len(resolved) == 0 {
		return ""
	}
	// Prefer IPv4 addresses, but accept IPv6 if that's all we have.
	for _, r := range resolved {
		if r.To4() != nil {
			return r.String()
		}
	}
	return resolved[0].String()
}
// inferBootstrapIP extracts the first resolvable IP address from the given
// bootstrap peer multiaddrs (DNS names are resolved). When none yields an IP,
// vpsIP is returned as-is - which may be "", meaning "unknown" to callers.
func inferBootstrapIP(bootstrapPeers []string, vpsIP string) string {
	for _, addr := range bootstrapPeers {
		if ip := extractIPFromMultiaddr(addr); ip != "" {
			return ip
		}
	}
	return vpsIP
}
// GenerateNodeConfig generates node.yaml configuration.
//
// isBootstrap selects between the bootstrap and regular-node templates.
// bootstrapPeers are the multiaddrs of known bootstrap nodes; vpsIP, when
// non-empty, is used as this machine's publicly advertised address for the
// RQLite HTTP (port 5001) and Raft (port 7001) endpoints. bootstrapJoin is an
// optional RQLite join address for secondary bootstraps.
//
// For regular nodes the RQLite join address is derived exclusively from
// bootstrapPeers (never vpsIP, which would cause a self-join), and the
// function fails when it cannot extract one, or when the derived join address
// equals this node's own Raft advertise address.
//
// NOTE(review): for a regular node with empty vpsIP, the advertise addresses
// fall back to the bootstrap's IP, which then always equals the join address
// and trips the self-join check below - effectively vpsIP looks mandatory for
// regular nodes. Confirm whether that is intended.
func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers []string, vpsIP string, bootstrapJoin string) (string, error) {
	var nodeID string
	if isBootstrap {
		nodeID = "bootstrap"
	} else {
		nodeID = "node"
	}
	// Determine advertise addresses.
	// For bootstrap: use vpsIP if provided, otherwise localhost.
	// For regular nodes: infer from bootstrap peers or use vpsIP.
	var httpAdvAddr, raftAdvAddr string
	if isBootstrap {
		if vpsIP != "" {
			httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
			raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
		} else {
			httpAdvAddr = "localhost:5001"
			raftAdvAddr = "localhost:7001"
		}
	} else {
		// Regular node: infer from bootstrap peers or use vpsIP.
		bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
		if bootstrapIP != "" {
			// Prefer the explicitly provided vpsIP; otherwise fall back to
			// the bootstrap's IP (assumes same reachable network).
			if vpsIP != "" {
				httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
				raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
			} else {
				httpAdvAddr = net.JoinHostPort(bootstrapIP, "5001")
				raftAdvAddr = net.JoinHostPort(bootstrapIP, "7001")
			}
		} else {
			// Fallback to localhost if nothing can be inferred.
			httpAdvAddr = "localhost:5001"
			raftAdvAddr = "localhost:7001"
		}
	}
	if isBootstrap {
		// Bootstrap node - populate peer list and optional join address.
		data := templates.BootstrapConfigData{
			NodeID:            nodeID,
			P2PPort:           4001,
			DataDir:           filepath.Join(cg.debrosDir, "data", "bootstrap"),
			RQLiteHTTPPort:    5001,
			RQLiteRaftPort:    7001,
			ClusterAPIPort:    9094,
			IPFSAPIPort:       4501,
			BootstrapPeers:    bootstrapPeers,
			RQLiteJoinAddress: bootstrapJoin,
			HTTPAdvAddress:    httpAdvAddr,
			RaftAdvAddress:    raftAdvAddr,
		}
		return templates.RenderBootstrapConfig(data)
	}
	// Regular node - infer join address from bootstrap peers.
	// MUST extract from bootstrap_peers - no fallback to vpsIP (would cause self-join).
	var rqliteJoinAddr string
	bootstrapIP := inferBootstrapIP(bootstrapPeers, "")
	if bootstrapIP == "" {
		// Try to extract from first bootstrap peer directly as fallback.
		if len(bootstrapPeers) > 0 {
			if extractedIP := extractIPFromMultiaddr(bootstrapPeers[0]); extractedIP != "" {
				bootstrapIP = extractedIP
			}
		}
		// If still no IP, fail - we cannot join without a valid bootstrap address.
		if bootstrapIP == "" {
			return "", fmt.Errorf("cannot determine RQLite join address: failed to extract IP from bootstrap peers %v (required for non-bootstrap nodes)", bootstrapPeers)
		}
	}
	rqliteJoinAddr = net.JoinHostPort(bootstrapIP, "7001")
	// Validate that join address doesn't match this node's own raft address (would cause self-join).
	if rqliteJoinAddr == raftAdvAddr {
		return "", fmt.Errorf("invalid configuration: rqlite_join_address (%s) cannot match raft_adv_address (%s) - node cannot join itself", rqliteJoinAddr, raftAdvAddr)
	}
	data := templates.NodeConfigData{
		NodeID:            nodeID,
		P2PPort:           4001,
		DataDir:           filepath.Join(cg.debrosDir, "data", "node"),
		RQLiteHTTPPort:    5001,
		RQLiteRaftPort:    7001,
		RQLiteJoinAddress: rqliteJoinAddr,
		BootstrapPeers:    bootstrapPeers,
		ClusterAPIPort:    9094,
		IPFSAPIPort:       4501,
		HTTPAdvAddress:    httpAdvAddr,
		RaftAdvAddress:    raftAdvAddr,
	}
	return templates.RenderNodeConfig(data)
}
// GenerateGatewayConfig generates gateway.yaml configuration.
//
// When enableHTTPS is set, a TLS certificate cache directory is placed under
// the debros root; domain names the HTTPS endpoint. The gateway talks to the
// local IPFS Cluster API (9094), IPFS API (4501) and the given Olric servers.
func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
	var tlsCacheDir string
	if enableHTTPS {
		tlsCacheDir = filepath.Join(cg.debrosDir, "tls-cache")
	}
	return templates.RenderGatewayConfig(templates.GatewayConfigData{
		ListenPort:     6001,
		BootstrapPeers: bootstrapPeers,
		OlricServers:   olricServers,
		ClusterAPIPort: 9094,
		IPFSAPIPort:    4501,
		EnableHTTPS:    enableHTTPS,
		DomainName:     domain,
		TLSCacheDir:    tlsCacheDir,
		RQLiteDSN:      "", // Empty for now, can be configured later
	})
}
// GenerateOlricConfig generates the Olric configuration for the given bind
// address, HTTP API port, and memberlist port.
func (cg *ConfigGenerator) GenerateOlricConfig(bindAddr string, httpPort, memberlistPort int) (string, error) {
	return templates.RenderOlricConfig(templates.OlricConfigData{
		BindAddr:       bindAddr,
		HTTPPort:       httpPort,
		MemberlistPort: memberlistPort,
	})
}
// SecretGenerator manages generation of shared secrets and keys
// (IPFS Cluster secret, private swarm key, LibP2P node identity).
type SecretGenerator struct {
	debrosDir             string // root directory holding the secrets/ and data/ subdirectories
	clusterSecretOverride string // operator-supplied cluster secret; wins over any stored secret
}

// NewSecretGenerator creates a new secret generator rooted at debrosDir.
// clusterSecretOverride, when non-empty, forces EnsureClusterSecret to use
// that value instead of reading or generating one.
func NewSecretGenerator(debrosDir string, clusterSecretOverride string) *SecretGenerator {
	return &SecretGenerator{
		debrosDir:             debrosDir,
		clusterSecretOverride: clusterSecretOverride,
	}
}
// ValidateClusterSecret ensures a cluster secret is 32 bytes of hex
func ValidateClusterSecret(secret string) error {
secret = strings.TrimSpace(secret)
if secret == "" {
return fmt.Errorf("cluster secret cannot be empty")
}
if len(secret) != 64 {
return fmt.Errorf("cluster secret must be 64 hex characters (32 bytes)")
}
if _, err := hex.DecodeString(secret); err != nil {
return fmt.Errorf("cluster secret must be valid hex: %w", err)
}
return nil
}
// EnsureClusterSecret gets or generates the IPFS Cluster secret.
//
// Resolution order:
//  1. the operator-supplied override (validated, persisted if changed),
//  2. an existing 64-char secret file at secrets/cluster-secret,
//  3. a freshly generated random 32-byte secret.
//
// In all cases the secret file ends up with 0600 permissions (and debros
// ownership when that user exists - see ensureSecretFilePermissions).
func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
	secretPath := filepath.Join(sg.debrosDir, "secrets", "cluster-secret")
	secretDir := filepath.Dir(secretPath)
	// Ensure secrets directory exists.
	if err := os.MkdirAll(secretDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create secrets directory: %w", err)
	}
	// Use override if provided.
	if sg.clusterSecretOverride != "" {
		secret := strings.TrimSpace(sg.clusterSecretOverride)
		if err := ValidateClusterSecret(secret); err != nil {
			return "", err
		}
		// Only rewrite the file when its contents differ from the override.
		needsWrite := true
		if data, err := os.ReadFile(secretPath); err == nil {
			if strings.TrimSpace(string(data)) == secret {
				needsWrite = false
			}
		}
		if needsWrite {
			if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
				return "", fmt.Errorf("failed to save cluster secret override: %w", err)
			}
		}
		if err := ensureSecretFilePermissions(secretPath); err != nil {
			return "", err
		}
		return secret, nil
	}
	// Try to read existing secret; anything that is not exactly 64 chars is
	// treated as invalid and regenerated below.
	if data, err := os.ReadFile(secretPath); err == nil {
		secret := strings.TrimSpace(string(data))
		if len(secret) == 64 {
			if err := ensureSecretFilePermissions(secretPath); err != nil {
				return "", err
			}
			return secret, nil
		}
	}
	// Generate new secret (32 bytes = 64 hex chars).
	bytes := make([]byte, 32)
	if _, err := rand.Read(bytes); err != nil {
		return "", fmt.Errorf("failed to generate cluster secret: %w", err)
	}
	secret := hex.EncodeToString(bytes)
	// Write and protect.
	if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
		return "", fmt.Errorf("failed to save cluster secret: %w", err)
	}
	if err := ensureSecretFilePermissions(secretPath); err != nil {
		return "", err
	}
	return secret, nil
}
func ensureSecretFilePermissions(secretPath string) error {
if err := os.Chmod(secretPath, 0600); err != nil {
return fmt.Errorf("failed to set permissions on %s: %w", secretPath, err)
}
if usr, err := user.Lookup("debros"); err == nil {
uid, err := strconv.Atoi(usr.Uid)
if err != nil {
return fmt.Errorf("failed to parse debros UID: %w", err)
}
gid, err := strconv.Atoi(usr.Gid)
if err != nil {
return fmt.Errorf("failed to parse debros GID: %w", err)
}
if err := os.Chown(secretPath, uid, gid); err != nil {
return fmt.Errorf("failed to change ownership of %s: %w", secretPath, err)
}
}
return nil
}
// EnsureSwarmKey gets or generates the IPFS private swarm key.
//
// The key lives at secrets/swarm.key in the standard kubo PSK text format
// ("/key/swarm/psk/1.0.0/" + "/base16/" + 64 uppercase hex chars). An
// existing file containing that header is returned verbatim; otherwise a new
// 32-byte key is generated and written with 0600 permissions.
func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
	swarmKeyPath := filepath.Join(sg.debrosDir, "secrets", "swarm.key")
	secretDir := filepath.Dir(swarmKeyPath)
	// Ensure secrets directory exists.
	if err := os.MkdirAll(secretDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create secrets directory: %w", err)
	}
	// Reuse an existing key file when it carries the PSK header; anything
	// else is treated as invalid and regenerated.
	if data, err := os.ReadFile(swarmKeyPath); err == nil {
		if strings.Contains(string(data), "/key/swarm/psk/1.0.0/") {
			return data, nil
		}
	}
	// Generate new key (32 bytes).
	keyBytes := make([]byte, 32)
	if _, err := rand.Read(keyBytes); err != nil {
		return nil, fmt.Errorf("failed to generate swarm key: %w", err)
	}
	keyHex := strings.ToUpper(hex.EncodeToString(keyBytes))
	content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
	// Write and protect.
	if err := os.WriteFile(swarmKeyPath, []byte(content), 0600); err != nil {
		return nil, fmt.Errorf("failed to save swarm key: %w", err)
	}
	return []byte(content), nil
}
// EnsureNodeIdentity gets or generates the node's LibP2P identity.
//
// The Ed25519 private key is persisted at data/<nodeType>/identity.key
// (0600); when a valid key already exists its peer ID is returned unchanged,
// so the node keeps a stable identity across restarts. A corrupt key file
// falls through to regeneration.
//
// Fixes: errors from peer.IDFromPublicKey were previously discarded (which
// could silently return an empty peer ID); they are now propagated. The
// Ed25519 key-size argument is passed as -1 instead of a misleading 2048,
// since go-libp2p ignores the bit size for Ed25519 keys.
func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error) {
	keyDir := filepath.Join(sg.debrosDir, "data", nodeType)
	keyPath := filepath.Join(keyDir, "identity.key")
	// Ensure data directory exists.
	if err := os.MkdirAll(keyDir, 0755); err != nil {
		return "", fmt.Errorf("failed to create data directory: %w", err)
	}
	// Reuse an existing key when it unmarshals cleanly.
	if data, err := os.ReadFile(keyPath); err == nil {
		if priv, err := crypto.UnmarshalPrivateKey(data); err == nil {
			peerID, err := peer.IDFromPublicKey(priv.GetPublic())
			if err != nil {
				return "", fmt.Errorf("failed to derive peer ID from existing key: %w", err)
			}
			return peerID, nil
		}
	}
	// Generate a new Ed25519 identity.
	priv, pub, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
	if err != nil {
		return "", fmt.Errorf("failed to generate identity: %w", err)
	}
	peerID, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return "", fmt.Errorf("failed to derive peer ID: %w", err)
	}
	// Marshal and save the private key with restrictive permissions.
	keyData, err := crypto.MarshalPrivateKey(priv)
	if err != nil {
		return "", fmt.Errorf("failed to marshal private key: %w", err)
	}
	if err := os.WriteFile(keyPath, keyData, 0600); err != nil {
		return "", fmt.Errorf("failed to save identity key: %w", err)
	}
	return peerID, nil
}
// SaveConfig writes a configuration file to disk.
//
// gateway.yaml is stored under data/; every other config lands in configs/.
// After writing (0644), ownership is handed to debros:debros on a
// best-effort basis.
func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
	targetDir := filepath.Join(sg.debrosDir, "configs")
	if filename == "gateway.yaml" {
		targetDir = filepath.Join(sg.debrosDir, "data")
	}
	if err := os.MkdirAll(targetDir, 0755); err != nil {
		return fmt.Errorf("failed to create config directory: %w", err)
	}
	dest := filepath.Join(targetDir, filename)
	if err := os.WriteFile(dest, []byte(content), 0644); err != nil {
		return fmt.Errorf("failed to write config %s: %w", filename, err)
	}
	// Fix ownership; failure (e.g. no debros user) is deliberately ignored.
	_ = exec.Command("chown", "debros:debros", dest).Run()
	return nil
}

View File

@ -0,0 +1,691 @@
package production
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// BinaryInstaller handles downloading and installing external binaries
// (RQLite, IPFS/Kubo, IPFS Cluster, Olric, the Go toolchain) and building
// the DeBros binaries from source.
type BinaryInstaller struct {
	// arch is the target architecture token used in download URLs
	// (e.g. "amd64", "arm64").
	arch string
	// logWriter receives human-readable progress output. Declared as
	// interface{} but asserted to an io.Writer-shaped interface at each
	// call site; it must satisfy Write([]byte) (int, error).
	logWriter interface{} // io.Writer
}
// NewBinaryInstaller creates a new binary installer for the given target
// architecture, writing progress messages to logWriter.
func NewBinaryInstaller(arch string, logWriter interface{}) *BinaryInstaller {
	installer := new(BinaryInstaller)
	installer.arch = arch
	installer.logWriter = logWriter
	return installer
}
// InstallRQLite downloads and installs RQLite v8.43.0 into /usr/local/bin.
// It is a no-op when an rqlited binary is already resolvable on PATH. The
// release tarball is fetched from GitHub into /tmp and extracted there;
// only the rqlited server binary is copied into place.
func (bi *BinaryInstaller) InstallRQLite() error {
	if _, err := exec.LookPath("rqlited"); err == nil {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ RQLite already installed\n")
		return nil
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing RQLite...\n")
	version := "8.43.0"
	tarball := fmt.Sprintf("rqlite-v%s-linux-%s.tar.gz", version, bi.arch)
	url := fmt.Sprintf("https://github.com/rqlite/rqlite/releases/download/v%s/%s", version, tarball)
	// Download the release tarball into /tmp.
	cmd := exec.Command("wget", "-q", url, "-O", "/tmp/"+tarball)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to download RQLite: %w", err)
	}
	// Extract in place under /tmp.
	cmd = exec.Command("tar", "-C", "/tmp", "-xzf", "/tmp/"+tarball)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to extract RQLite: %w", err)
	}
	// Copy only the rqlited server binary; other bundled tools are skipped.
	dir := fmt.Sprintf("/tmp/rqlite-v%s-linux-%s", version, bi.arch)
	if err := exec.Command("cp", dir+"/rqlited", "/usr/local/bin/").Run(); err != nil {
		return fmt.Errorf("failed to copy rqlited binary: %w", err)
	}
	// Best-effort chmod; error intentionally ignored.
	exec.Command("chmod", "+x", "/usr/local/bin/rqlited").Run()
	// Ensure PATH includes /usr/local/bin for the current process.
	os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/bin")
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ RQLite installed\n")
	return nil
}
// InstallIPFS downloads and installs IPFS (Kubo) v0.38.2.
// Follows official steps from https://docs.ipfs.tech/install/command-line/
// in order: download the tarball from dist.ipfs.tech, extract it under /tmp,
// run the bundled install.sh, then verify the installed binary. No-op when
// an ipfs binary is already on PATH.
func (bi *BinaryInstaller) InstallIPFS() error {
	if _, err := exec.LookPath("ipfs"); err == nil {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS already installed\n")
		return nil
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing IPFS (Kubo)...\n")
	// Follow official installation steps in order
	kuboVersion := "v0.38.2"
	tarball := fmt.Sprintf("kubo_%s_linux-%s.tar.gz", kuboVersion, bi.arch)
	url := fmt.Sprintf("https://dist.ipfs.tech/kubo/%s/%s", kuboVersion, tarball)
	tmpDir := "/tmp"
	tarPath := filepath.Join(tmpDir, tarball)
	kuboDir := filepath.Join(tmpDir, "kubo")
	// Step 1: Download the Linux binary from dist.ipfs.tech
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 1: Downloading Kubo v%s...\n", kuboVersion)
	cmd := exec.Command("wget", "-q", url, "-O", tarPath)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to download kubo from %s: %w", url, err)
	}
	// Verify the tarball actually landed before attempting extraction.
	if _, err := os.Stat(tarPath); err != nil {
		return fmt.Errorf("kubo tarball not found after download at %s: %w", tarPath, err)
	}
	// Step 2: Unzip the file
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 2: Extracting Kubo archive...\n")
	cmd = exec.Command("tar", "-xzf", tarPath, "-C", tmpDir)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to extract kubo tarball: %w", err)
	}
	// Verify extraction produced the expected kubo/ directory.
	if _, err := os.Stat(kuboDir); err != nil {
		return fmt.Errorf("kubo directory not found after extraction at %s: %w", kuboDir, err)
	}
	// Step 3: Move into the kubo folder (cd kubo)
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 3: Running installation script...\n")
	// Step 4: Run the installation script (sudo bash install.sh)
	installScript := filepath.Join(kuboDir, "install.sh")
	if _, err := os.Stat(installScript); err != nil {
		return fmt.Errorf("install.sh not found in extracted kubo directory at %s: %w", installScript, err)
	}
	cmd = exec.Command("bash", installScript)
	cmd.Dir = kuboDir
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to run install.sh: %v\n%s", err, string(output))
	}
	// Step 5: Test that Kubo has installed correctly
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 5: Verifying installation...\n")
	cmd = exec.Command("ipfs", "--version")
	output, err := cmd.CombinedOutput()
	if err != nil {
		// ipfs might not be in PATH yet in this process, check well-known
		// install locations on disk directly instead.
		ipfsLocations := []string{"/usr/local/bin/ipfs", "/usr/bin/ipfs"}
		found := false
		for _, loc := range ipfsLocations {
			if info, err := os.Stat(loc); err == nil && !info.IsDir() {
				found = true
				// Ensure it's executable before the service tries to run it.
				if info.Mode()&0111 == 0 {
					os.Chmod(loc, 0755)
				}
				break
			}
		}
		if !found {
			return fmt.Errorf("ipfs binary not found after installation in %v", ipfsLocations)
		}
	} else {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " %s", string(output))
	}
	// Ensure PATH is updated for current process
	os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/bin")
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS installed successfully\n")
	return nil
}
// InstallIPFSCluster downloads and installs IPFS Cluster Service via
// `go install`, placing the binary in /usr/local/bin.
//
// Requires a working Go toolchain on PATH. No-op when ipfs-cluster-service
// is already installed. On failure the returned error includes the go
// tool's combined output for diagnosis.
func (bi *BinaryInstaller) InstallIPFSCluster() error {
	if _, err := exec.LookPath("ipfs-cluster-service"); err == nil {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS Cluster already installed\n")
		return nil
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing IPFS Cluster Service...\n")
	// Check if Go is available
	if _, err := exec.LookPath("go"); err != nil {
		return fmt.Errorf("go not found - required to install IPFS Cluster. Please install Go first")
	}
	cmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest")
	cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin")
	// Capture output so failures include the go tool's diagnostics
	// (consistent with the other installers in this file).
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to install IPFS Cluster: %v\n%s", err, string(output))
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS Cluster installed\n")
	return nil
}
// InstallOlric downloads and installs the Olric server (pinned v0.7.0) via
// `go install`, placing the binary in /usr/local/bin.
//
// Requires a working Go toolchain on PATH. No-op when olric-server is
// already installed. On failure the returned error includes the go tool's
// combined output for diagnosis.
func (bi *BinaryInstaller) InstallOlric() error {
	if _, err := exec.LookPath("olric-server"); err == nil {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Olric already installed\n")
		return nil
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing Olric...\n")
	// Check if Go is available
	if _, err := exec.LookPath("go"); err != nil {
		return fmt.Errorf("go not found - required to install Olric. Please install Go first")
	}
	cmd := exec.Command("go", "install", "github.com/olric-data/olric/cmd/olric-server@v0.7.0")
	cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin")
	// Capture output so failures include the go tool's diagnostics
	// (consistent with the other installers in this file).
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to install Olric: %v\n%s", err, string(output))
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Olric installed\n")
	return nil
}
// InstallGo downloads and installs the Go toolchain (pinned 1.21.6) under
// /usr/local/go and appends /usr/local/go/bin to this process's PATH.
// No-op when a go binary is already resolvable on PATH.
func (bi *BinaryInstaller) InstallGo() error {
	if _, err := exec.LookPath("go"); err == nil {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Go already installed\n")
		return nil
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing Go...\n")
	goTarball := fmt.Sprintf("go1.21.6.linux-%s.tar.gz", bi.arch)
	goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball)
	// Download the official distribution tarball into /tmp.
	cmd := exec.Command("wget", "-q", goURL, "-O", "/tmp/"+goTarball)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to download Go: %w", err)
	}
	// Extract directly into /usr/local (creates /usr/local/go).
	cmd = exec.Command("tar", "-C", "/usr/local", "-xzf", "/tmp/"+goTarball)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to extract Go: %w", err)
	}
	// Add to PATH for this process so later install steps can use go.
	newPath := os.Getenv("PATH") + ":/usr/local/go/bin"
	os.Setenv("PATH", newPath)
	// Verify installation via PATH lookup.
	if _, err := exec.LookPath("go"); err != nil {
		return fmt.Errorf("go installed but not found in PATH after installation")
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Go installed\n")
	return nil
}
// ResolveBinaryPath finds the fully-qualified path to a required executable.
//
// Resolution order: the process PATH first, then each non-empty entry of
// extraPaths, which must name an existing executable regular file. The
// result is made absolute when possible. When nothing matches, the error
// lists every extra candidate that was checked.
func (bi *BinaryInstaller) ResolveBinaryPath(binary string, extraPaths ...string) (string, error) {
	// Best-effort absolutization; fall back to the path as given.
	toAbs := func(p string) string {
		if abs, err := filepath.Abs(p); err == nil {
			return abs
		}
		return p
	}
	// PATH lookup takes precedence over explicit candidates.
	if found, err := exec.LookPath(binary); err == nil {
		return toAbs(found), nil
	}
	// Walk the explicit candidates, remembering which ones we checked so the
	// not-found error can name them.
	checked := make([]string, 0, len(extraPaths))
	for _, candidate := range extraPaths {
		if candidate == "" {
			continue
		}
		checked = append(checked, candidate)
		info, err := os.Stat(candidate)
		if err != nil || info.IsDir() || info.Mode()&0111 == 0 {
			continue
		}
		return toAbs(candidate), nil
	}
	if len(checked) == 0 {
		return "", fmt.Errorf("required binary %q not found in path", binary)
	}
	return "", fmt.Errorf("required binary %q not found in path (also checked %s)", binary, strings.Join(checked, ", "))
}
// InstallDeBrosBinaries clones and builds DeBros binaries.
//
// It checks out (or updates) the repository under <debrosHome>/src on the
// given branch, runs `make build`, and copies the resulting binaries from
// src/bin into <debrosHome>/bin. When skipRepoUpdate is true the existing
// checkout is reused verbatim and no git command is executed.
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome string, skipRepoUpdate bool) error {
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building DeBros binaries...\n")
	srcDir := filepath.Join(debrosHome, "src")
	binDir := filepath.Join(debrosHome, "bin")
	// Ensure directories exist (errors deliberately ignored; later steps
	// surface any real failure).
	os.MkdirAll(srcDir, 0755)
	os.MkdirAll(binDir, 0755)
	// Check if git repository is already initialized
	repoInitialized := false
	if _, err := os.Stat(filepath.Join(srcDir, ".git")); err == nil {
		repoInitialized = true
	}
	// Handle repository update/clone based on skipRepoUpdate flag
	if skipRepoUpdate {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Skipping repo clone/pull (--no-pull flag)\n")
		if !repoInitialized {
			return fmt.Errorf("cannot skip pull: repository not found at %s", srcDir)
		}
		// Verify srcDir exists and has content
		if entries, err := os.ReadDir(srcDir); err != nil {
			return fmt.Errorf("failed to read source directory %s: %w", srcDir, err)
		} else if len(entries) == 0 {
			return fmt.Errorf("source directory %s is empty", srcDir)
		}
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Using existing repository at %s (skipping git operations)\n", srcDir)
		// Skip to build step - don't execute any git commands
	} else {
		// Clone repository if not present, otherwise update it
		if !repoInitialized {
			fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Cloning repository...\n")
			cmd := exec.Command("git", "clone", "--branch", branch, "--depth", "1", "https://github.com/DeBrosOfficial/network.git", srcDir)
			if err := cmd.Run(); err != nil {
				return fmt.Errorf("failed to clone repository: %w", err)
			}
		} else {
			// Hard-reset to origin/<branch> so local edits never break the build.
			fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating repository to latest changes...\n")
			if output, err := exec.Command("git", "-C", srcDir, "fetch", "origin", branch).CombinedOutput(); err != nil {
				return fmt.Errorf("failed to fetch repository updates: %v\n%s", err, string(output))
			}
			if output, err := exec.Command("git", "-C", srcDir, "reset", "--hard", "origin/"+branch).CombinedOutput(); err != nil {
				return fmt.Errorf("failed to reset repository: %v\n%s", err, string(output))
			}
			if output, err := exec.Command("git", "-C", srcDir, "clean", "-fd").CombinedOutput(); err != nil {
				return fmt.Errorf("failed to clean repository: %v\n%s", err, string(output))
			}
		}
	}
	// Build binaries
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building binaries...\n")
	cmd := exec.Command("make", "build")
	cmd.Dir = srcDir
	// HOME is overridden and /usr/local/go/bin appended so make/go resolve
	// as they would for the debros user, even when run by root.
	cmd.Env = append(os.Environ(), "HOME="+debrosHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to build: %v\n%s", err, string(output))
	}
	// Copy binaries
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Copying binaries...\n")
	srcBinDir := filepath.Join(srcDir, "bin")
	// Check if source bin directory exists
	if _, err := os.Stat(srcBinDir); os.IsNotExist(err) {
		return fmt.Errorf("source bin directory does not exist at %s - build may have failed", srcBinDir)
	}
	// Check if there are any files to copy
	entries, err := os.ReadDir(srcBinDir)
	if err != nil {
		return fmt.Errorf("failed to read source bin directory: %w", err)
	}
	if len(entries) == 0 {
		return fmt.Errorf("source bin directory is empty - build may have failed")
	}
	// Copy each binary individually to avoid wildcard expansion issues
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		srcPath := filepath.Join(srcBinDir, entry.Name())
		dstPath := filepath.Join(binDir, entry.Name())
		// Read source file
		data, err := os.ReadFile(srcPath)
		if err != nil {
			return fmt.Errorf("failed to read binary %s: %w", entry.Name(), err)
		}
		// Write destination file
		if err := os.WriteFile(dstPath, data, 0755); err != nil {
			return fmt.Errorf("failed to write binary %s: %w", entry.Name(), err)
		}
	}
	// Best-effort permission/ownership fix; errors intentionally ignored.
	exec.Command("chmod", "-R", "755", binDir).Run()
	exec.Command("chown", "-R", "debros:debros", binDir).Run()
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ DeBros binaries installed\n")
	return nil
}
// InstallSystemDependencies installs system-level dependencies via apt
// (curl, git, make, build-essential, wget). A failed `apt-get update` is
// logged as a warning only, since cached package lists may still suffice;
// a failed install is fatal and includes apt's output for diagnosis.
func (bi *BinaryInstaller) InstallSystemDependencies() error {
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing system dependencies...\n")
	// Update package list (non-fatal).
	cmd := exec.Command("apt-get", "update")
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Warning: apt update failed\n")
	}
	// Install dependencies; capture output so failures include apt's
	// diagnostics (consistent with the other installers in this file).
	cmd = exec.Command("apt-get", "install", "-y", "curl", "git", "make", "build-essential", "wget")
	if output, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("failed to install dependencies: %v\n%s", err, string(output))
	}
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ System dependencies installed\n")
	return nil
}
// InitializeIPFSRepo initializes an IPFS repository for a node.
//
// It runs `ipfs init --profile=server` when the repo does not yet exist,
// installs the private-network swarm key (when available), rewrites the
// API/Gateway/Swarm listen addresses to the given ports, and — for private
// swarms — disables AutoConf and clears its 'auto' placeholders, which is
// required for Kubo to start on a private network. Safe to re-run on
// existing repos; it re-applies configuration each time.
func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int) error {
	configPath := filepath.Join(ipfsRepoPath, "config")
	repoExists := false
	if _, err := os.Stat(configPath); err == nil {
		repoExists = true
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS repo for %s already exists, ensuring configuration...\n", nodeType)
	} else {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS repo for %s...\n", nodeType)
	}
	if err := os.MkdirAll(ipfsRepoPath, 0755); err != nil {
		return fmt.Errorf("failed to create IPFS repo directory: %w", err)
	}
	// Resolve IPFS binary path (PATH may not yet include /usr/local/bin).
	ipfsBinary, err := bi.ResolveBinaryPath("ipfs", "/usr/local/bin/ipfs", "/usr/bin/ipfs")
	if err != nil {
		return err
	}
	// Initialize IPFS if repo doesn't exist
	if !repoExists {
		cmd := exec.Command(ipfsBinary, "init", "--profile=server", "--repo-dir="+ipfsRepoPath)
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to initialize IPFS: %v\n%s", err, string(output))
		}
	}
	// Copy swarm key if present; its presence marks this as a private swarm.
	swarmKeyExists := false
	if data, err := os.ReadFile(swarmKeyPath); err == nil {
		swarmKeyDest := filepath.Join(ipfsRepoPath, "swarm.key")
		if err := os.WriteFile(swarmKeyDest, data, 0600); err != nil {
			return fmt.Errorf("failed to copy swarm key: %w", err)
		}
		swarmKeyExists = true
	}
	// Configure IPFS addresses (API, Gateway, Swarm) by modifying the config file directly
	// This ensures the ports are set correctly and avoids conflicts with RQLite on port 5001
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Configuring IPFS addresses (API: %d, Gateway: %d, Swarm: %d)...\n", apiPort, gatewayPort, swarmPort)
	if err := bi.configureIPFSAddresses(ipfsRepoPath, apiPort, gatewayPort, swarmPort); err != nil {
		return fmt.Errorf("failed to configure IPFS addresses: %w", err)
	}
	// Always disable AutoConf for private swarm when swarm.key is present
	// This is critical - IPFS will fail to start if AutoConf is enabled on a private network
	// We do this even for existing repos to fix repos initialized before this fix was applied
	if swarmKeyExists {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Disabling AutoConf for private swarm...\n")
		cmd := exec.Command(ipfsBinary, "config", "--json", "AutoConf.Enabled", "false")
		cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to disable AutoConf: %v\n%s", err, string(output))
		}
		// Clear AutoConf placeholders from config to prevent Kubo startup errors
		// When AutoConf is disabled, 'auto' placeholders must be replaced with explicit values or empty
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Clearing AutoConf placeholders from IPFS config...\n")
		type configCommand struct {
			desc string
			args []string
		}
		// List of config replacements to clear 'auto' placeholders
		cleanup := []configCommand{
			{"clearing Bootstrap peers", []string{"config", "Bootstrap", "--json", "[]"}},
			{"clearing Routing.DelegatedRouters", []string{"config", "Routing.DelegatedRouters", "--json", "[]"}},
			{"clearing Ipns.DelegatedPublishers", []string{"config", "Ipns.DelegatedPublishers", "--json", "[]"}},
			{"clearing DNS.Resolvers", []string{"config", "DNS.Resolvers", "--json", "{}"}},
		}
		for _, step := range cleanup {
			fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " %s...\n", step.desc)
			cmd := exec.Command(ipfsBinary, step.args...)
			cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
			if output, err := cmd.CombinedOutput(); err != nil {
				return fmt.Errorf("failed while %s: %v\n%s", step.desc, err, string(output))
			}
		}
	}
	// Fix ownership (best-effort; error intentionally ignored).
	exec.Command("chown", "-R", "debros:debros", ipfsRepoPath).Run()
	return nil
}
// configureIPFSAddresses rewrites the Addresses section of an IPFS repo's
// config file so the API, Gateway, and Swarm listeners bind to the given
// ports on all interfaces (Swarm additionally gets an IPv6 listener).
func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort, gatewayPort, swarmPort int) error {
	configPath := filepath.Join(ipfsRepoPath, "config")
	// Load and decode the current config.
	raw, err := os.ReadFile(configPath)
	if err != nil {
		return fmt.Errorf("failed to read IPFS config: %w", err)
	}
	var cfg map[string]interface{}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return fmt.Errorf("failed to parse IPFS config: %w", err)
	}
	// Replace the whole Addresses section with explicit listen addresses.
	apiAddrs := []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", apiPort)}
	gatewayAddrs := []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", gatewayPort)}
	swarmAddrs := []string{
		fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
		fmt.Sprintf("/ip6/::/tcp/%d", swarmPort),
	}
	cfg["Addresses"] = map[string]interface{}{
		"API":     apiAddrs,
		"Gateway": gatewayAddrs,
		"Swarm":   swarmAddrs,
	}
	// Re-encode and persist with owner-only permissions.
	encoded, err := json.MarshalIndent(cfg, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal IPFS config: %w", err)
	}
	if err := os.WriteFile(configPath, encoded, 0600); err != nil {
		return fmt.Errorf("failed to write IPFS config: %w", err)
	}
	return nil
}
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration.
// This runs `ipfs-cluster-service init` to create the service.json configuration file.
// For existing installations, it ensures the cluster secret is up to date.
// bootstrapClusterPeers should be in format: ["/ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>"]
//
// Ownership of clusterPath is handed to the debros user before and after the
// update, so the service can read what root wrote here.
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret string, ipfsAPIPort int, bootstrapClusterPeers []string) error {
	serviceJSONPath := filepath.Join(clusterPath, "service.json")
	configExists := false
	if _, err := os.Stat(serviceJSONPath); err == nil {
		configExists = true
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS Cluster config for %s already exists, ensuring it's up to date...\n", nodeType)
	} else {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Preparing IPFS Cluster path for %s...\n", nodeType)
	}
	if err := os.MkdirAll(clusterPath, 0755); err != nil {
		return fmt.Errorf("failed to create IPFS Cluster directory: %w", err)
	}
	// Fix ownership before running init (best-effort).
	exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
	// Resolve ipfs-cluster-service binary path
	clusterBinary, err := bi.ResolveBinaryPath("ipfs-cluster-service", "/usr/local/bin/ipfs-cluster-service", "/usr/bin/ipfs-cluster-service")
	if err != nil {
		return fmt.Errorf("ipfs-cluster-service binary not found: %w", err)
	}
	// Initialize cluster config if it doesn't exist
	if !configExists {
		// Initialize cluster config with ipfs-cluster-service init
		// This creates the service.json file with all required sections
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS Cluster config...\n")
		cmd := exec.Command(clusterBinary, "init", "--force")
		cmd.Env = append(os.Environ(), "IPFS_CLUSTER_PATH="+clusterPath)
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to initialize IPFS Cluster config: %v\n%s", err, string(output))
		}
	}
	// Always update the cluster secret, IPFS port, and peer addresses (for both new and existing configs)
	// This ensures existing installations get the secret and port synchronized
	if clusterSecret != "" {
		fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating cluster secret, IPFS port, and peer addresses...\n")
		if err := bi.updateClusterConfig(clusterPath, clusterSecret, ipfsAPIPort, bootstrapClusterPeers); err != nil {
			return fmt.Errorf("failed to update cluster config: %w", err)
		}
	}
	// Fix ownership again after updates (best-effort).
	exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
	return nil
}
// updateClusterConfig updates the secret, IPFS port, and peer addresses in
// IPFS Cluster's service.json.
//
// The file is decoded into a generic map so unrelated sections are
// preserved untouched; only cluster.secret, cluster.peer_addresses, and the
// two node_multiaddress entries (ipfsproxy and ipfshttp) are rewritten.
func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsAPIPort int, bootstrapClusterPeers []string) error {
	serviceJSONPath := filepath.Join(clusterPath, "service.json")
	// Read existing config
	data, err := os.ReadFile(serviceJSONPath)
	if err != nil {
		return fmt.Errorf("failed to read service.json: %w", err)
	}
	// Parse JSON
	var config map[string]interface{}
	if err := json.Unmarshal(data, &config); err != nil {
		return fmt.Errorf("failed to parse service.json: %w", err)
	}
	// Update cluster secret and peer addresses
	if cluster, ok := config["cluster"].(map[string]interface{}); ok {
		cluster["secret"] = secret
		// Configure peer addresses for cluster discovery
		// This allows nodes to find and connect to each other
		if len(bootstrapClusterPeers) > 0 {
			cluster["peer_addresses"] = bootstrapClusterPeers
		}
	} else {
		// No cluster section present (or wrong shape): create a minimal one.
		clusterConfig := map[string]interface{}{
			"secret": secret,
		}
		if len(bootstrapClusterPeers) > 0 {
			clusterConfig["peer_addresses"] = bootstrapClusterPeers
		}
		config["cluster"] = clusterConfig
	}
	// Update IPFS port in IPFS Proxy configuration
	ipfsNodeMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsAPIPort)
	if api, ok := config["api"].(map[string]interface{}); ok {
		if ipfsproxy, ok := api["ipfsproxy"].(map[string]interface{}); ok {
			ipfsproxy["node_multiaddress"] = ipfsNodeMultiaddr
		}
	}
	// Update IPFS port in IPFS Connector configuration
	if ipfsConnector, ok := config["ipfs_connector"].(map[string]interface{}); ok {
		if ipfshttp, ok := ipfsConnector["ipfshttp"].(map[string]interface{}); ok {
			ipfshttp["node_multiaddress"] = ipfsNodeMultiaddr
		}
	}
	// Write back
	updatedData, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		return fmt.Errorf("failed to marshal service.json: %w", err)
	}
	if err := os.WriteFile(serviceJSONPath, updatedData, 0644); err != nil {
		return fmt.Errorf("failed to write service.json: %w", err)
	}
	return nil
}
// GetClusterPeerMultiaddr reads the IPFS Cluster peer ID from identity.json
// and builds its multiaddress on the default cluster listen port 9098.
// Returned format: /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>.
func (bi *BinaryInstaller) GetClusterPeerMultiaddr(clusterPath string, nodeIP string) (string, error) {
	// Load the cluster identity file written by ipfs-cluster-service init.
	raw, err := os.ReadFile(filepath.Join(clusterPath, "identity.json"))
	if err != nil {
		return "", fmt.Errorf("failed to read identity.json: %w", err)
	}
	var identity map[string]interface{}
	if err := json.Unmarshal(raw, &identity); err != nil {
		return "", fmt.Errorf("failed to parse identity.json: %w", err)
	}
	// The "id" field holds the cluster peer ID; it must be a non-empty string.
	id, ok := identity["id"].(string)
	if !ok || id == "" {
		return "", fmt.Errorf("peer ID not found in identity.json")
	}
	// Port 9098 is the default cluster listen port.
	return fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", nodeIP, id), nil
}
// InitializeRQLiteDataDir creates the RQLite data directory for a node and
// hands ownership to the debros user on a best-effort basis.
func (bi *BinaryInstaller) InitializeRQLiteDataDir(nodeType, dataDir string) error {
	fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing RQLite data dir for %s...\n", nodeType)
	err := os.MkdirAll(dataDir, 0755)
	if err != nil {
		return fmt.Errorf("failed to create RQLite data directory: %w", err)
	}
	// Ownership fix is best-effort; the error is intentionally ignored.
	exec.Command("chown", "-R", "debros:debros", dataDir).Run()
	return nil
}

View File

@ -0,0 +1,600 @@
package production
import (
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
)
// ProductionSetup orchestrates the entire production deployment: it holds
// the detected environment, user-supplied options, and one instance of each
// specialized checker/provisioner/installer used by the setup phases.
type ProductionSetup struct {
	// Detected environment.
	osInfo *OSInfo // populated in Phase1 by osDetector
	arch   string  // target architecture token, e.g. "amd64"
	// Filesystem roots: debrosHome is the service user's home,
	// debrosDir is <debrosHome>/.debros.
	debrosHome string
	debrosDir  string
	logWriter  io.Writer // destination for human-readable progress output
	// User-supplied options.
	forceReconfigure      bool
	skipOptionalDeps      bool
	skipResourceChecks    bool   // set by --ignore-resource-checks
	clusterSecretOverride string // normalized (trimmed, lowercased) in the constructor
	// Collaborators, one per concern; wired up in NewProductionSetup.
	privChecker       *PrivilegeChecker
	osDetector        *OSDetector
	archDetector      *ArchitectureDetector
	resourceChecker   *ResourceChecker
	fsProvisioner     *FilesystemProvisioner
	userProvisioner   *UserProvisioner
	stateDetector     *StateDetector
	configGenerator   *ConfigGenerator
	secretGenerator   *SecretGenerator
	serviceGenerator  *SystemdServiceGenerator
	serviceController *SystemdController
	binaryInstaller   *BinaryInstaller
	branch            string // git branch to build from
	skipRepoUpdate    bool   // set by --no-pull
	NodePeerID        string // Captured during Phase3 for later display
}
// ReadBranchPreference reads the stored branch preference from disk
func ReadBranchPreference(debrosDir string) string {
branchFile := filepath.Join(debrosDir, ".branch")
data, err := os.ReadFile(branchFile)
if err != nil {
return "main" // Default to main if file doesn't exist
}
branch := strings.TrimSpace(string(data))
if branch == "" {
return "main"
}
return branch
}
// SaveBranchPreference saves the branch preference to disk
func SaveBranchPreference(debrosDir, branch string) error {
branchFile := filepath.Join(debrosDir, ".branch")
if err := os.MkdirAll(debrosDir, 0755); err != nil {
return fmt.Errorf("failed to create debros directory: %w", err)
}
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
return fmt.Errorf("failed to save branch preference: %w", err)
}
exec.Command("chown", "debros:debros", branchFile).Run()
return nil
}
// NewProductionSetup creates a new production setup orchestrator.
//
// debrosHome is the service user's home directory; the working directory is
// <debrosHome>/.debros. An empty branch falls back to the stored branch
// preference (or "main"). clusterSecretOverride is normalized (trimmed and
// lowercased) before being handed to the secret generator.
func NewProductionSetup(debrosHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool, clusterSecretOverride string) *ProductionSetup {
	debrosDir := debrosHome + "/.debros"
	// Architecture detection error intentionally ignored; Phase1 re-detects
	// and reports failures to the user.
	arch, _ := (&ArchitectureDetector{}).Detect()
	normalizedSecret := strings.TrimSpace(strings.ToLower(clusterSecretOverride))
	// If branch is empty, try to read from stored preference, otherwise default to main
	if branch == "" {
		branch = ReadBranchPreference(debrosDir)
	}
	return &ProductionSetup{
		debrosHome:            debrosHome,
		debrosDir:             debrosDir,
		logWriter:             logWriter,
		forceReconfigure:      forceReconfigure,
		arch:                  arch,
		branch:                branch,
		skipRepoUpdate:        skipRepoUpdate,
		skipResourceChecks:    skipResourceChecks,
		clusterSecretOverride: normalizedSecret,
		privChecker:           &PrivilegeChecker{},
		osDetector:            &OSDetector{},
		archDetector:          &ArchitectureDetector{},
		resourceChecker:       NewResourceChecker(),
		fsProvisioner:         NewFilesystemProvisioner(debrosHome),
		userProvisioner:       NewUserProvisioner("debros", debrosHome, "/bin/bash"),
		stateDetector:         NewStateDetector(debrosDir),
		configGenerator:       NewConfigGenerator(debrosDir),
		secretGenerator:       NewSecretGenerator(debrosDir, normalizedSecret),
		serviceGenerator:      NewSystemdServiceGenerator(debrosHome, debrosDir),
		serviceController:    NewSystemdController(),
		binaryInstaller:       NewBinaryInstaller(arch, logWriter),
	}
}
// logf writes a formatted message (with a trailing newline) to the log
// writer; it is a silent no-op when no writer is configured.
func (ps *ProductionSetup) logf(format string, args ...interface{}) {
	if ps.logWriter == nil {
		return
	}
	fmt.Fprintf(ps.logWriter, format+"\n", args...)
}
// IsUpdate detects if this is an update to an existing installation, i.e.
// whether configuration files or IPFS data from a previous setup run are
// already present on disk.
func (ps *ProductionSetup) IsUpdate() bool {
	return ps.stateDetector.IsConfigured() || ps.stateDetector.HasIPFSData()
}
// Phase1CheckPrerequisites performs initial environment validation:
// root privilege, Linux OS, OS flavor/support, architecture, basic
// dependencies, and (unless skipped) disk/RAM/CPU resource checks.
// It populates ps.osInfo and ps.arch for later phases. An unsupported OS
// only produces a warning; all other failures abort setup.
func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
	ps.logf("Phase 1: Checking prerequisites...")
	// Check root
	if err := ps.privChecker.CheckRoot(); err != nil {
		return fmt.Errorf("privilege check failed: %w", err)
	}
	ps.logf(" ✓ Running as root")
	// Check Linux OS
	if err := ps.privChecker.CheckLinuxOS(); err != nil {
		return fmt.Errorf("OS check failed: %w", err)
	}
	ps.logf(" ✓ Running on Linux")
	// Detect OS and remember it for later phases.
	osInfo, err := ps.osDetector.Detect()
	if err != nil {
		return fmt.Errorf("failed to detect OS: %w", err)
	}
	ps.osInfo = osInfo
	ps.logf(" ✓ Detected OS: %s", osInfo.Name)
	// Check if supported; warn but proceed on unsupported distros.
	if !ps.osDetector.IsSupportedOS(osInfo) {
		ps.logf(" ⚠️ OS %s is not officially supported (Ubuntu 22/24/25, Debian 12)", osInfo.Name)
		ps.logf(" Proceeding anyway, but issues may occur")
	}
	// Detect architecture
	arch, err := ps.archDetector.Detect()
	if err != nil {
		return fmt.Errorf("failed to detect architecture: %w", err)
	}
	ps.arch = arch
	ps.logf(" ✓ Detected architecture: %s", arch)
	// Check basic dependencies; list each missing one with its install hint.
	depChecker := NewDependencyChecker(ps.skipOptionalDeps)
	if missing, err := depChecker.CheckAll(); err != nil {
		ps.logf(" ❌ Missing dependencies:")
		for _, dep := range missing {
			ps.logf(" - %s: %s", dep.Name, dep.InstallHint)
		}
		return err
	}
	ps.logf(" ✓ Basic dependencies available")
	// Check system resources unless the user explicitly opted out.
	if ps.skipResourceChecks {
		ps.logf(" ⚠️ Skipping system resource checks (disk, RAM, CPU) due to --ignore-resource-checks flag")
	} else {
		if err := ps.resourceChecker.CheckDiskSpace(ps.debrosHome); err != nil {
			ps.logf(" ❌ %v", err)
			return err
		}
		ps.logf(" ✓ Sufficient disk space available")
		if err := ps.resourceChecker.CheckRAM(); err != nil {
			ps.logf(" ❌ %v", err)
			return err
		}
		ps.logf(" ✓ Sufficient RAM available")
		if err := ps.resourceChecker.CheckCPU(); err != nil {
			ps.logf(" ❌ %v", err)
			return err
		}
		ps.logf(" ✓ Sufficient CPU cores available")
	}
	return nil
}
// Phase2ProvisionEnvironment sets up users and filesystems: it creates the
// 'debros' system user (if missing), grants sudoers access to the invoking
// user when run via sudo, creates the base directory structure, and fixes
// ownership. Node-specific directories are created later in Phase2c.
func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
	ps.logf("Phase 2: Provisioning environment...")
	// Create debros user
	if !ps.userProvisioner.UserExists() {
		if err := ps.userProvisioner.CreateUser(); err != nil {
			return fmt.Errorf("failed to create debros user: %w", err)
		}
		ps.logf(" ✓ Created 'debros' user")
	} else {
		ps.logf(" ✓ 'debros' user already exists")
	}
	// Set up sudoers access if invoked via sudo; failure is non-fatal.
	sudoUser := os.Getenv("SUDO_USER")
	if sudoUser != "" {
		if err := ps.userProvisioner.SetupSudoersAccess(sudoUser); err != nil {
			ps.logf(" ⚠️ Failed to setup sudoers: %v", err)
		} else {
			ps.logf(" ✓ Sudoers access configured")
		}
	}
	// Create directory structure (base directories only - node-specific dirs created in Phase2c)
	if err := ps.fsProvisioner.EnsureDirectoryStructure(""); err != nil {
		return fmt.Errorf("failed to create directory structure: %w", err)
	}
	ps.logf(" ✓ Directory structure created")
	// Fix ownership
	if err := ps.fsProvisioner.FixOwnership(); err != nil {
		return fmt.Errorf("failed to fix ownership: %w", err)
	}
	ps.logf(" ✓ Ownership fixed")
	return nil
}
// Phase2bInstallBinaries installs external binaries and DeBros components.
//
// System dependencies and the RQLite/IPFS/IPFS Cluster/Olric installers are
// best-effort: their failures are logged as warnings and the phase continues.
// Installing Go and the DeBros binaries themselves is mandatory; either
// failure aborts the phase with a wrapped error.
func (ps *ProductionSetup) Phase2bInstallBinaries() error {
	ps.logf("Phase 2b: Installing binaries...")
	// Install system dependencies (best-effort).
	if err := ps.binaryInstaller.InstallSystemDependencies(); err != nil {
		ps.logf(" ⚠️ System dependencies warning: %v", err)
	}
	// Install Go if not present (required to build DeBros binaries).
	if err := ps.binaryInstaller.InstallGo(); err != nil {
		return fmt.Errorf("failed to install Go: %w", err)
	}
	// Install third-party binaries; each is best-effort.
	if err := ps.binaryInstaller.InstallRQLite(); err != nil {
		ps.logf(" ⚠️ RQLite install warning: %v", err)
	}
	if err := ps.binaryInstaller.InstallIPFS(); err != nil {
		ps.logf(" ⚠️ IPFS install warning: %v", err)
	}
	if err := ps.binaryInstaller.InstallIPFSCluster(); err != nil {
		ps.logf(" ⚠️ IPFS Cluster install warning: %v", err)
	}
	if err := ps.binaryInstaller.InstallOlric(); err != nil {
		ps.logf(" ⚠️ Olric install warning: %v", err)
	}
	// Install DeBros binaries from the configured branch (mandatory).
	if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.debrosHome, ps.skipRepoUpdate); err != nil {
		return fmt.Errorf("failed to install DeBros binaries: %w", err)
	}
	ps.logf(" ✓ All binaries installed")
	return nil
}
// Phase2cInitializeServices initializes service repositories and configurations.
//
// nodeType is "bootstrap" or "node". bootstrapPeers and vpsIP are used to
// infer the IPFS Cluster multiaddress that a non-bootstrap node should join.
// Steps: create node-specific directories, initialize the IPFS repo and the
// IPFS Cluster config, prepare the RQLite data directory (best-effort), then
// re-run the ownership fix because initialization may have run as root.
func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapPeers []string, vpsIP string) error {
	ps.logf("Phase 2c: Initializing services...")
	// Ensure node-specific directories exist
	if err := ps.fsProvisioner.EnsureDirectoryStructure(nodeType); err != nil {
		return fmt.Errorf("failed to create node-specific directories: %w", err)
	}
	// Build paths with nodeType awareness to match systemd unit definitions
	dataDir := filepath.Join(ps.debrosDir, "data", nodeType)
	// Initialize IPFS repo with correct path structure
	// Use port 4501 for API (to avoid conflict with RQLite on 5001), 8080 for gateway (standard), 4101 for swarm (to avoid conflict with LibP2P on 4001)
	ipfsRepoPath := filepath.Join(dataDir, "ipfs", "repo")
	if err := ps.binaryInstaller.InitializeIPFSRepo(nodeType, ipfsRepoPath, filepath.Join(ps.debrosDir, "secrets", "swarm.key"), 4501, 8080, 4101); err != nil {
		return fmt.Errorf("failed to initialize IPFS repo: %w", err)
	}
	// Initialize IPFS Cluster config (runs ipfs-cluster-service init)
	clusterPath := filepath.Join(dataDir, "ipfs-cluster")
	clusterSecret, err := ps.secretGenerator.EnsureClusterSecret()
	if err != nil {
		return fmt.Errorf("failed to get cluster secret: %w", err)
	}
	// Get bootstrap cluster peer addresses for non-bootstrap nodes
	var bootstrapClusterPeers []string
	if nodeType != "bootstrap" && len(bootstrapPeers) > 0 {
		// Try to read bootstrap cluster peer ID and construct multiaddress.
		// NOTE(review): this reads data/bootstrap/ipfs-cluster on the LOCAL
		// disk, which only exists when the bootstrap runs on this same host —
		// confirm intended behavior for remote bootstraps.
		bootstrapClusterPath := filepath.Join(ps.debrosDir, "data", "bootstrap", "ipfs-cluster")
		// Infer bootstrap IP from bootstrap peers
		bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
		if bootstrapIP != "" {
			// Check if bootstrap cluster identity exists
			if _, err := os.Stat(filepath.Join(bootstrapClusterPath, "identity.json")); err == nil {
				// Bootstrap cluster is initialized, get its multiaddress
				if clusterMultiaddr, err := ps.binaryInstaller.GetClusterPeerMultiaddr(bootstrapClusterPath, bootstrapIP); err == nil {
					bootstrapClusterPeers = []string{clusterMultiaddr}
					ps.logf(" Configured IPFS Cluster to connect to bootstrap: %s", clusterMultiaddr)
				} else {
					ps.logf(" ⚠️ Could not read bootstrap cluster peer ID: %v", err)
					ps.logf(" ⚠️ IPFS Cluster will rely on mDNS discovery (may not work across internet)")
				}
			} else {
				ps.logf(" Bootstrap cluster not yet initialized, peer_addresses will be empty")
				ps.logf(" IPFS Cluster will rely on mDNS discovery (may not work across internet)")
			}
		}
	}
	if err := ps.binaryInstaller.InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret, 4501, bootstrapClusterPeers); err != nil {
		return fmt.Errorf("failed to initialize IPFS Cluster: %w", err)
	}
	// Initialize RQLite data directory (best-effort; node manages RQLite).
	rqliteDataDir := filepath.Join(dataDir, "rqlite")
	if err := ps.binaryInstaller.InitializeRQLiteDataDir(nodeType, rqliteDataDir); err != nil {
		ps.logf(" ⚠️ RQLite initialization warning: %v", err)
	}
	// Ensure all directories and files created during service initialization have correct ownership
	// This is critical because directories/files created as root need to be owned by debros user
	if err := ps.fsProvisioner.FixOwnership(); err != nil {
		return fmt.Errorf("failed to fix ownership after service initialization: %w", err)
	}
	ps.logf(" ✓ Services initialized")
	return nil
}
// Phase3GenerateSecrets generates shared secrets and keys.
//
// Ensures the IPFS Cluster secret, the private swarm key, and a libp2p node
// identity exist, creating any that are missing. The resulting peer ID is
// stored on ps.NodePeerID for later display. isBootstrap selects which
// identity slot ("bootstrap" vs "node") is used.
func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
	ps.logf("Phase 3: Generating secrets...")

	// Shared IPFS Cluster secret.
	if _, err := ps.secretGenerator.EnsureClusterSecret(); err != nil {
		return fmt.Errorf("failed to ensure cluster secret: %w", err)
	}
	ps.logf(" ✓ Cluster secret ensured")

	// Private-network swarm key.
	if _, err := ps.secretGenerator.EnsureSwarmKey(); err != nil {
		return fmt.Errorf("failed to ensure swarm key: %w", err)
	}
	ps.logf(" ✓ IPFS swarm key ensured")

	// Node identity keyed by role.
	var role string
	if isBootstrap {
		role = "bootstrap"
	} else {
		role = "node"
	}
	identity, err := ps.secretGenerator.EnsureNodeIdentity(role)
	if err != nil {
		return fmt.Errorf("failed to ensure node identity: %w", err)
	}
	idStr := identity.String()
	ps.NodePeerID = idStr // Capture for later display
	ps.logf(" ✓ Node identity ensured (Peer ID: %s)", idStr)
	return nil
}
// Phase4GenerateConfigs generates node, gateway, and service configs.
//
// isBootstrap selects bootstrap.yaml vs node.yaml for the node config;
// bootstrapPeers and vpsIP drive the derivation of the Olric server list the
// gateway connects to; enableHTTPS/domain feed the gateway config; and
// bootstrapJoin is forwarded to the node config generator. On update runs,
// existing configs are rewritten in the latest format.
func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string, bootstrapJoin string) error {
	if ps.IsUpdate() {
		ps.logf("Phase 4: Updating configurations...")
		ps.logf(" (Existing configs will be updated to latest format)")
	} else {
		ps.logf("Phase 4: Generating configurations...")
	}
	// Node config
	nodeConfig, err := ps.configGenerator.GenerateNodeConfig(isBootstrap, bootstrapPeers, vpsIP, bootstrapJoin)
	if err != nil {
		return fmt.Errorf("failed to generate node config: %w", err)
	}
	var configFile string
	if isBootstrap {
		configFile = "bootstrap.yaml"
	} else {
		configFile = "node.yaml"
	}
	if err := ps.secretGenerator.SaveConfig(configFile, nodeConfig); err != nil {
		return fmt.Errorf("failed to save node config: %w", err)
	}
	ps.logf(" ✓ Node config generated: %s", configFile)
	// Determine Olric servers for gateway config
	// Olric will bind to 0.0.0.0 (all interfaces) but gateway needs specific addresses
	var olricServers []string
	if isBootstrap {
		// Bootstrap node: gateway should connect to vpsIP if provided, otherwise localhost
		if vpsIP != "" {
			olricServers = []string{net.JoinHostPort(vpsIP, "3320")}
		} else {
			olricServers = []string{"127.0.0.1:3320"}
		}
	} else {
		// Non-bootstrap node: include bootstrap server and local server
		olricServers = []string{"127.0.0.1:3320"} // Default to localhost for single-node
		if len(bootstrapPeers) > 0 {
			// Try to infer Olric servers from bootstrap peers
			bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
			if bootstrapIP != "" {
				// Add bootstrap Olric server (use net.JoinHostPort for IPv6 support)
				olricServers = []string{net.JoinHostPort(bootstrapIP, "3320")}
				// Add local Olric server too
				if vpsIP != "" {
					olricServers = append(olricServers, net.JoinHostPort(vpsIP, "3320"))
				} else {
					olricServers = append(olricServers, "127.0.0.1:3320")
				}
			}
		}
	}
	gatewayConfig, err := ps.configGenerator.GenerateGatewayConfig(bootstrapPeers, enableHTTPS, domain, olricServers)
	if err != nil {
		return fmt.Errorf("failed to generate gateway config: %w", err)
	}
	if err := ps.secretGenerator.SaveConfig("gateway.yaml", gatewayConfig); err != nil {
		return fmt.Errorf("failed to save gateway config: %w", err)
	}
	ps.logf(" ✓ Gateway config generated")
	// Olric config - bind to 0.0.0.0 to listen on all interfaces
	// Gateway will connect using the specific address from olricServers list above
	olricConfig, err := ps.configGenerator.GenerateOlricConfig("0.0.0.0", 3320, 3322)
	if err != nil {
		return fmt.Errorf("failed to generate olric config: %w", err)
	}
	// Create olric config directory (filepath.Join for consistency with the
	// rest of the setup code instead of manual string concatenation).
	olricConfigDir := filepath.Join(ps.debrosDir, "configs", "olric")
	if err := os.MkdirAll(olricConfigDir, 0755); err != nil {
		return fmt.Errorf("failed to create olric config directory: %w", err)
	}
	olricConfigPath := filepath.Join(olricConfigDir, "config.yaml")
	if err := os.WriteFile(olricConfigPath, []byte(olricConfig), 0644); err != nil {
		return fmt.Errorf("failed to save olric config: %w", err)
	}
	// The file was written by the current (root) process; hand it to the
	// debros user. Failure is non-fatal but no longer silently ignored.
	if err := exec.Command("chown", "debros:debros", olricConfigPath).Run(); err != nil {
		ps.logf(" ⚠️ Failed to chown olric config: %v", err)
	}
	ps.logf(" ✓ Olric config generated")
	return nil
}
// Phase5CreateSystemdServices creates and enables systemd units.
//
// It resolves the ipfs, ipfs-cluster-service and olric-server binaries (RQLite
// is managed by the node process, so no rqlited unit is written), writes units
// for IPFS, IPFS Cluster, Olric, the node and the gateway, reloads systemd,
// enables all units, then starts them in dependency order: infrastructure
// (IPFS, Olric) → IPFS Cluster → node and gateway. Enable/start failures are
// logged as warnings; binary resolution, unit writing and daemon-reload
// failures abort the phase.
//
// NOTE(review): vpsIP is not referenced anywhere in this body — confirm
// whether it is kept only for call-site compatibility.
func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP string) error {
	ps.logf("Phase 5: Creating systemd services...")
	// Validate all required binaries are available before creating services
	ipfsBinary, err := ps.binaryInstaller.ResolveBinaryPath("ipfs", "/usr/local/bin/ipfs", "/usr/bin/ipfs")
	if err != nil {
		return fmt.Errorf("ipfs binary not available: %w", err)
	}
	clusterBinary, err := ps.binaryInstaller.ResolveBinaryPath("ipfs-cluster-service", "/usr/local/bin/ipfs-cluster-service", "/usr/bin/ipfs-cluster-service")
	if err != nil {
		return fmt.Errorf("ipfs-cluster-service binary not available: %w", err)
	}
	// Note: rqlited binary is not needed as a separate service - node manages RQLite internally
	olricBinary, err := ps.binaryInstaller.ResolveBinaryPath("olric-server", "/usr/local/bin/olric-server", "/usr/bin/olric-server")
	if err != nil {
		return fmt.Errorf("olric-server binary not available: %w", err)
	}
	// IPFS service
	ipfsUnit := ps.serviceGenerator.GenerateIPFSService(nodeType, ipfsBinary)
	unitName := fmt.Sprintf("debros-ipfs-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(unitName, ipfsUnit); err != nil {
		return fmt.Errorf("failed to write IPFS service: %w", err)
	}
	ps.logf(" ✓ IPFS service created: %s", unitName)
	// IPFS Cluster service
	clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(nodeType, clusterBinary)
	clusterUnitName := fmt.Sprintf("debros-ipfs-cluster-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(clusterUnitName, clusterUnit); err != nil {
		return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
	}
	ps.logf(" ✓ IPFS Cluster service created: %s", clusterUnitName)
	// Note: RQLite is managed internally by the node process, not as a separate systemd service
	ps.logf(" RQLite will be managed by the node process")
	// Olric service
	olricUnit := ps.serviceGenerator.GenerateOlricService(olricBinary)
	if err := ps.serviceController.WriteServiceUnit("debros-olric.service", olricUnit); err != nil {
		return fmt.Errorf("failed to write Olric service: %w", err)
	}
	ps.logf(" ✓ Olric service created")
	// Node service
	nodeUnit := ps.serviceGenerator.GenerateNodeService(nodeType)
	nodeUnitName := fmt.Sprintf("debros-node-%s.service", nodeType)
	if err := ps.serviceController.WriteServiceUnit(nodeUnitName, nodeUnit); err != nil {
		return fmt.Errorf("failed to write Node service: %w", err)
	}
	ps.logf(" ✓ Node service created: %s", nodeUnitName)
	// Gateway service (optional, only on specific nodes)
	gatewayUnit := ps.serviceGenerator.GenerateGatewayService(nodeType)
	if err := ps.serviceController.WriteServiceUnit("debros-gateway.service", gatewayUnit); err != nil {
		return fmt.Errorf("failed to write Gateway service: %w", err)
	}
	ps.logf(" ✓ Gateway service created")
	// Reload systemd daemon so the new/changed units are picked up
	if err := ps.serviceController.DaemonReload(); err != nil {
		return fmt.Errorf("failed to reload systemd: %w", err)
	}
	ps.logf(" ✓ Systemd daemon reloaded")
	// Enable services (RQLite is managed by node, not as separate service)
	services := []string{unitName, clusterUnitName, "debros-olric.service", nodeUnitName, "debros-gateway.service"}
	for _, svc := range services {
		if err := ps.serviceController.EnableService(svc); err != nil {
			ps.logf(" ⚠️ Failed to enable %s: %v", svc, err)
		} else {
			ps.logf(" ✓ Service enabled: %s", svc)
		}
	}
	// Start services in dependency order
	ps.logf(" Starting services...")
	// Start infrastructure first (IPFS, Olric) - RQLite is managed by node
	infraServices := []string{unitName, "debros-olric.service"}
	for _, svc := range infraServices {
		if err := ps.serviceController.StartService(svc); err != nil {
			ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
		} else {
			ps.logf(" - %s started", svc)
		}
	}
	// Wait a moment for infrastructure to stabilize
	// NOTE(review): this spawns /bin/sleep; time.Sleep(2*time.Second) would
	// avoid an external process — left unchanged here.
	exec.Command("sleep", "2").Run()
	// Start IPFS Cluster
	if err := ps.serviceController.StartService(clusterUnitName); err != nil {
		ps.logf(" ⚠️ Failed to start %s: %v", clusterUnitName, err)
	} else {
		ps.logf(" - %s started", clusterUnitName)
	}
	// Start application services
	appServices := []string{nodeUnitName, "debros-gateway.service"}
	for _, svc := range appServices {
		if err := ps.serviceController.StartService(svc); err != nil {
			ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
		} else {
			ps.logf(" - %s started", svc)
		}
	}
	ps.logf(" ✓ All services started")
	return nil
}
// LogSetupComplete logs completion information.
//
// Prints the node's peer ID plus service-management, log-file, start and
// verification hints. NOTE(review): every unit and log name below is
// hardcoded for a bootstrap install; on a "node" install the actual units are
// debros-*-node — confirm whether this output should adapt to nodeType.
func (ps *ProductionSetup) LogSetupComplete(peerID string) {
	ps.logf("\n" + strings.Repeat("=", 70))
	ps.logf("Setup Complete!")
	ps.logf(strings.Repeat("=", 70))
	ps.logf("\nNode Peer ID: %s", peerID)
	ps.logf("\nService Management:")
	ps.logf(" systemctl status debros-ipfs-bootstrap")
	ps.logf(" journalctl -u debros-node-bootstrap -f")
	ps.logf(" tail -f %s/logs/node-bootstrap.log", ps.debrosDir)
	ps.logf("\nLog Files:")
	ps.logf(" %s/logs/ipfs-bootstrap.log", ps.debrosDir)
	ps.logf(" %s/logs/ipfs-cluster-bootstrap.log", ps.debrosDir)
	ps.logf(" %s/logs/rqlite-bootstrap.log", ps.debrosDir)
	ps.logf(" %s/logs/olric.log", ps.debrosDir)
	ps.logf(" %s/logs/node-bootstrap.log", ps.debrosDir)
	ps.logf(" %s/logs/gateway.log", ps.debrosDir)
	ps.logf("\nStart All Services:")
	ps.logf(" systemctl start debros-ipfs-bootstrap debros-ipfs-cluster-bootstrap debros-olric debros-node-bootstrap debros-gateway")
	ps.logf("\nVerify Installation:")
	ps.logf(" curl http://localhost:6001/health")
	ps.logf(" curl http://localhost:5001/status\n")
}

View File

@ -0,0 +1,247 @@
package production
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// FilesystemProvisioner manages directory creation and permissions for a
// DeBros installation rooted at debrosHome (with state under debrosHome/.debros).
type FilesystemProvisioner struct {
	debrosHome string      // home directory of the debros user
	debrosDir  string      // debrosHome/.debros, the installation state root
	logWriter  interface{} // Can be io.Writer for logging; not used by any method in this file — TODO confirm
}

// NewFilesystemProvisioner creates a new provisioner rooted at debrosHome;
// the state directory is derived as debrosHome/.debros.
func NewFilesystemProvisioner(debrosHome string) *FilesystemProvisioner {
	return &FilesystemProvisioner{
		debrosHome: debrosHome,
		debrosDir:  filepath.Join(debrosHome, ".debros"),
	}
}
// EnsureDirectoryStructure creates all required directories
// nodeType can be "bootstrap", "node", or "" (empty string means create base directories only).
//
// Base directories (configs, secrets, data, logs, tls-cache, backups, bin,
// src) are always created. When nodeType is given, the node's ipfs/repo,
// ipfs-cluster and rqlite data directories plus node-type-specific log files
// are created as well. Directories use 0755, log files 0644 so systemd units
// running as the debros user can append to them. Idempotent.
func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error {
	// Base directories that are always needed
	dirs := []string{
		fp.debrosDir,
		filepath.Join(fp.debrosDir, "configs"),
		filepath.Join(fp.debrosDir, "secrets"),
		filepath.Join(fp.debrosDir, "data"),
		filepath.Join(fp.debrosDir, "logs"),
		filepath.Join(fp.debrosDir, "tls-cache"),
		filepath.Join(fp.debrosDir, "backups"),
		filepath.Join(fp.debrosHome, "bin"),
		filepath.Join(fp.debrosHome, "src"),
	}
	// Only create directories for the requested node type
	if nodeType == "bootstrap" || nodeType == "node" {
		dirs = append(dirs,
			filepath.Join(fp.debrosDir, "data", nodeType, "ipfs", "repo"),
			filepath.Join(fp.debrosDir, "data", nodeType, "ipfs-cluster"),
			filepath.Join(fp.debrosDir, "data", nodeType, "rqlite"),
		)
	}
	for _, dir := range dirs {
		if err := os.MkdirAll(dir, 0755); err != nil {
			return fmt.Errorf("failed to create directory %s: %w", dir, err)
		}
	}
	// Create log files with correct permissions so systemd can write to them
	// Only create logs for the specific nodeType being installed
	logsDir := filepath.Join(fp.debrosDir, "logs")
	logFiles := []string{
		"olric.log",
		"gateway.log",
	}
	// Add node-type-specific log files only if nodeType is specified
	if nodeType == "bootstrap" {
		logFiles = append(logFiles,
			"ipfs-bootstrap.log",
			"ipfs-cluster-bootstrap.log",
			"node-bootstrap.log",
		)
	} else if nodeType == "node" {
		logFiles = append(logFiles,
			"ipfs-node.log",
			"ipfs-cluster-node.log",
			"node-node.log",
		)
	}
	for _, logFile := range logFiles {
		logPath := filepath.Join(logsDir, logFile)
		// Create empty file if it doesn't exist (never truncate an existing log)
		if _, err := os.Stat(logPath); os.IsNotExist(err) {
			if err := os.WriteFile(logPath, []byte{}, 0644); err != nil {
				return fmt.Errorf("failed to create log file %s: %w", logPath, err)
			}
		}
	}
	return nil
}
// FixOwnership hands the installation tree to the debros user.
//
// Applies chown in a fixed order: recursively over the .debros state tree,
// non-recursively over the home directory itself, then recursively over
// the bin directory. Stops at the first failure, wrapping the error with
// chown's combined output.
func (fp *FilesystemProvisioner) FixOwnership() error {
	targets := []struct {
		path      string
		recursive bool
	}{
		{fp.debrosDir, true},                        // all data, configs, logs, secrets
		{fp.debrosHome, false},                      // the home dir entry itself
		{filepath.Join(fp.debrosHome, "bin"), true}, // installed binaries
	}
	for _, t := range targets {
		args := []string{"debros:debros", t.path}
		if t.recursive {
			args = append([]string{"-R"}, args...)
		}
		if output, err := exec.Command("chown", args...).CombinedOutput(); err != nil {
			return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", t.path, err, string(output))
		}
	}
	return nil
}
// UserProvisioner manages system user creation and sudoers setup.
type UserProvisioner struct {
	username string // account name to create/manage (e.g. "debros")
	home     string // home directory passed to useradd -d
	shell    string // login shell; defaults to /bin/bash
}

// NewUserProvisioner creates a new user provisioner. An empty shell
// defaults to /bin/bash.
func NewUserProvisioner(username, home, shell string) *UserProvisioner {
	if shell == "" {
		shell = "/bin/bash"
	}
	return &UserProvisioner{
		username: username,
		home:     home,
		shell:    shell,
	}
}
// UserExists reports whether the system account is known to the OS.
func (up *UserProvisioner) UserExists() bool {
	// `id` exits zero only when the account exists.
	err := exec.Command("id", up.username).Run()
	return err == nil
}
// CreateUser creates the system account via useradd; it is a no-op when the
// account already exists.
func (up *UserProvisioner) CreateUser() error {
	if up.UserExists() {
		return nil // already present
	}
	// -r system account, -m create home, -s login shell, -d home directory.
	args := []string{"-r", "-m", "-s", up.shell, "-d", up.home, up.username}
	if err := exec.Command("useradd", args...).Run(); err != nil {
		return fmt.Errorf("failed to create user %s: %w", up.username, err)
	}
	return nil
}
// SetupSudoersAccess creates a sudoers rule letting invokerUser run commands
// as the debros user without a password.
//
// The rule is APPENDED to /etc/sudoers.d/debros-access so rules previously
// granted to other users are preserved (the old implementation overwrote the
// file, silently revoking them). The resulting file is validated with
// `visudo -c`; on validation failure the previous content is restored (or the
// file removed if it did not exist), since a broken sudoers fragment can lock
// out sudo entirely. An empty invokerUser is a no-op.
func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
	if invokerUser == "" {
		return nil // Skip if no invoker
	}
	sudoersRule := fmt.Sprintf("%s ALL=(debros) NOPASSWD: ALL\n", invokerUser)
	sudoersFile := "/etc/sudoers.d/debros-access"
	// Read current content (if any) so we can append rather than clobber.
	existing, readErr := os.ReadFile(sudoersFile)
	if readErr == nil && strings.Contains(string(existing), invokerUser) {
		return nil // Rule already set for this user
	}
	var content []byte
	if readErr == nil {
		content = append(append([]byte{}, existing...), []byte(sudoersRule)...)
	} else {
		content = []byte(sudoersRule)
	}
	// 0440 is the permission sudo requires for sudoers.d fragments.
	if err := os.WriteFile(sudoersFile, content, 0440); err != nil {
		return fmt.Errorf("failed to create sudoers rule: %w", err)
	}
	// Validate sudoers file
	cmd := exec.Command("visudo", "-c", "-f", sudoersFile)
	if err := cmd.Run(); err != nil {
		// Roll back: restore prior content, or remove if the file is new.
		if readErr == nil {
			os.WriteFile(sudoersFile, existing, 0440)
		} else {
			os.Remove(sudoersFile)
		}
		return fmt.Errorf("sudoers rule validation failed: %w", err)
	}
	return nil
}
// StateDetector checks for existing production state (configs, secrets and
// service data) under an installation's .debros directory, letting the setup
// distinguish a fresh install from an update.
type StateDetector struct {
	debrosDir string // root of installation state (e.g. ~debros/.debros)
}

// NewStateDetector creates a state detector for the given .debros directory.
func NewStateDetector(debrosDir string) *StateDetector {
	return &StateDetector{
		debrosDir: debrosDir,
	}
}
// IsConfigured reports whether at least one of the basic config files
// (configs/node.yaml or configs/gateway.yaml) exists.
// NOTE(review): bootstrap.yaml is not considered here — confirm intended.
func (sd *StateDetector) IsConfigured() bool {
	for _, name := range []string{"node.yaml", "gateway.yaml"} {
		if _, err := os.Stat(filepath.Join(sd.debrosDir, "configs", name)); err == nil {
			return true
		}
	}
	return false
}
// HasSecrets reports whether BOTH the cluster secret and the swarm key exist
// under the secrets directory.
func (sd *StateDetector) HasSecrets() bool {
	for _, name := range []string{"cluster-secret", "swarm.key"} {
		if _, err := os.Stat(filepath.Join(sd.debrosDir, "secrets", name)); err != nil {
			return false
		}
	}
	return true
}
// HasIPFSData checks if IPFS repo is initialized by probing for the repo's
// config file. NOTE(review): only the bootstrap node's path
// (data/bootstrap/ipfs/repo) is checked; a "node"-type install would not be
// detected — confirm whether that is intended.
func (sd *StateDetector) HasIPFSData() bool {
	ipfsRepoPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "ipfs", "repo", "config")
	_, err := os.Stat(ipfsRepoPath)
	return err == nil
}
// HasRQLiteData checks if RQLite data exists, i.e. data/bootstrap/rqlite is a
// directory. NOTE(review): like HasIPFSData, this only considers the
// bootstrap path — confirm for "node" installs.
func (sd *StateDetector) HasRQLiteData() bool {
	rqliteDataPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "rqlite")
	info, err := os.Stat(rqliteDataPath)
	return err == nil && info.IsDir()
}
// CheckBinaryInstallation verifies that every required external binary
// (ipfs, ipfs-cluster-service, rqlited, olric-server) is resolvable on PATH,
// returning an error listing any that are missing.
func (sd *StateDetector) CheckBinaryInstallation() error {
	required := []string{"ipfs", "ipfs-cluster-service", "rqlited", "olric-server"}
	var notFound []string
	for _, name := range required {
		if _, err := exec.LookPath(name); err != nil {
			notFound = append(notFound, name)
		}
	}
	if len(notFound) == 0 {
		return nil
	}
	return fmt.Errorf("missing binaries: %s", strings.Join(notFound, ", "))
}

View File

@ -0,0 +1,349 @@
package production
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
// SystemdServiceGenerator generates systemd unit files for the DeBros stack
// (IPFS, IPFS Cluster, RQLite, Olric, node, gateway), parameterized by the
// debros user's home and its .debros state directory.
type SystemdServiceGenerator struct {
	debrosHome string // HOME for the debros user; used for WorkingDirectory/ExecStart paths
	debrosDir  string // debrosHome/.debros; data, configs and log locations
}

// NewSystemdServiceGenerator creates a new service generator for the given
// home and state directories.
func NewSystemdServiceGenerator(debrosHome, debrosDir string) *SystemdServiceGenerator {
	return &SystemdServiceGenerator{
		debrosHome: debrosHome,
		debrosDir:  debrosDir,
	}
}
// GenerateIPFSService generates the IPFS daemon systemd unit.
//
// The unit runs `ipfs daemon` as the debros user against the node-type-
// specific repo, copies the private swarm key into the repo before start when
// present, and appends stdout/stderr to logs/ipfs-<nodeType>.log. Hardening:
// NoNewPrivileges, PrivateTmp, ProtectSystem=strict with the .debros tree
// writable.
func (ssg *SystemdServiceGenerator) GenerateIPFSService(nodeType string, ipfsBinary string) string {
	// Any nodeType other than "bootstrap" maps to the "node" data directory.
	var ipfsRepoPath string
	if nodeType == "bootstrap" {
		ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs", "repo")
	} else {
		ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs", "repo")
	}
	logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodeType))
	// Indexed verbs: %[1]s nodeType, %[2]s home, %[3]s repo path,
	// %[4]s .debros dir, %[5]s log file, %[6]s ipfs binary.
	return fmt.Sprintf(`[Unit]
Description=IPFS Daemon (%[1]s)
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=%[2]s
Environment=IPFS_PATH=%[3]s
ExecStartPre=/bin/bash -c 'if [ -f %[4]s/secrets/swarm.key ] && [ ! -f %[3]s/swarm.key ]; then cp %[4]s/secrets/swarm.key %[3]s/swarm.key && chmod 600 %[3]s/swarm.key; fi'
ExecStart=%[6]s daemon --enable-pubsub-experiment --repo-dir=%[3]s
Restart=always
RestartSec=5
StandardOutput=file:%[5]s
StandardError=file:%[5]s
SyslogIdentifier=ipfs-%[1]s
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, ipfsRepoPath, ssg.debrosDir, logFile, ipfsBinary)
}
// GenerateIPFSClusterService generates the IPFS Cluster systemd unit.
//
// The unit hard-requires the matching debros-ipfs-<nodeType> unit, runs
// `ipfs-cluster-service daemon` as the debros user with IPFS_CLUSTER_PATH
// pointed at the node-type-specific cluster directory, and appends output to
// logs/ipfs-cluster-<nodeType>.log.
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(nodeType string, clusterBinary string) string {
	// Any nodeType other than "bootstrap" maps to the "node" data directory.
	var clusterPath string
	if nodeType == "bootstrap" {
		clusterPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs-cluster")
	} else {
		clusterPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs-cluster")
	}
	logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", nodeType))
	// Indexed verbs: %[1]s nodeType, %[2]s home, %[3]s cluster path,
	// %[4]s log file, %[5]s .debros dir, %[6]s cluster binary.
	return fmt.Sprintf(`[Unit]
Description=IPFS Cluster Service (%[1]s)
After=debros-ipfs-%[1]s.service
Wants=debros-ipfs-%[1]s.service
Requires=debros-ipfs-%[1]s.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%[2]s
Environment=HOME=%[2]s
Environment=IPFS_CLUSTER_PATH=%[3]s
ExecStart=%[6]s daemon
Restart=always
RestartSec=5
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=ipfs-cluster-%[1]s
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[5]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, clusterPath, logFile, ssg.debrosDir, clusterBinary)
}
// GenerateRQLiteService generates the RQLite systemd unit.
//
// NOTE(review): Phase5 states RQLite is managed by the node process and does
// not install this unit; the generator is retained (and covered by tests) —
// confirm whether other callers still use it.
//
// httpPort/raftPort bind on 0.0.0.0; advertiseIP (default 127.0.0.1) is used
// for the -http-adv-addr/-raft-adv-addr announcements. A non-empty joinAddr
// adds -join with 30 attempts at 10s intervals. The data directory is the
// node-type-specific rqlite directory, passed as the trailing argument.
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
	// Any nodeType other than "bootstrap" maps to the "node" data directory.
	var dataDir string
	if nodeType == "bootstrap" {
		dataDir = filepath.Join(ssg.debrosDir, "data", "bootstrap", "rqlite")
	} else {
		dataDir = filepath.Join(ssg.debrosDir, "data", "node", "rqlite")
	}
	// Use public IP for advertise if provided, otherwise default to localhost
	if advertiseIP == "" {
		advertiseIP = "127.0.0.1"
	}
	args := fmt.Sprintf(
		`-http-addr 0.0.0.0:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 0.0.0.0:%d`,
		httpPort, advertiseIP, httpPort, advertiseIP, raftPort, raftPort,
	)
	if joinAddr != "" {
		args += fmt.Sprintf(` -join %s -join-attempts 30 -join-interval 10s`, joinAddr)
	}
	args += fmt.Sprintf(` %s`, dataDir)
	logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", nodeType))
	// Indexed verbs: %[1]s nodeType, %[2]s home, %[3]s rqlited args,
	// %[4]s log file, %[5]s .debros dir, %[6]s rqlited binary.
	return fmt.Sprintf(`[Unit]
Description=RQLite Database (%[1]s)
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=%[2]s
ExecStart=%[6]s %[3]s
Restart=always
RestartSec=5
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=rqlite-%[1]s
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[5]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, args, logFile, ssg.debrosDir, rqliteBinary)
}
// GenerateOlricService generates the Olric cache-server systemd unit.
//
// The server reads its config from configs/olric/config.yaml via the
// OLRIC_SERVER_CONFIG environment variable and appends output to
// logs/olric.log.
func (ssg *SystemdServiceGenerator) GenerateOlricService(olricBinary string) string {
	olricConfigPath := filepath.Join(ssg.debrosDir, "configs", "olric", "config.yaml")
	logFile := filepath.Join(ssg.debrosDir, "logs", "olric.log")
	// Indexed verbs: %[1]s home, %[2]s config path, %[3]s log file,
	// %[4]s .debros dir, %[5]s olric binary.
	return fmt.Sprintf(`[Unit]
Description=Olric Cache Server
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=%[1]s
Environment=OLRIC_SERVER_CONFIG=%[2]s
ExecStart=%[5]s
Restart=always
RestartSec=5
StandardOutput=file:%[3]s
StandardError=file:%[3]s
SyslogIdentifier=olric
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, ssg.debrosHome, olricConfigPath, logFile, ssg.debrosDir, olricBinary)
}
// GenerateNodeService generates the DeBros Node systemd unit.
//
// The node hard-requires the matching debros-ipfs-cluster-<nodeType> unit,
// runs as the debros user, reads bootstrap.yaml or node.yaml depending on
// nodeType, and appends output to logs/node-<nodeType>.log.
func (ssg *SystemdServiceGenerator) GenerateNodeService(nodeType string) string {
	// Bootstrap nodes read bootstrap.yaml; any other nodeType reads node.yaml.
	var configFile string
	if nodeType == "bootstrap" {
		configFile = "bootstrap.yaml"
	} else {
		configFile = "node.yaml"
	}
	logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("node-%s.log", nodeType))
	// Indexed verbs for consistency with the other generators in this file
	// (output is unchanged): %[1]s nodeType, %[2]s home, %[3]s .debros dir,
	// %[4]s config file, %[5]s log file.
	return fmt.Sprintf(`[Unit]
Description=DeBros Network Node (%[1]s)
After=debros-ipfs-cluster-%[1]s.service
Wants=debros-ipfs-cluster-%[1]s.service
Requires=debros-ipfs-cluster-%[1]s.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%[2]s
Environment=HOME=%[2]s
ExecStart=%[2]s/bin/node --config %[3]s/configs/%[4]s
Restart=always
RestartSec=5
StandardOutput=file:%[5]s
StandardError=file:%[5]s
SyslogIdentifier=debros-node-%[1]s
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[3]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, ssg.debrosDir, configFile, logFile)
}
// GenerateGatewayService generates the DeBros Gateway systemd unit.
//
// The gateway is ordered after the node and Olric units, runs as the debros
// user with CAP_NET_BIND_SERVICE so it can bind privileged ports, and appends
// output to logs/gateway.log.
//
// Fix: the config path now points at configs/gateway.yaml — the location the
// gateway config is actually saved to (SaveConfig("gateway.yaml", ...), and
// the path StateDetector.IsConfigured checks) — instead of the nonexistent
// data/gateway.yaml.
func (ssg *SystemdServiceGenerator) GenerateGatewayService(nodeType string) string {
	nodeService := fmt.Sprintf("debros-node-%s.service", nodeType)
	olricService := "debros-olric.service"
	logFile := filepath.Join(ssg.debrosDir, "logs", "gateway.log")
	// Indexed verbs: %[1]s node unit, %[2]s olric unit, %[3]s home,
	// %[4]s .debros dir, %[5]s log file.
	return fmt.Sprintf(`[Unit]
Description=DeBros Gateway
After=%[1]s %[2]s
Wants=%[1]s %[2]s
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%[3]s
Environment=HOME=%[3]s
ExecStart=%[3]s/bin/gateway --config %[4]s/configs/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=file:%[5]s
StandardError=file:%[5]s
SyslogIdentifier=debros-gateway
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, nodeService, olricService, ssg.debrosHome, ssg.debrosDir, logFile)
}
// SystemdController manages systemd service operations (write, reload,
// enable, start/stop/restart, status) against /etc/systemd/system.
type SystemdController struct {
	systemdDir string // directory unit files are written to
}

// NewSystemdController creates a controller targeting /etc/systemd/system.
func NewSystemdController() *SystemdController {
	return &SystemdController{
		systemdDir: "/etc/systemd/system",
	}
}
// WriteServiceUnit writes a systemd unit file named `name` into the
// controller's systemd directory with the 0644 mode systemd expects.
func (sc *SystemdController) WriteServiceUnit(name string, content string) error {
	target := filepath.Join(sc.systemdDir, name)
	if err := os.WriteFile(target, []byte(content), 0644); err != nil {
		return fmt.Errorf("failed to write unit file %s: %w", name, err)
	}
	return nil
}
// DaemonReload reloads the systemd manager so newly written or changed unit
// files take effect.
func (sc *SystemdController) DaemonReload() error {
	if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil {
		return fmt.Errorf("failed to reload systemd daemon: %w", err)
	}
	return nil
}
// EnableService marks the named unit to start on boot.
func (sc *SystemdController) EnableService(name string) error {
	if err := exec.Command("systemctl", "enable", name).Run(); err != nil {
		return fmt.Errorf("failed to enable service %s: %w", name, err)
	}
	return nil
}
// StartService starts the named unit immediately.
func (sc *SystemdController) StartService(name string) error {
	if err := exec.Command("systemctl", "start", name).Run(); err != nil {
		return fmt.Errorf("failed to start service %s: %w", name, err)
	}
	return nil
}
// RestartService restarts the named unit.
func (sc *SystemdController) RestartService(name string) error {
	if err := exec.Command("systemctl", "restart", name).Run(); err != nil {
		return fmt.Errorf("failed to restart service %s: %w", name, err)
	}
	return nil
}
// StopService stops the named unit.
func (sc *SystemdController) StopService(name string) error {
	if err := exec.Command("systemctl", "stop", name).Run(); err != nil {
		return fmt.Errorf("failed to stop service %s: %w", name, err)
	}
	return nil
}
// StatusService reports whether the named unit is active.
//
// `systemctl is-active --quiet` exits 0 when the unit is active and 3 when it
// is inactive; other failures (e.g. systemctl missing, permission denied) are
// returned as errors. The exit code is inspected via *exec.ExitError instead
// of matching the error string "exit status 3", which was fragile.
func (sc *SystemdController) StatusService(name string) (bool, error) {
	cmd := exec.Command("systemctl", "is-active", "--quiet", name)
	err := cmd.Run()
	if err == nil {
		return true, nil // unit is active
	}
	// Exit code 3 means the unit exists but is not active.
	if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 3 {
		return false, nil
	}
	return false, fmt.Errorf("failed to check service status %s: %w", name, err)
}

View File

@ -0,0 +1,114 @@
package production
import (
"strings"
"testing"
)
// TestGenerateRQLiteService verifies RQLite service generation with advertise IP and join address.
//
// Table-driven: for each case it checks that the generated unit advertises
// the expected HTTP (5001) and Raft (7001) addresses, and that the -join flag
// (and the join address itself) appears exactly when a joinAddr is supplied.
func TestGenerateRQLiteService(t *testing.T) {
	tests := []struct {
		name              string // subtest label
		nodeType          string // "bootstrap" or "node"
		joinAddr          string // empty means standalone / first node
		advertiseIP       string // empty should fall back to 127.0.0.1
		expectJoinInUnit  bool
		expectAdvertiseIP string
	}{
		{
			name:              "bootstrap with localhost advertise",
			nodeType:          "bootstrap",
			joinAddr:          "",
			advertiseIP:       "",
			expectJoinInUnit:  false,
			expectAdvertiseIP: "127.0.0.1",
		},
		{
			name:              "bootstrap with public IP advertise",
			nodeType:          "bootstrap",
			joinAddr:          "",
			advertiseIP:       "10.0.0.1",
			expectJoinInUnit:  false,
			expectAdvertiseIP: "10.0.0.1",
		},
		{
			name:              "node joining cluster",
			nodeType:          "node",
			joinAddr:          "10.0.0.1:7001",
			advertiseIP:       "10.0.0.2",
			expectJoinInUnit:  true,
			expectAdvertiseIP: "10.0.0.2",
		},
		{
			name:              "node with localhost (should still include join)",
			nodeType:          "node",
			joinAddr:          "localhost:7001",
			advertiseIP:       "127.0.0.1",
			expectJoinInUnit:  true,
			expectAdvertiseIP: "127.0.0.1",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Construct the generator directly; no constructor needed in tests.
			ssg := &SystemdServiceGenerator{
				debrosHome: "/home/debros",
				debrosDir:  "/home/debros/.debros",
			}
			unit := ssg.GenerateRQLiteService(tt.nodeType, "/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
			// Check advertise IP is present
			expectedAdvertise := tt.expectAdvertiseIP + ":5001"
			if !strings.Contains(unit, expectedAdvertise) {
				t.Errorf("expected advertise address %q in unit, got:\n%s", expectedAdvertise, unit)
			}
			// Check raft advertise IP is present
			expectedRaftAdvertise := tt.expectAdvertiseIP + ":7001"
			if !strings.Contains(unit, expectedRaftAdvertise) {
				t.Errorf("expected raft advertise address %q in unit, got:\n%s", expectedRaftAdvertise, unit)
			}
			// Check join flag presence
			hasJoin := strings.Contains(unit, "-join")
			if hasJoin != tt.expectJoinInUnit {
				t.Errorf("expected join in unit: %v, hasJoin: %v\nUnit:\n%s", tt.expectJoinInUnit, hasJoin, unit)
			}
			if tt.expectJoinInUnit && tt.joinAddr != "" && !strings.Contains(unit, tt.joinAddr) {
				t.Errorf("expected join address %q in unit, not found", tt.joinAddr)
			}
		})
	}
}
// TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments
// for a joining node: bind/advertise addresses plus the join flags.
func TestGenerateRQLiteServiceArgs(t *testing.T) {
	gen := &SystemdServiceGenerator{
		debrosHome: "/home/debros",
		debrosDir:  "/home/debros/.debros",
	}
	unit := gen.GenerateRQLiteService("node", "/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
	// Every rqlited flag required to bind, advertise, and join the cluster
	// must appear verbatim in the rendered unit.
	required := []string{
		"-http-addr 0.0.0.0:5001",
		"-http-adv-addr 10.0.0.2:5001",
		"-raft-addr 0.0.0.0:7001",
		"-raft-adv-addr 10.0.0.2:7001",
		"-join 10.0.0.1:7001",
		"-join-attempts 30",
	}
	for _, flag := range required {
		if !strings.Contains(unit, flag) {
			t.Errorf("missing %s", flag)
		}
	}
}

View File

@ -0,0 +1,43 @@
# DeBros bootstrap node configuration (Go text/template).
# Rendered by templates.RenderBootstrapConfig with BootstrapConfigData.
node:
  id: "{{.NodeID}}"
  # Bootstrap nodes seed peer discovery for the rest of the cluster.
  type: "bootstrap"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
  data_dir: "{{.DataDir}}"
  max_connections: 50
database:
  data_dir: "{{.DataDir}}/rqlite"
  replication_factor: 3
  shard_count: 16
  # 1073741824 bytes = 1 GiB size cap.
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: {{.RQLiteHTTPPort}}
  rqlite_raft_port: {{.RQLiteRaftPort}}
  # Empty for the first bootstrap; secondary bootstraps join an existing cluster.
  rqlite_join_address: "{{.RQLiteJoinAddress}}"
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 3
ipfs:
  cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
  api_url: "http://localhost:{{.IPFSAPIPort}}"
  timeout: "60s"
  replication_factor: 3
  enable_encryption: true
discovery:
  bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
  discovery_interval: "15s"
  bootstrap_port: {{.P2PPort}}
  # Addresses other cluster members use to reach this node's RQLite.
  http_adv_address: "{{.HTTPAdvAddress}}"
  raft_adv_address: "{{.RaftAdvAddress}}"
  node_namespace: "default"
security:
  enable_tls: false
logging:
  level: "info"
  format: "console"

View File

@ -0,0 +1,19 @@
# DeBros gateway configuration (Go text/template).
# Rendered by templates.RenderGatewayConfig with GatewayConfigData.
listen_addr: ":{{.ListenPort}}"
client_namespace: "default"
rqlite_dsn: "{{.RQLiteDSN}}"
bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
# The HTTPS block (domain + cert cache) is emitted only when EnableHTTPS is true.
enable_https: {{.EnableHTTPS}}
{{if .EnableHTTPS}}domain_name: "{{.DomainName}}"
tls_cache_dir: "{{.TLSCacheDir}}"
{{end}}
olric_servers:
{{range .OlricServers}} - "{{.}}"
{{end}}
olric_timeout: "10s"
ipfs_cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
ipfs_api_url: "http://localhost:{{.IPFSAPIPort}}"
ipfs_timeout: "60s"
ipfs_replication_factor: 3

View File

@ -0,0 +1,44 @@
# DeBros regular-node configuration (Go text/template).
# Rendered by templates.RenderNodeConfig with NodeConfigData.
# Mirrors bootstrap.yaml except node.type and the required join address.
node:
  id: "{{.NodeID}}"
  type: "node"
  listen_addresses:
    - "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
  data_dir: "{{.DataDir}}"
  max_connections: 50
database:
  data_dir: "{{.DataDir}}/rqlite"
  replication_factor: 3
  shard_count: 16
  # 1073741824 bytes = 1 GiB size cap.
  max_database_size: 1073741824
  backup_interval: "24h"
  rqlite_port: {{.RQLiteHTTPPort}}
  rqlite_raft_port: {{.RQLiteRaftPort}}
  # Address of an existing cluster member this node's RQLite joins.
  rqlite_join_address: "{{.RQLiteJoinAddress}}"
  cluster_sync_interval: "30s"
  peer_inactivity_limit: "24h"
  min_cluster_size: 3
ipfs:
  cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
  api_url: "http://localhost:{{.IPFSAPIPort}}"
  timeout: "60s"
  replication_factor: 3
  enable_encryption: true
discovery:
  bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
  discovery_interval: "15s"
  bootstrap_port: {{.P2PPort}}
  # Addresses other cluster members use to reach this node's RQLite.
  http_adv_address: "{{.HTTPAdvAddress}}"
  raft_adv_address: "{{.RaftAdvAddress}}"
  node_namespace: "default"
security:
  enable_tls: false
logging:
  level: "info"
  format: "console"

View File

@ -0,0 +1,8 @@
# Olric cache server configuration (Go text/template).
# Rendered by templates.RenderOlricConfig with OlricConfigData.
# Placeholders use canonical {{.Var}} syntax; the previous spaced
# "{ { .Var } }" form only worked because renderTemplate normalizes it.
server:
  bindAddr: "{{.BindAddr}}"
  bindPort: {{.HTTPPort}}
memberlist:
  environment: local
  bindAddr: "{{.BindAddr}}"
  bindPort: {{.MemberlistPort}}

View File

@ -0,0 +1,197 @@
package templates
import (
"bytes"
"embed"
"fmt"
"regexp"
"text/template"
)
//go:embed *.yaml *.service
var templatesFS embed.FS
// BootstrapConfigData holds parameters for bootstrap.yaml rendering.
type BootstrapConfigData struct {
	NodeID            string
	P2PPort           int
	DataDir           string
	RQLiteHTTPPort    int
	RQLiteRaftPort    int
	ClusterAPIPort    int
	IPFSAPIPort       int      // Default: 4501
	BootstrapPeers    []string // List of bootstrap peer multiaddrs
	RQLiteJoinAddress string   // Optional: join address for secondary bootstraps
	HTTPAdvAddress    string   // Advertised HTTP address (IP:port)
	RaftAdvAddress    string   // Advertised Raft address (IP:port)
}
// NodeConfigData holds parameters for node.yaml rendering.
// Unlike BootstrapConfigData, RQLiteJoinAddress is expected for regular nodes.
type NodeConfigData struct {
	NodeID            string
	P2PPort           int
	DataDir           string
	RQLiteHTTPPort    int
	RQLiteRaftPort    int
	RQLiteJoinAddress string
	BootstrapPeers    []string
	ClusterAPIPort    int
	IPFSAPIPort       int // Default: 4501+
	HTTPAdvAddress    string // Advertised HTTP address (IP:port)
	RaftAdvAddress    string // Advertised Raft address (IP:port)
}
// GatewayConfigData holds parameters for gateway.yaml rendering.
type GatewayConfigData struct {
	ListenPort     int
	BootstrapPeers []string
	OlricServers   []string
	ClusterAPIPort int
	IPFSAPIPort    int // Default: 4501
	EnableHTTPS    bool
	DomainName     string // Used only when EnableHTTPS is true
	TLSCacheDir    string // Used only when EnableHTTPS is true
	RQLiteDSN      string
}
// OlricConfigData holds parameters for olric.yaml rendering.
type OlricConfigData struct {
	BindAddr       string
	HTTPPort       int
	MemberlistPort int
}
// SystemdIPFSData holds parameters for systemd IPFS service rendering.
type SystemdIPFSData struct {
	NodeType     string // "bootstrap" or "node"; used in unit names/identifiers
	HomeDir      string
	IPFSRepoPath string
	SecretsDir   string // Directory holding swarm.key, copied into the repo at start
	DebrosDir    string
}
// SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering.
type SystemdIPFSClusterData struct {
	NodeType    string
	HomeDir     string
	ClusterPath string // CLUSTER_PATH; service.json lives here
	DebrosDir   string
}
// SystemdRQLiteData holds parameters for systemd RQLite service rendering.
type SystemdRQLiteData struct {
	NodeType string
	HomeDir  string
	HTTPPort int
	RaftPort int
	DataDir  string
	JoinAddr string // Optional; when empty, no -join flags are rendered
	DebrosDir string
}
// SystemdOlricData holds parameters for systemd Olric service rendering.
type SystemdOlricData struct {
	HomeDir    string
	ConfigPath string // Exported as OLRIC_SERVER_CONFIG
	DebrosDir  string
}
// SystemdNodeData holds parameters for systemd Node service rendering.
type SystemdNodeData struct {
	NodeType   string
	HomeDir    string
	ConfigFile string // File name under {{.DebrosDir}}/configs
	DebrosDir  string
}
// SystemdGatewayData holds parameters for systemd Gateway service rendering.
type SystemdGatewayData struct {
	HomeDir   string
	DebrosDir string
}
// RenderBootstrapConfig renders the bootstrap config template (bootstrap.yaml)
// with the given data. Returns the rendered YAML or an error.
func RenderBootstrapConfig(data BootstrapConfigData) (string, error) {
	return renderTemplate("bootstrap.yaml", data)
}
// RenderNodeConfig renders the node config template (node.yaml) with the given data.
func RenderNodeConfig(data NodeConfigData) (string, error) {
	return renderTemplate("node.yaml", data)
}
// RenderGatewayConfig renders the gateway config template (gateway.yaml) with the given data.
func RenderGatewayConfig(data GatewayConfigData) (string, error) {
	return renderTemplate("gateway.yaml", data)
}
// RenderOlricConfig renders the olric config template (olric.yaml) with the given data.
func RenderOlricConfig(data OlricConfigData) (string, error) {
	return renderTemplate("olric.yaml", data)
}
// RenderIPFSService renders the IPFS systemd service template.
func RenderIPFSService(data SystemdIPFSData) (string, error) {
	return renderTemplate("systemd_ipfs.service", data)
}
// RenderIPFSClusterService renders the IPFS Cluster systemd service template.
func RenderIPFSClusterService(data SystemdIPFSClusterData) (string, error) {
	return renderTemplate("systemd_ipfs_cluster.service", data)
}
// RenderRQLiteService renders the RQLite systemd service template.
func RenderRQLiteService(data SystemdRQLiteData) (string, error) {
	return renderTemplate("systemd_rqlite.service", data)
}
// RenderOlricService renders the Olric systemd service template.
func RenderOlricService(data SystemdOlricData) (string, error) {
	return renderTemplate("systemd_olric.service", data)
}
// RenderNodeService renders the DeBros Node systemd service template.
func RenderNodeService(data SystemdNodeData) (string, error) {
	return renderTemplate("systemd_node.service", data)
}
// RenderGatewayService renders the DeBros Gateway systemd service template.
func RenderGatewayService(data SystemdGatewayData) (string, error) {
	return renderTemplate("systemd_gateway.service", data)
}
// normalizeTemplate rewrites spaced placeholders such as "{ { .Var } }"
// (a form some formatters produce when they mangle Go templates) into the
// canonical "{{.Var}}" so text/template can parse them. Only template
// variable references (tokens starting with '.') are rewritten; other
// brace sequences are left untouched.
func normalizeTemplate(content string) string {
	spaced := regexp.MustCompile(`\{\s*\{\s*(\.\S+)\s*\}\s*\}`)
	return spaced.ReplaceAllString(content, "{{$1}}")
}
// renderTemplate loads the named template from the embedded FS, normalizes
// placeholder spacing, parses it, and executes it against data, returning
// the rendered text. It accepts both "{ { .Var } }" and "{{.Var}}" styles.
func renderTemplate(name string, data interface{}) (string, error) {
	raw, err := templatesFS.ReadFile(name)
	if err != nil {
		return "", fmt.Errorf("failed to read template %s: %w", name, err)
	}
	// Normalize before parsing so mangled placeholders still parse.
	parsed, err := template.New(name).Parse(normalizeTemplate(string(raw)))
	if err != nil {
		return "", fmt.Errorf("failed to parse template %s: %w", name, err)
	}
	var out bytes.Buffer
	if err := parsed.Execute(&out, data); err != nil {
		return "", fmt.Errorf("failed to render template %s: %w", name, err)
	}
	return out.String(), nil
}

View File

@ -0,0 +1,166 @@
package templates
import (
"strings"
"testing"
)
// TestRenderBootstrapConfig checks that key fields survive rendering into bootstrap.yaml.
func TestRenderBootstrapConfig(t *testing.T) {
	data := BootstrapConfigData{
		NodeID:         "bootstrap",
		P2PPort:        4001,
		DataDir:        "/home/debros/.debros/bootstrap",
		RQLiteHTTPPort: 5001,
		RQLiteRaftPort: 7001,
		ClusterAPIPort: 9094,
		IPFSAPIPort:    5001,
	}
	result, err := RenderBootstrapConfig(data)
	if err != nil {
		t.Fatalf("RenderBootstrapConfig failed: %v", err)
	}
	// Each expected substring must appear verbatim in the rendered YAML.
	checks := []string{
		"id: \"bootstrap\"",
		"type: \"bootstrap\"",
		"tcp/4001",
		"rqlite_port: 5001",
		"rqlite_raft_port: 7001",
		"cluster_api_url: \"http://localhost:9094\"",
		"api_url: \"http://localhost:5001\"",
	}
	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Bootstrap config missing: %s", check)
		}
	}
}
// TestRenderNodeConfig checks node.yaml rendering, including the join address
// and that the bootstrap peer multiaddr is emitted by the range block.
func TestRenderNodeConfig(t *testing.T) {
	bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
	data := NodeConfigData{
		NodeID:            "node2",
		P2PPort:           4002,
		DataDir:           "/home/debros/.debros/node2",
		RQLiteHTTPPort:    5002,
		RQLiteRaftPort:    7002,
		RQLiteJoinAddress: "localhost:5001",
		BootstrapPeers:    []string{bootstrapMultiaddr},
		ClusterAPIPort:    9104,
		IPFSAPIPort:       5002,
	}
	result, err := RenderNodeConfig(data)
	if err != nil {
		t.Fatalf("RenderNodeConfig failed: %v", err)
	}
	// Each expected substring must appear verbatim in the rendered YAML.
	checks := []string{
		"id: \"node2\"",
		"type: \"node\"",
		"tcp/4002",
		"rqlite_port: 5002",
		"rqlite_raft_port: 7002",
		"rqlite_join_address: \"localhost:5001\"",
		bootstrapMultiaddr,
		"cluster_api_url: \"http://localhost:9104\"",
	}
	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Node config missing: %s", check)
		}
	}
}
// TestRenderGatewayConfig checks gateway.yaml rendering with peers and Olric servers.
func TestRenderGatewayConfig(t *testing.T) {
	bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
	data := GatewayConfigData{
		ListenPort:     6001,
		BootstrapPeers: []string{bootstrapMultiaddr},
		OlricServers:   []string{"127.0.0.1:3320"},
		ClusterAPIPort: 9094,
		IPFSAPIPort:    5001,
	}
	result, err := RenderGatewayConfig(data)
	if err != nil {
		t.Fatalf("RenderGatewayConfig failed: %v", err)
	}
	// Each expected substring must appear verbatim in the rendered YAML.
	checks := []string{
		"listen_addr: \":6001\"",
		bootstrapMultiaddr,
		"127.0.0.1:3320",
		"ipfs_cluster_api_url: \"http://localhost:9094\"",
		"ipfs_api_url: \"http://localhost:5001\"",
	}
	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Gateway config missing: %s", check)
		}
	}
}
// TestRenderOlricConfig checks olric.yaml rendering; in particular it exercises
// the spaced-placeholder normalization for the bindPort fields.
func TestRenderOlricConfig(t *testing.T) {
	data := OlricConfigData{
		BindAddr:       "127.0.0.1",
		HTTPPort:       3320,
		MemberlistPort: 3322,
	}
	result, err := RenderOlricConfig(data)
	if err != nil {
		t.Fatalf("RenderOlricConfig failed: %v", err)
	}
	// Each expected substring must appear verbatim in the rendered YAML.
	checks := []string{
		"bindAddr: \"127.0.0.1\"",
		"bindPort: 3320",
		"memberlist",
		"bindPort: 3322",
	}
	for _, check := range checks {
		if !strings.Contains(result, check) {
			t.Errorf("Olric config missing: %s", check)
		}
	}
}
// TestRenderWithMultipleBootstrapPeers verifies the {{range .BootstrapPeers}}
// block emits one entry per peer.
func TestRenderWithMultipleBootstrapPeers(t *testing.T) {
	peers := []string{
		"/ip4/127.0.0.1/tcp/4001/p2p/Qm1111",
		"/ip4/127.0.0.1/tcp/4002/p2p/Qm2222",
	}
	data := NodeConfigData{
		NodeID:            "node-test",
		P2PPort:           4002,
		DataDir:           "/test/data",
		RQLiteHTTPPort:    5002,
		RQLiteRaftPort:    7002,
		RQLiteJoinAddress: "localhost:5001",
		BootstrapPeers:    peers,
		ClusterAPIPort:    9104,
		IPFSAPIPort:       5002,
	}
	result, err := RenderNodeConfig(data)
	if err != nil {
		t.Fatalf("RenderNodeConfig with multiple peers failed: %v", err)
	}
	for _, peer := range peers {
		if !strings.Contains(result, peer) {
			t.Errorf("Bootstrap peer missing: %s", peer)
		}
	}
}

View File

@ -0,0 +1,29 @@
# systemd unit template for the DeBros Gateway.
# Rendered via SystemdGatewayData ({{.HomeDir}}, {{.DebrosDir}}).
[Unit]
Description=DeBros Gateway
# Orders the gateway after the regular node service it fronts.
After=debros-node-node.service
Wants=debros-node-node.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/gateway --config {{.DebrosDir}}/data/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=debros-gateway
# Allows binding ports <1024 (e.g. 80/443 for HTTPS) without running as root.
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
# Read-only filesystem except for the DeBros data directory.
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,27 @@
# systemd unit template for the IPFS daemon.
# Rendered via SystemdIPFSData; {{.NodeType}} distinguishes bootstrap vs node units.
[Unit]
Description=IPFS Daemon ({{.NodeType}})
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
Environment=IPFS_PATH={{.IPFSRepoPath}}
# Copies the private-network swarm.key into the repo once, before first start.
ExecStartPre=/bin/bash -c 'if [ -f {{.SecretsDir}}/swarm.key ] && [ ! -f {{.IPFSRepoPath}}/swarm.key ]; then cp {{.SecretsDir}}/swarm.key {{.IPFSRepoPath}}/swarm.key && chmod 600 {{.IPFSRepoPath}}/swarm.key; fi'
# NOTE(review): IPFS_PATH above already selects the repo; confirm the installed
# kubo version supports the --repo-dir flag, otherwise this may be redundant or rejected.
ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir={{.IPFSRepoPath}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,28 @@
# systemd unit template for IPFS Cluster.
# Rendered via SystemdIPFSClusterData; hard-depends on the matching IPFS daemon unit.
[Unit]
Description=IPFS Cluster Service ({{.NodeType}})
After=debros-ipfs-{{.NodeType}}.service
Wants=debros-ipfs-{{.NodeType}}.service
# Requires= makes this unit stop if the IPFS daemon unit stops.
Requires=debros-ipfs-{{.NodeType}}.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
Environment=CLUSTER_PATH={{.ClusterPath}}
ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config {{.ClusterPath}}/service.json
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-cluster-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,27 @@
# systemd unit template for the DeBros network node binary.
# Rendered via SystemdNodeData; hard-depends on the matching IPFS Cluster unit.
[Unit]
Description=DeBros Network Node ({{.NodeType}})
After=debros-ipfs-cluster-{{.NodeType}}.service
Wants=debros-ipfs-cluster-{{.NodeType}}.service
Requires=debros-ipfs-cluster-{{.NodeType}}.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/node --config {{.DebrosDir}}/configs/{{.ConfigFile}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=debros-node-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,26 @@
# systemd unit template for the Olric distributed cache server.
# Rendered via SystemdOlricData; configuration is passed via environment.
[Unit]
Description=Olric Cache Server
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
# olric-server reads its config path from OLRIC_SERVER_CONFIG (no CLI flag used).
Environment=OLRIC_SERVER_CONFIG={{.ConfigPath}}
ExecStart=/usr/local/bin/olric-server
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=olric
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,25 @@
[Unit]
Description=RQLite Database ({{.NodeType}})
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:{{.HTTPPort}} -http-adv-addr 127.0.0.1:{{.HTTPPort}} -raft-adv-addr 127.0.0.1:{{.RaftPort}} -raft-addr 0.0.0.0:{{.RaftPort}}{{if .JoinAddr}} -join {{.JoinAddr}} -join-attempts 30 -join-interval 10s{{end}} {{.DataDir}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target

View File

@ -234,7 +234,7 @@ func isPrivateOrLocalHost(host string) bool {
}
// Check for localhost variants
if host == "localhost" || host == "127.0.0.1" || host == "::1" {
if host == "localhost" || host == "::1" {
return true
}

View File

@ -92,7 +92,7 @@ func TestAnonProxyHandler_PrivateAddressBlocking(t *testing.T) {
url string
}{
{"localhost", "http://localhost/test"},
{"127.0.0.1", "http://127.0.0.1/test"},
{"localhost", "http://localhost/test"},
{"private 10.x", "http://10.0.0.1/test"},
{"private 192.168.x", "http://192.168.1.1/test"},
{"private 172.16.x", "http://172.16.0.1/test"},
@ -166,7 +166,7 @@ func TestIsPrivateOrLocalHost(t *testing.T) {
expected bool
}{
{"localhost", true},
{"127.0.0.1", true},
{"localhost", true},
{"::1", true},
{"10.0.0.1", true},
{"192.168.1.1", true},

View File

@ -1125,6 +1125,108 @@ func (g *Gateway) logoutHandler(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusBadRequest, "nothing to revoke: provide refresh_token or all=true")
}
// simpleAPIKeyHandler creates an API key directly from a wallet address without signature verification
// This is a simplified flow for development/testing
// Requires: POST { wallet, namespace }
//
// SECURITY NOTE: because no wallet signature is checked, any caller can mint a
// key for any wallet string — keep this endpoint out of production deployments.
//
// Flow: resolve/create the namespace, reuse an existing key for
// (namespace, wallet) if one exists, otherwise generate a random key,
// link it to the wallet, and record namespace ownership (best-effort).
func (g *Gateway) simpleAPIKeyHandler(w http.ResponseWriter, r *http.Request) {
	if g.client == nil {
		writeError(w, http.StatusServiceUnavailable, "client not initialized")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req struct {
		Wallet    string `json:"wallet"`
		Namespace string `json:"namespace"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid json body")
		return
	}
	if strings.TrimSpace(req.Wallet) == "" {
		writeError(w, http.StatusBadRequest, "wallet is required")
		return
	}
	// Namespace precedence: request body > gateway config > "default".
	ns := strings.TrimSpace(req.Namespace)
	if ns == "" {
		ns = strings.TrimSpace(g.cfg.ClientNamespace)
		if ns == "" {
			ns = "default"
		}
	}
	ctx := r.Context()
	// Internal-auth context bypasses per-request API-key checks for these queries.
	internalCtx := client.WithInternalAuth(ctx)
	db := g.client.Database()
	// Resolve or create namespace
	if _, err := db.Query(internalCtx, "INSERT OR IGNORE INTO namespaces(name) VALUES (?)", ns); err != nil {
		writeError(w, http.StatusInternalServerError, err.Error())
		return
	}
	nres, err := db.Query(internalCtx, "SELECT id FROM namespaces WHERE name = ? LIMIT 1", ns)
	if err != nil || nres == nil || nres.Count == 0 || len(nres.Rows) == 0 || len(nres.Rows[0]) == 0 {
		writeError(w, http.StatusInternalServerError, "failed to resolve namespace")
		return
	}
	nsID := nres.Rows[0][0]
	// Check if api key already exists for (namespace, wallet); wallet match is case-insensitive.
	var apiKey string
	r1, err := db.Query(internalCtx,
		"SELECT api_keys.key FROM wallet_api_keys JOIN api_keys ON wallet_api_keys.api_key_id = api_keys.id WHERE wallet_api_keys.namespace_id = ? AND LOWER(wallet_api_keys.wallet) = LOWER(?) LIMIT 1",
		nsID, req.Wallet,
	)
	if err == nil && r1 != nil && r1.Count > 0 && len(r1.Rows) > 0 && len(r1.Rows[0]) > 0 {
		if s, ok := r1.Rows[0][0].(string); ok {
			apiKey = s
		} else {
			// Non-string driver value: round-trip through JSON to coerce to string.
			b, _ := json.Marshal(r1.Rows[0][0])
			_ = json.Unmarshal(b, &apiKey)
		}
	}
	// If no existing key, create a new one
	if strings.TrimSpace(apiKey) == "" {
		// 18 random bytes -> 24 base64url chars; key format "ak_<random>:<namespace>".
		buf := make([]byte, 18)
		if _, err := rand.Read(buf); err != nil {
			writeError(w, http.StatusInternalServerError, "failed to generate api key")
			return
		}
		apiKey = "ak_" + base64.RawURLEncoding.EncodeToString(buf) + ":" + ns
		if _, err := db.Query(internalCtx, "INSERT INTO api_keys(key, name, namespace_id) VALUES (?, ?, ?)", apiKey, "", nsID); err != nil {
			writeError(w, http.StatusInternalServerError, err.Error())
			return
		}
		// Link wallet to api key (stored lowercased; lookup above is case-insensitive).
		rid, err := db.Query(internalCtx, "SELECT id FROM api_keys WHERE key = ? LIMIT 1", apiKey)
		if err == nil && rid != nil && rid.Count > 0 && len(rid.Rows) > 0 && len(rid.Rows[0]) > 0 {
			apiKeyID := rid.Rows[0][0]
			_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO wallet_api_keys(namespace_id, wallet, api_key_id) VALUES (?, ?, ?)", nsID, strings.ToLower(req.Wallet), apiKeyID)
		}
	}
	// Record ownerships (best-effort)
	// NOTE(review): ownership stores the raw wallet while wallet_api_keys stores
	// the lowercased form and the response strips any 0x/0X prefix — confirm
	// consumers expect these differing representations.
	_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'api_key', ?)", nsID, apiKey)
	_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'wallet', ?)", nsID, req.Wallet)
	writeJSON(w, http.StatusOK, map[string]any{
		"api_key":   apiKey,
		"namespace": ns,
		"wallet":    strings.ToLower(strings.TrimPrefix(strings.TrimPrefix(req.Wallet, "0x"), "0X")),
		"created":   time.Now().Format(time.RFC3339),
	})
}
// base58Decode decodes a base58-encoded string (Bitcoin alphabet)
// Used for decoding Solana public keys (base58-encoded 32-byte ed25519 public keys)
func base58Decode(encoded string) ([]byte, error) {

View File

@ -0,0 +1,456 @@
package gateway
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/logging"
olriclib "github.com/olric-data/olric"
"go.uber.org/zap"
)
// Cache HTTP handlers for Olric distributed cache
// cacheHealthHandler reports whether the Olric cache backend is reachable.
// Responds 200 {"status":"ok","service":"olric"} on success, 503 otherwise.
func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
	if g.olricClient == nil {
		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
		return
	}
	// Bound the health probe so a hung cache cannot stall the handler.
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()
	if err := g.olricClient.Health(ctx); err != nil {
		writeError(w, http.StatusServiceUnavailable, fmt.Sprintf("cache health check failed: %v", err))
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"status":  "ok",
		"service": "olric",
	})
}
// cacheGetHandler retrieves a single key from a named Olric DMap.
// Request: POST {"dmap": string, "key": string}
// Responses: 200 {key, value, dmap}; 400 on bad input; 404 when the key is
// absent; 503 when the cache client is not configured.
func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) {
	if g.olricClient == nil {
		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req struct {
		DMap string `json:"dmap"` // Distributed map name
		Key  string `json:"key"`  // Key to retrieve
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid json body")
		return
	}
	if strings.TrimSpace(req.DMap) == "" || strings.TrimSpace(req.Key) == "" {
		writeError(w, http.StatusBadRequest, "dmap and key are required")
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()
	client := g.olricClient.GetClient()
	dm, err := client.NewDMap(req.DMap)
	if err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
		return
	}
	gr, err := dm.Get(ctx, req.Key)
	if err != nil {
		// Check for key not found error - handle both wrapped and direct errors
		if errors.Is(err, olriclib.ErrKeyNotFound) || err.Error() == "key not found" || strings.Contains(err.Error(), "key not found") {
			writeError(w, http.StatusNotFound, "key not found")
			return
		}
		g.logger.ComponentError(logging.ComponentGeneral, "failed to get key from cache",
			zap.String("dmap", req.DMap),
			zap.String("key", req.Key),
			zap.Error(err))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get key: %v", err))
		return
	}
	value, err := decodeValueFromOlric(gr)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to decode value from cache",
			zap.String("dmap", req.DMap),
			zap.String("key", req.Key),
			zap.Error(err))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to decode value: %v", err))
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"key":   req.Key,
		"value": value,
		"dmap":  req.DMap,
	})
}
// decodeValueFromOlric decodes a value from Olric GetResponse
// Handles JSON-serialized complex types and basic types (string, number, bool)
//
// Decode order: raw bytes (then attempt JSON) -> string -> generic scan ->
// best-effort string. NOTE: the error result is currently always nil —
// every decode failure falls through to the next fallback.
func decodeValueFromOlric(gr *olriclib.GetResponse) (any, error) {
	var value any
	// First, try to get as bytes (for JSON-serialized complex types)
	var bytesVal []byte
	if err := gr.Scan(&bytesVal); err == nil && len(bytesVal) > 0 {
		// Try to deserialize as JSON
		var jsonVal any
		if err := json.Unmarshal(bytesVal, &jsonVal); err == nil {
			value = jsonVal
		} else {
			// If JSON unmarshal fails, treat as string
			value = string(bytesVal)
		}
	} else {
		// Try as string (for simple string values)
		if strVal, err := gr.String(); err == nil {
			value = strVal
		} else {
			// Fallback: try to scan as any type
			var anyVal any
			if err := gr.Scan(&anyVal); err == nil {
				value = anyVal
			} else {
				// Last resort: try String() again, ignoring error
				strVal, _ := gr.String()
				value = strVal
			}
		}
	}
	return value, nil
}
// cacheMultiGetHandler retrieves several keys from a named Olric DMap in a
// single request. Missing or undecodable keys are silently skipped, so the
// response contains only the keys that were found (the SDK contract).
// Request: POST {"dmap": string, "keys": [string, ...]}
// Responses: 200 {results: [{key, value}, ...], dmap}; 400 on bad input;
// 503 when the cache client is not configured.
func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
	if g.olricClient == nil {
		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req struct {
		DMap string   `json:"dmap"` // Distributed map name
		Keys []string `json:"keys"` // Keys to retrieve
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid json body")
		return
	}
	if strings.TrimSpace(req.DMap) == "" {
		writeError(w, http.StatusBadRequest, "dmap is required")
		return
	}
	if len(req.Keys) == 0 {
		writeError(w, http.StatusBadRequest, "keys array is required and cannot be empty")
		return
	}
	// Longer timeout than single-get since this performs one Get per key.
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	client := g.olricClient.GetClient()
	dm, err := client.NewDMap(req.DMap)
	if err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
		return
	}
	// Pre-allocate a non-nil slice so an all-miss request serializes as
	// "results": [] rather than "results": null.
	results := make([]map[string]any, 0, len(req.Keys))
	for _, key := range req.Keys {
		if strings.TrimSpace(key) == "" {
			continue // Skip empty keys
		}
		gr, err := dm.Get(ctx, key)
		if err != nil {
			// Use errors.Is (consistent with cacheGetHandler) so wrapped
			// not-found errors are recognized too; missing keys are simply
			// omitted from the results.
			if errors.Is(err, olriclib.ErrKeyNotFound) {
				continue
			}
			// For other errors, skip this key but continue with the rest —
			// one bad key should not fail the entire request.
			continue
		}
		value, err := decodeValueFromOlric(gr)
		if err != nil {
			// If we can't decode, skip this key
			continue
		}
		results = append(results, map[string]any{
			"key":   key,
			"value": value,
		})
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"results": results,
		"dmap":    req.DMap,
	})
}
// cachePutHandler stores a key/value pair in a named Olric DMap.
// Request: POST {"dmap": string, "key": string, "value": any, "ttl": "1h"?}
// Basic JSON scalar types are stored directly; maps, slices, and anything
// else are serialized to JSON bytes first. TTL is validated but not yet
// applied (pending confirmation of the Olric v0.7 expiry API).
// Responses: 200 {status, key, dmap}; 400 on bad input; 503 when the cache
// client is not configured.
func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
	if g.olricClient == nil {
		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req struct {
		DMap  string `json:"dmap"`  // Distributed map name
		Key   string `json:"key"`   // Key to store
		Value any    `json:"value"` // Value to store
		TTL   string `json:"ttl"`   // Optional TTL (duration string like "1h", "30m")
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid json body")
		return
	}
	if strings.TrimSpace(req.DMap) == "" || strings.TrimSpace(req.Key) == "" {
		writeError(w, http.StatusBadRequest, "dmap and key are required")
		return
	}
	if req.Value == nil {
		writeError(w, http.StatusBadRequest, "value is required")
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()
	client := g.olricClient.GetClient()
	dm, err := client.NewDMap(req.DMap)
	if err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
		return
	}
	// TODO: TTL support - need to check Olric v0.7 API for TTL/expiry options.
	// For now the duration is only validated, then ignored.
	if req.TTL != "" {
		if _, err := time.ParseDuration(req.TTL); err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("invalid ttl format: %v", err))
			return
		}
	}
	// Olric stores basic scalars directly; complex values are serialized to
	// JSON bytes (decodeValueFromOlric reverses this on read).
	var valueToStore any
	switch req.Value.(type) {
	case string, float64, int, int64, bool, nil:
		valueToStore = req.Value
	default:
		// Maps, slices, and any other shape: serialize to JSON bytes.
		jsonBytes, err := json.Marshal(req.Value)
		if err != nil {
			writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to marshal value: %v", err))
			return
		}
		valueToStore = jsonBytes
	}
	if err := dm.Put(ctx, req.Key, valueToStore); err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to put key: %v", err))
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"status": "ok",
		"key":    req.Key,
		"dmap":   req.DMap,
	})
}
// cacheDeleteHandler handles POST /v1/cache/delete.
// It removes a single key from the named Olric distributed map (DMap).
//
// Request body (JSON): {"dmap": "<map name>", "key": "<key>"}
// Responses:
//   - 200 {"status":"ok","key":...,"dmap":...} on successful delete
//   - 400 on malformed body or missing dmap/key
//   - 404 when the key does not exist
//   - 405 for non-POST methods
//   - 503 when the Olric client is not initialized
func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
	if g.olricClient == nil {
		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req struct {
		DMap string `json:"dmap"` // Distributed map name
		Key  string `json:"key"`  // Key to delete
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid json body")
		return
	}
	if strings.TrimSpace(req.DMap) == "" || strings.TrimSpace(req.Key) == "" {
		writeError(w, http.StatusBadRequest, "dmap and key are required")
		return
	}
	// Bound the delete so a slow or unreachable cluster cannot hang the request.
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()
	client := g.olricClient.GetClient()
	dm, err := client.NewDMap(req.DMap)
	if err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
		return
	}
	deletedCount, err := dm.Delete(ctx, req.Key)
	if err != nil {
		// Map "key not found" to 404. errors.Is handles wrapped sentinel errors;
		// strings.Contains covers errors that only carry the text. (The previous
		// exact-equality check on err.Error() was redundant — Contains subsumes it.)
		if errors.Is(err, olriclib.ErrKeyNotFound) || strings.Contains(err.Error(), "key not found") {
			writeError(w, http.StatusNotFound, "key not found")
			return
		}
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to delete key: %v", err))
		return
	}
	// Delete succeeded but removed nothing: the key was absent.
	if deletedCount == 0 {
		writeError(w, http.StatusNotFound, "key not found")
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"status": "ok",
		"key":    req.Key,
		"dmap":   req.DMap,
	})
}
// cacheScanHandler handles POST /v1/cache/scan.
// It lists the keys of an Olric distributed map, optionally filtered by a
// regex pattern.
//
// Request body (JSON): {"dmap": "<map name>", "match": "<optional regex>"}
// Responses:
//   - 200 {"keys":[...],"count":N,"dmap":...}
//   - 400 on malformed body or missing dmap
//   - 405 for non-POST methods
//   - 503 when the Olric client is not initialized
func (g *Gateway) cacheScanHandler(w http.ResponseWriter, r *http.Request) {
	if g.olricClient == nil {
		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req struct {
		DMap  string `json:"dmap"`  // Distributed map name
		Match string `json:"match"` // Optional regex pattern to match keys
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, "invalid json body")
		return
	}
	if strings.TrimSpace(req.DMap) == "" {
		writeError(w, http.StatusBadRequest, "dmap is required")
		return
	}
	// Scans may visit every partition, so allow a longer timeout than point ops.
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	client := g.olricClient.GetClient()
	dm, err := client.NewDMap(req.DMap)
	if err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
		return
	}
	var iterator olriclib.Iterator
	if req.Match != "" {
		iterator, err = dm.Scan(ctx, olriclib.Match(req.Match))
	} else {
		iterator, err = dm.Scan(ctx)
	}
	if err != nil {
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to scan: %v", err))
		return
	}
	defer iterator.Close()
	// Start from an empty (non-nil) slice so an empty scan serializes as
	// "keys": [] rather than "keys": null (a nil slice marshals to JSON null).
	keys := make([]string, 0)
	for iterator.Next() {
		keys = append(keys, iterator.Key())
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"keys":  keys,
		"count": len(keys),
		"dmap":  req.DMap,
	})
}

View File

@ -0,0 +1,202 @@
package gateway
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/olric"
"go.uber.org/zap"
)
// TestCacheHealthHandler verifies that the cache health endpoint reports
// 503 Service Unavailable with an error payload when no Olric client is set.
func TestCacheHealthHandler(t *testing.T) {
	log, _ := logging.NewDefaultLogger(logging.ComponentGeneral)
	// Gateway deliberately constructed without an Olric client.
	gw := &Gateway{
		logger: log,
		cfg: &Config{
			ListenAddr:      ":6001",
			ClientNamespace: "test",
		},
	}
	rec := httptest.NewRecorder()
	gw.cacheHealthHandler(rec, httptest.NewRequest(http.MethodGet, "/v1/cache/health", nil))
	if rec.Code != http.StatusServiceUnavailable {
		t.Errorf("expected status %d, got %d", http.StatusServiceUnavailable, rec.Code)
	}
	var body map[string]any
	if err := json.NewDecoder(rec.Body).Decode(&body); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	if body["error"] == nil {
		t.Error("expected error in response")
	}
}
// TestCacheGetHandler_MissingClient ensures the get endpoint returns 503
// when the gateway has no Olric client configured.
func TestCacheGetHandler_MissingClient(t *testing.T) {
	log, _ := logging.NewDefaultLogger(logging.ComponentGeneral)
	gw := &Gateway{
		logger: log,
		cfg:    &Config{ListenAddr: ":6001", ClientNamespace: "test"},
	}
	// A well-formed request body: failure must come from the missing client.
	payload, _ := json.Marshal(map[string]string{
		"dmap": "test-dmap",
		"key":  "test-key",
	})
	rec := httptest.NewRecorder()
	gw.cacheGetHandler(rec, httptest.NewRequest(http.MethodPost, "/v1/cache/get", bytes.NewReader(payload)))
	if rec.Code != http.StatusServiceUnavailable {
		t.Errorf("expected status %d, got %d", http.StatusServiceUnavailable, rec.Code)
	}
}
// TestCacheGetHandler_InvalidBody ensures a non-JSON request body is
// rejected with 400 Bad Request.
func TestCacheGetHandler_InvalidBody(t *testing.T) {
	log, _ := logging.NewDefaultLogger(logging.ComponentGeneral)
	gw := &Gateway{
		logger:      log,
		cfg:         &Config{ListenAddr: ":6001", ClientNamespace: "test"},
		olricClient: &olric.Client{}, // Mock client so the handler reaches body parsing
	}
	rec := httptest.NewRecorder()
	badBody := bytes.NewReader([]byte("invalid json"))
	gw.cacheGetHandler(rec, httptest.NewRequest(http.MethodPost, "/v1/cache/get", badBody))
	if rec.Code != http.StatusBadRequest {
		t.Errorf("expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
}
// TestCachePutHandler_MissingFields ensures the put endpoint rejects bodies
// that omit either required field (dmap or key) with 400 Bad Request.
func TestCachePutHandler_MissingFields(t *testing.T) {
	log, _ := logging.NewDefaultLogger(logging.ComponentGeneral)
	gw := &Gateway{
		logger:      log,
		cfg:         &Config{ListenAddr: ":6001", ClientNamespace: "test"},
		olricClient: &olric.Client{},
	}
	// Each case omits exactly one required field.
	cases := []map[string]string{
		{"key": "test-key"},   // missing dmap
		{"dmap": "test-dmap"}, // missing key
	}
	for _, c := range cases {
		payload, _ := json.Marshal(c)
		rec := httptest.NewRecorder()
		gw.cachePutHandler(rec, httptest.NewRequest(http.MethodPost, "/v1/cache/put", bytes.NewReader(payload)))
		if rec.Code != http.StatusBadRequest {
			t.Errorf("expected status %d, got %d", http.StatusBadRequest, rec.Code)
		}
	}
}
// TestCacheDeleteHandler_WrongMethod ensures the delete endpoint only
// accepts POST, answering 405 Method Not Allowed otherwise.
func TestCacheDeleteHandler_WrongMethod(t *testing.T) {
	log, _ := logging.NewDefaultLogger(logging.ComponentGeneral)
	gw := &Gateway{
		logger:      log,
		cfg:         &Config{ListenAddr: ":6001", ClientNamespace: "test"},
		olricClient: &olric.Client{},
	}
	rec := httptest.NewRecorder()
	gw.cacheDeleteHandler(rec, httptest.NewRequest(http.MethodGet, "/v1/cache/delete", nil))
	if rec.Code != http.StatusMethodNotAllowed {
		t.Errorf("expected status %d, got %d", http.StatusMethodNotAllowed, rec.Code)
	}
}
// TestCacheScanHandler_InvalidBody ensures a non-JSON scan request body is
// rejected with 400 Bad Request.
func TestCacheScanHandler_InvalidBody(t *testing.T) {
	log, _ := logging.NewDefaultLogger(logging.ComponentGeneral)
	gw := &Gateway{
		logger:      log,
		cfg:         &Config{ListenAddr: ":6001", ClientNamespace: "test"},
		olricClient: &olric.Client{},
	}
	rec := httptest.NewRecorder()
	badBody := bytes.NewReader([]byte("invalid"))
	gw.cacheScanHandler(rec, httptest.NewRequest(http.MethodPost, "/v1/cache/scan", badBody))
	if rec.Code != http.StatusBadRequest {
		t.Errorf("expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
}
// Test Olric client wrapper
// TestOlricClientConfig exercises construction of the Olric client with an
// empty (all-defaults) config and, if construction succeeds, closes it.
func TestOlricClientConfig(t *testing.T) {
	logger := zap.NewNop()
	// Test default servers
	cfg := olric.Config{}
	client, err := olric.NewClient(cfg, logger)
	if err == nil {
		// If client creation succeeds, test that it has default servers
		// This will fail if Olric server is not running, which is expected in tests
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel()
		_ = client.Close(ctx)
	}
	// NOTE(review): when NewClient fails (no local Olric server reachable) this
	// test asserts nothing — it only checks that construct/close do not panic.
}

View File

@ -5,18 +5,33 @@ import (
"crypto/rand"
"crypto/rsa"
"database/sql"
"fmt"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/olric"
"github.com/DeBrosOfficial/network/pkg/rqlite"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
_ "github.com/rqlite/gorqlite/stdlib"
)
const (
olricInitMaxAttempts = 5
olricInitInitialBackoff = 500 * time.Millisecond
olricInitMaxBackoff = 5 * time.Second
)
// Config holds configuration for the gateway server
type Config struct {
ListenAddr string
@ -31,6 +46,17 @@ type Config struct {
EnableHTTPS bool // Enable HTTPS with ACME (Let's Encrypt)
DomainName string // Domain name for HTTPS certificate
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.debros/tls-cache)
// Olric cache configuration
OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"]
OlricTimeout time.Duration // Timeout for Olric operations (default: 10s)
// IPFS Cluster configuration
IPFSClusterAPIURL string // IPFS Cluster HTTP API URL (e.g., "http://localhost:9094"). If empty, gateway will discover from node configs
IPFSAPIURL string // IPFS HTTP API URL for content retrieval (e.g., "http://localhost:5001"). If empty, gateway will discover from node configs
IPFSTimeout time.Duration // Timeout for IPFS operations (default: 60s)
IPFSReplicationFactor int // Replication factor for pins (default: 3)
IPFSEnableEncryption bool // Enable client-side encryption before upload (default: true, discovered from node configs)
}
type Gateway struct {
@ -46,6 +72,12 @@ type Gateway struct {
ormClient rqlite.Client
ormHTTP *rqlite.HTTPGateway
// Olric cache client
olricClient *olric.Client
// IPFS storage client
ipfsClient ipfs.IPFSClient
// Local pub/sub bypass for same-gateway subscribers
localSubscribers map[string][]*localSubscriber // topic+namespace -> subscribers
mu sync.RWMutex
@ -132,6 +164,135 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
)
}
logger.ComponentInfo(logging.ComponentGeneral, "Initializing Olric cache client...")
// Discover Olric servers dynamically from LibP2P peers if not explicitly configured
olricServers := cfg.OlricServers
if len(olricServers) == 0 {
logger.ComponentInfo(logging.ComponentGeneral, "Olric servers not configured, discovering from LibP2P peers...")
discovered := discoverOlricServers(c, logger.Logger)
if len(discovered) > 0 {
olricServers = discovered
logger.ComponentInfo(logging.ComponentGeneral, "Discovered Olric servers from LibP2P peers",
zap.Strings("servers", olricServers))
} else {
// Fallback to localhost for local development
olricServers = []string{"localhost:3320"}
logger.ComponentInfo(logging.ComponentGeneral, "No Olric servers discovered, using localhost fallback")
}
} else {
logger.ComponentInfo(logging.ComponentGeneral, "Using explicitly configured Olric servers",
zap.Strings("servers", olricServers))
}
olricCfg := olric.Config{
Servers: olricServers,
Timeout: cfg.OlricTimeout,
}
olricClient, olricErr := initializeOlricClientWithRetry(olricCfg, logger)
if olricErr != nil {
logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize Olric cache client; cache endpoints disabled", zap.Error(olricErr))
} else {
gw.olricClient = olricClient
logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client ready",
zap.Strings("servers", olricCfg.Servers),
zap.Duration("timeout", olricCfg.Timeout),
)
}
logger.ComponentInfo(logging.ComponentGeneral, "Initializing IPFS Cluster client...")
// Discover IPFS endpoints from node configs if not explicitly configured
ipfsClusterURL := cfg.IPFSClusterAPIURL
ipfsAPIURL := cfg.IPFSAPIURL
ipfsTimeout := cfg.IPFSTimeout
ipfsReplicationFactor := cfg.IPFSReplicationFactor
ipfsEnableEncryption := cfg.IPFSEnableEncryption
if ipfsClusterURL == "" {
logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster URL not configured, discovering from node configs...")
discovered := discoverIPFSFromNodeConfigs(logger.Logger)
if discovered.clusterURL != "" {
ipfsClusterURL = discovered.clusterURL
ipfsAPIURL = discovered.apiURL
if discovered.timeout > 0 {
ipfsTimeout = discovered.timeout
}
if discovered.replicationFactor > 0 {
ipfsReplicationFactor = discovered.replicationFactor
}
ipfsEnableEncryption = discovered.enableEncryption
logger.ComponentInfo(logging.ComponentGeneral, "Discovered IPFS endpoints from node configs",
zap.String("cluster_url", ipfsClusterURL),
zap.String("api_url", ipfsAPIURL),
zap.Bool("encryption_enabled", ipfsEnableEncryption))
} else {
// Fallback to localhost defaults
ipfsClusterURL = "http://localhost:9094"
ipfsAPIURL = "http://localhost:5001"
ipfsEnableEncryption = true // Default to true
logger.ComponentInfo(logging.ComponentGeneral, "No IPFS config found in node configs, using localhost defaults")
}
}
if ipfsAPIURL == "" {
ipfsAPIURL = "http://localhost:5001"
}
if ipfsTimeout == 0 {
ipfsTimeout = 60 * time.Second
}
if ipfsReplicationFactor == 0 {
ipfsReplicationFactor = 3
}
if !cfg.IPFSEnableEncryption && !ipfsEnableEncryption {
// Only disable if explicitly set to false in both places
ipfsEnableEncryption = false
} else {
// Default to true if not explicitly disabled
ipfsEnableEncryption = true
}
ipfsCfg := ipfs.Config{
ClusterAPIURL: ipfsClusterURL,
Timeout: ipfsTimeout,
}
ipfsClient, ipfsErr := ipfs.NewClient(ipfsCfg, logger.Logger)
if ipfsErr != nil {
logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize IPFS Cluster client; storage endpoints disabled", zap.Error(ipfsErr))
} else {
gw.ipfsClient = ipfsClient
// Check peer count and warn if insufficient (use background context to avoid blocking)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if peerCount, err := ipfsClient.GetPeerCount(ctx); err == nil {
if peerCount < ipfsReplicationFactor {
logger.ComponentWarn(logging.ComponentGeneral, "insufficient cluster peers for replication factor",
zap.Int("peer_count", peerCount),
zap.Int("replication_factor", ipfsReplicationFactor),
zap.String("message", "Some pin operations may fail until more peers join the cluster"))
} else {
logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster peer count sufficient",
zap.Int("peer_count", peerCount),
zap.Int("replication_factor", ipfsReplicationFactor))
}
} else {
logger.ComponentWarn(logging.ComponentGeneral, "failed to get cluster peer count", zap.Error(err))
}
logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster client ready",
zap.String("cluster_api_url", ipfsCfg.ClusterAPIURL),
zap.String("ipfs_api_url", ipfsAPIURL),
zap.Duration("timeout", ipfsCfg.Timeout),
zap.Int("replication_factor", ipfsReplicationFactor),
zap.Bool("encryption_enabled", ipfsEnableEncryption),
)
}
// Store IPFS settings in gateway for use by handlers
gw.cfg.IPFSAPIURL = ipfsAPIURL
gw.cfg.IPFSReplicationFactor = ipfsReplicationFactor
gw.cfg.IPFSEnableEncryption = ipfsEnableEncryption
logger.ComponentInfo(logging.ComponentGeneral, "Gateway creation completed, returning...")
return gw, nil
}
@ -151,6 +312,20 @@ func (g *Gateway) Close() {
if g.sqlDB != nil {
_ = g.sqlDB.Close()
}
if g.olricClient != nil {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := g.olricClient.Close(ctx); err != nil {
g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err))
}
}
if g.ipfsClient != nil {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := g.ipfsClient.Close(ctx); err != nil {
g.logger.ComponentWarn(logging.ComponentGeneral, "error during IPFS client close", zap.Error(err))
}
}
}
// getLocalSubscribers returns all local subscribers for a given topic and namespace
@ -161,3 +336,202 @@ func (g *Gateway) getLocalSubscribers(topic, namespace string) []*localSubscribe
}
return nil
}
// initializeOlricClientWithRetry dials the Olric cluster, retrying with
// exponential backoff (doubling each attempt, capped at olricInitMaxBackoff)
// for up to olricInitMaxAttempts attempts before giving up.
// It returns the connected client, or the last connection error wrapped with
// the attempt count.
func initializeOlricClientWithRetry(cfg olric.Config, logger *logging.ColoredLogger) (*olric.Client, error) {
	delay := olricInitInitialBackoff
	for attempt := 1; attempt <= olricInitMaxAttempts; attempt++ {
		client, err := olric.NewClient(cfg, logger.Logger)
		if err == nil {
			// Only mention retries when the first attempt did not succeed.
			if attempt > 1 {
				logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client initialized after retries",
					zap.Int("attempts", attempt))
			}
			return client, nil
		}
		logger.ComponentWarn(logging.ComponentGeneral, "Olric cache client init attempt failed",
			zap.Int("attempt", attempt),
			zap.Duration("retry_in", delay),
			zap.Error(err))
		if attempt == olricInitMaxAttempts {
			return nil, fmt.Errorf("failed to initialize Olric cache client after %d attempts: %w", attempt, err)
		}
		time.Sleep(delay)
		if delay *= 2; delay > olricInitMaxBackoff {
			delay = olricInitMaxBackoff
		}
	}
	// Unreachable: the loop always returns, but Go requires a final return here.
	return nil, fmt.Errorf("failed to initialize Olric cache client")
}
// olricAddrFromMultiaddr extracts the host from a LibP2P multiaddr string and
// maps it to the conventional Olric port (3320).
// It returns "" when the address cannot be parsed, carries no IPv4/IPv6
// component, or is a loopback address (the caller falls back to
// localhost:3320 for local development).
func olricAddrFromMultiaddr(addrStr string) string {
	ma, err := multiaddr.NewMultiaddr(addrStr)
	if err != nil {
		return ""
	}
	// Extract IP address (prefer IPv4, fall back to IPv6).
	var ip string
	if ipv4, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ipv4 != "" {
		ip = ipv4
	} else if ipv6, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ipv6 != "" {
		ip = ipv6
	} else {
		return ""
	}
	// Skip loopback addresses; localhost:3320 is the caller's fallback anyway.
	// (Previously only "localhost"/"::1" were filtered — "127.0.0.1" slipped
	// through even though the intent was to skip all loopback addresses.)
	if ip == "localhost" || ip == "127.0.0.1" || ip == "::1" {
		return ""
	}
	return net.JoinHostPort(ip, "3320")
}

// discoverOlricServers discovers Olric server addresses from LibP2P peers
// Returns a deduplicated list of IP:port addresses where Olric servers are
// expected to run (standard port 3320), drawn from both the currently
// connected peers and any statically configured bootstrap peers.
// Returns nil when network info or the peer list is unavailable.
func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger) []string {
	// Get network info to access peer information
	networkInfo := networkClient.Network()
	if networkInfo == nil {
		logger.Debug("Network info not available for Olric discovery")
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	peers, err := networkInfo.GetPeers(ctx)
	if err != nil {
		logger.Debug("Failed to get peers for Olric discovery", zap.Error(err))
		return nil
	}
	olricServers := make([]string, 0)
	seen := make(map[string]bool)
	// add converts one multiaddr and appends the resulting Olric address,
	// skipping unusable addresses and duplicates. This replaces two copies of
	// identical extraction/dedup code in the original loops.
	add := func(addrStr string) {
		if addr := olricAddrFromMultiaddr(addrStr); addr != "" && !seen[addr] {
			olricServers = append(olricServers, addr)
			seen[addr] = true
		}
	}
	for _, peer := range peers {
		for _, addrStr := range peer.Addresses {
			add(addrStr)
		}
	}
	// Also check bootstrap peers from config
	if cfg := networkClient.Config(); cfg != nil {
		for _, bootstrapAddr := range cfg.BootstrapPeers {
			add(bootstrapAddr)
		}
	}
	// If we found servers, log them
	if len(olricServers) > 0 {
		logger.Info("Discovered Olric servers from LibP2P network",
			zap.Strings("servers", olricServers))
	}
	return olricServers
}
// ipfsDiscoveryResult holds discovered IPFS configuration
// read from a node's YAML config file (see discoverIPFSFromNodeConfigs).
// Zero values for the optional fields mean "use the built-in default".
type ipfsDiscoveryResult struct {
	clusterURL        string        // IPFS Cluster HTTP API base URL (e.g. "http://localhost:9094")
	apiURL            string        // IPFS daemon HTTP API URL used for content retrieval
	timeout           time.Duration // per-operation timeout; 0 → default (60s)
	replicationFactor int           // desired pin replication; 0 → default (3)
	enableEncryption  bool          // client-side encryption before upload
}
// discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files
// under ~/.debros. It checks bootstrap.yaml first, then bootstrap2.yaml,
// node.yaml, node2.yaml, node3.yaml, node4.yaml; the first file that parses
// and declares a cluster API URL wins. Missing optional fields are filled
// with defaults. Returns the zero value when nothing usable is found.
func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
	home, err := os.UserHomeDir()
	if err != nil {
		logger.Debug("Failed to get home directory for IPFS discovery", zap.Error(err))
		return ipfsDiscoveryResult{}
	}
	dir := filepath.Join(home, ".debros")
	candidates := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
	for _, filename := range candidates {
		raw, err := os.ReadFile(filepath.Join(dir, filename))
		if err != nil {
			continue // file absent or unreadable; try the next candidate
		}
		var nodeCfg config.Config
		if err := config.DecodeStrict(strings.NewReader(string(raw)), &nodeCfg); err != nil {
			logger.Debug("Failed to parse node config for IPFS discovery",
				zap.String("file", filename), zap.Error(err))
			continue
		}
		// Only accept files that actually configure IPFS.
		if nodeCfg.Database.IPFS.ClusterAPIURL == "" {
			continue
		}
		found := ipfsDiscoveryResult{
			clusterURL:        nodeCfg.Database.IPFS.ClusterAPIURL,
			apiURL:            nodeCfg.Database.IPFS.APIURL,
			timeout:           nodeCfg.Database.IPFS.Timeout,
			replicationFactor: nodeCfg.Database.IPFS.ReplicationFactor,
			enableEncryption:  nodeCfg.Database.IPFS.EnableEncryption,
		}
		// Fill defaults for any optional fields the file left unset.
		if found.apiURL == "" {
			found.apiURL = "http://localhost:5001"
		}
		if found.timeout == 0 {
			found.timeout = 60 * time.Second
		}
		if found.replicationFactor == 0 {
			found.replicationFactor = 3
		}
		// NOTE(review): a bool zero value is indistinguishable from an explicit
		// "false", so this branch forces enableEncryption to true in every case —
		// a config that disables encryption is silently overridden. Confirm
		// whether the config field should be a *bool to preserve explicit false.
		if !found.enableEncryption {
			found.enableEncryption = true
		}
		logger.Info("Discovered IPFS config from node config",
			zap.String("file", filename),
			zap.String("cluster_url", found.clusterURL),
			zap.String("api_url", found.apiURL),
			zap.Bool("encryption_enabled", found.enableEncryption))
		return found
	}
	return ipfsDiscoveryResult{}
}

View File

@ -131,27 +131,40 @@ func (g *Gateway) authMiddleware(next http.Handler) http.Handler {
}
// extractAPIKey extracts API key from Authorization, X-API-Key header, or query parameters
// Note: Bearer tokens that look like JWTs (have 2 dots) are skipped (they're JWTs, handled separately)
// X-API-Key header is preferred when both Authorization and X-API-Key are present
func extractAPIKey(r *http.Request) string {
// Prefer Authorization header
auth := r.Header.Get("Authorization")
if auth != "" {
// Support "Bearer <token>" and "ApiKey <token>"
lower := strings.ToLower(auth)
if strings.HasPrefix(lower, "bearer ") {
return strings.TrimSpace(auth[len("Bearer "):])
}
if strings.HasPrefix(lower, "apikey ") {
return strings.TrimSpace(auth[len("ApiKey "):])
}
// If header has no scheme, treat the whole value as token (lenient for dev)
if !strings.Contains(auth, " ") {
return strings.TrimSpace(auth)
}
}
// Fallback to X-API-Key header
// Prefer X-API-Key header (most explicit) - check this first
if v := strings.TrimSpace(r.Header.Get("X-API-Key")); v != "" {
return v
}
// Check Authorization header for ApiKey scheme or non-JWT Bearer tokens
auth := r.Header.Get("Authorization")
if auth != "" {
lower := strings.ToLower(auth)
if strings.HasPrefix(lower, "bearer ") {
tok := strings.TrimSpace(auth[len("Bearer "):])
// Skip Bearer tokens that look like JWTs (have 2 dots) - they're JWTs
// But allow Bearer tokens that don't look like JWTs (for backward compatibility)
if strings.Count(tok, ".") == 2 {
// This is a JWT, skip it
} else {
// This doesn't look like a JWT, treat as API key (backward compatibility)
return tok
}
} else if strings.HasPrefix(lower, "apikey ") {
return strings.TrimSpace(auth[len("ApiKey "):])
} else if !strings.Contains(auth, " ") {
// If header has no scheme, treat the whole value as token (lenient for dev)
// But skip if it looks like a JWT (has 2 dots)
tok := strings.TrimSpace(auth)
if strings.Count(tok, ".") != 2 {
return tok
}
}
}
// Fallback to query parameter (for WebSocket support)
if v := strings.TrimSpace(r.URL.Query().Get("api_key")); v != "" {
return v
@ -166,7 +179,7 @@ func extractAPIKey(r *http.Request) string {
// isPublicPath returns true for routes that should be accessible without API key auth
func isPublicPath(p string) bool {
switch p {
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key":
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key":
return true
default:
return false

View File

@ -26,12 +26,3 @@ func TestExtractAPIKey(t *testing.T) {
t.Fatalf("got %q", got)
}
}
func TestValidateNamespaceParam(t *testing.T) {
g := &Gateway{}
r := httptest.NewRequest(http.MethodGet, "/v1/storage/get?namespace=ns1&key=k", nil)
// no context namespace: should be false
if g.validateNamespaceParam(r) {
t.Fatalf("expected false without context ns")
}
}

View File

@ -22,6 +22,7 @@ func (g *Gateway) Routes() http.Handler {
// New: issue JWT from API key; new: create or return API key for a wallet after verification
mux.HandleFunc("/v1/auth/token", g.apiKeyToJWTHandler)
mux.HandleFunc("/v1/auth/api-key", g.issueAPIKeyHandler)
mux.HandleFunc("/v1/auth/simple-key", g.simpleAPIKeyHandler)
mux.HandleFunc("/v1/auth/register", g.registerHandler)
mux.HandleFunc("/v1/auth/refresh", g.refreshHandler)
mux.HandleFunc("/v1/auth/logout", g.logoutHandler)
@ -47,5 +48,20 @@ func (g *Gateway) Routes() http.Handler {
// anon proxy (authenticated users only)
mux.HandleFunc("/v1/proxy/anon", g.anonProxyHandler)
// cache endpoints (Olric)
mux.HandleFunc("/v1/cache/health", g.cacheHealthHandler)
mux.HandleFunc("/v1/cache/get", g.cacheGetHandler)
mux.HandleFunc("/v1/cache/mget", g.cacheMultiGetHandler)
mux.HandleFunc("/v1/cache/put", g.cachePutHandler)
mux.HandleFunc("/v1/cache/delete", g.cacheDeleteHandler)
mux.HandleFunc("/v1/cache/scan", g.cacheScanHandler)
// storage endpoints (IPFS)
mux.HandleFunc("/v1/storage/upload", g.storageUploadHandler)
mux.HandleFunc("/v1/storage/pin", g.storagePinHandler)
mux.HandleFunc("/v1/storage/status/", g.storageStatusHandler)
mux.HandleFunc("/v1/storage/get/", g.storageGetHandler)
mux.HandleFunc("/v1/storage/unpin/", g.storageUnpinHandler)
return g.withMiddleware(mux)
}

View File

@ -1,13 +1,378 @@
package gateway
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/logging"
"go.uber.org/zap"
)
// Storage HTTP handlers — request/response DTOs for the /v1/storage/* endpoints
// backed by IPFS and IPFS Cluster.
// StorageUploadRequest represents a request to upload content to IPFS
// via the JSON body variant of /v1/storage/upload (the multipart variant
// does not use this struct).
type StorageUploadRequest struct {
	Name string `json:"name,omitempty"`
	Data string `json:"data,omitempty"` // Base64 encoded data (alternative to multipart)
}

// StorageUploadResponse represents the response from uploading content
type StorageUploadResponse struct {
	Cid  string `json:"cid"`  // Content identifier assigned by IPFS
	Name string `json:"name"` // Name echoed back from the add operation
	Size int64  `json:"size"` // Size in bytes of the stored content
}

// StoragePinRequest represents a request to pin a CID
type StoragePinRequest struct {
	Cid  string `json:"cid"`            // CID to pin (required)
	Name string `json:"name,omitempty"` // Optional human-readable pin name
}

// StoragePinResponse represents the response from pinning a CID
type StoragePinResponse struct {
	Cid  string `json:"cid"`
	Name string `json:"name"`
}

// StorageStatusResponse represents the status of a pinned CID
// as reported by the IPFS Cluster pin-status API.
type StorageStatusResponse struct {
	Cid               string   `json:"cid"`
	Name              string   `json:"name"`
	Status            string   `json:"status"`          // Cluster pin state (e.g. pinned/pinning/error)
	ReplicationMin    int      `json:"replication_min"` // Minimum replica count requested
	ReplicationMax    int      `json:"replication_max"` // Maximum replica count requested
	ReplicationFactor int      `json:"replication_factor"`
	Peers             []string `json:"peers"`           // Cluster peers holding the pin
	Error             string   `json:"error,omitempty"` // Per-pin error message, if any
}
// storageUploadHandler handles POST /v1/storage/upload
// Accepts either multipart/form-data (field "file", optional form field "pin")
// or a JSON body with base64-encoded data (StorageUploadRequest). The content
// is added to IPFS synchronously; pinning to the cluster happens asynchronously
// in a background goroutine so the response is not blocked on replication.
//
// Responses:
//   - 200 StorageUploadResponse{cid, name, size}
//   - 400 on malformed multipart form, bad JSON, missing data, or bad base64
//   - 401 when no namespace is present in the request context
//   - 405 for non-POST methods
//   - 500 when the IPFS add fails
//   - 503 when the IPFS client is not initialized
func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Get namespace from context (set by auth middleware); uploads require auth.
	namespace := g.getNamespaceFromContext(r.Context())
	if namespace == "" {
		writeError(w, http.StatusUnauthorized, "namespace required")
		return
	}
	// Get replication factor from config (default: 3)
	replicationFactor := g.cfg.IPFSReplicationFactor
	if replicationFactor == 0 {
		replicationFactor = 3
	}
	// Check if it's multipart/form-data or JSON
	contentType := r.Header.Get("Content-Type")
	var reader io.Reader
	var name string
	var shouldPin bool = true // Default to true
	if strings.HasPrefix(contentType, "multipart/form-data") {
		// Handle multipart upload
		if err := r.ParseMultipartForm(32 << 20); err != nil { // 32MB max
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to parse multipart form: %v", err))
			return
		}
		file, header, err := r.FormFile("file")
		if err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to get file: %v", err))
			return
		}
		defer file.Close()
		reader = file
		name = header.Filename
		// Parse pin flag from form (default: true); anything other than "true"
		// (case-insensitive) disables pinning.
		if pinValue := r.FormValue("pin"); pinValue != "" {
			shouldPin = strings.ToLower(pinValue) == "true"
		}
	} else {
		// Handle JSON request with base64 data
		var req StorageUploadRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode request: %v", err))
			return
		}
		if req.Data == "" {
			writeError(w, http.StatusBadRequest, "data field required")
			return
		}
		// Decode base64 data (base64Decode is a package-level helper).
		data, err := base64Decode(req.Data)
		if err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode base64 data: %v", err))
			return
		}
		reader = bytes.NewReader(data)
		name = req.Name
		// For JSON requests, pin defaults to true (can be extended if needed)
	}
	// Add to IPFS
	ctx := r.Context()
	addResp, err := g.ipfsClient.Add(ctx, reader, name)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to add content to IPFS", zap.Error(err))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to add content: %v", err))
		return
	}
	// Return response immediately - don't block on pinning
	response := StorageUploadResponse{
		Cid:  addResp.Cid,
		Name: addResp.Name,
		Size: addResp.Size,
	}
	// Pin asynchronously in background if requested.
	// NOTE(review): pin failures in the goroutine are only observable via
	// pinAsync's own logging — the client has already received 200 by then.
	if shouldPin {
		go g.pinAsync(addResp.Cid, name, replicationFactor)
	}
	writeJSON(w, http.StatusOK, response)
}
// storagePinHandler handles POST /v1/storage/pin
// It pins an existing CID to the IPFS Cluster with the configured replication
// factor (default 3).
//
// Request body (JSON): StoragePinRequest{cid (required), name (optional)}
// Responses:
//   - 200 StoragePinResponse{cid, name}
//   - 400 on bad JSON or missing cid
//   - 405 for non-POST methods
//   - 500 when the cluster pin call fails
//   - 503 when the IPFS client is not initialized
func (g *Gateway) storagePinHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var req StoragePinRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode request: %v", err))
		return
	}
	if req.Cid == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	// Fall back to the default replication factor when unset in config.
	replication := g.cfg.IPFSReplicationFactor
	if replication == 0 {
		replication = 3
	}
	pinResp, err := g.ipfsClient.Pin(r.Context(), req.Cid, req.Name, replication)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to pin CID", zap.Error(err), zap.String("cid", req.Cid))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to pin: %v", err))
		return
	}
	// Prefer the cluster-reported pin name; fall back to the requested name.
	pinnedName := pinResp.Name
	if pinnedName == "" {
		pinnedName = req.Name
	}
	writeJSON(w, http.StatusOK, StoragePinResponse{
		Cid:  pinResp.Cid,
		Name: pinnedName,
	})
}
// storageStatusHandler handles GET /v1/storage/status/:cid
// It reports the cluster pin status (state, replication bounds, holding
// peers, and any per-pin error) for the CID taken from the URL path.
//
// Responses:
//   - 200 StorageStatusResponse
//   - 400 when the path carries no CID
//   - 405 for non-GET methods
//   - 500 when the cluster status call fails
//   - 503 when the IPFS client is not initialized
func (g *Gateway) storageStatusHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// The CID is everything after the route prefix.
	cid := strings.TrimPrefix(r.URL.Path, "/v1/storage/status/")
	if cid == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	status, err := g.ipfsClient.PinStatus(r.Context(), cid)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to get pin status", zap.Error(err), zap.String("cid", cid))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get status: %v", err))
		return
	}
	writeJSON(w, http.StatusOK, StorageStatusResponse{
		Cid:               status.Cid,
		Name:              status.Name,
		Status:            status.Status,
		ReplicationMin:    status.ReplicationMin,
		ReplicationMax:    status.ReplicationMax,
		ReplicationFactor: status.ReplicationFactor,
		Peers:             status.Peers,
		Error:             status.Error,
	})
}
// storageGetHandler handles GET /v1/storage/get/:cid
//
// Streams the raw content for a CID from the IPFS HTTP API back to the
// caller as an octet-stream download. Requires an authenticated namespace
// in the request context.
func (g *Gateway) storageGetHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Extract CID from path
	path := strings.TrimPrefix(r.URL.Path, "/v1/storage/get/")
	if path == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	// Get namespace from context
	namespace := g.getNamespaceFromContext(r.Context())
	if namespace == "" {
		writeError(w, http.StatusUnauthorized, "namespace required")
		return
	}
	// Get IPFS API URL from config (default matches the standard daemon port)
	ipfsAPIURL := g.cfg.IPFSAPIURL
	if ipfsAPIURL == "" {
		ipfsAPIURL = "http://localhost:5001"
	}
	ctx := r.Context()
	reader, err := g.ipfsClient.Get(ctx, path, ipfsAPIURL)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to get content from IPFS", zap.Error(err), zap.String("cid", path))
		// Check if error indicates content not found (404)
		if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "status 404") {
			writeError(w, http.StatusNotFound, fmt.Sprintf("content not found: %s", path))
		} else {
			writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get content: %v", err))
		}
		return
	}
	defer reader.Close()
	w.Header().Set("Content-Type", "application/octet-stream")
	// Bug fix: quote the filename so the header stays well-formed even if the
	// path segment contains spaces or other special characters (was unquoted).
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", path))
	if _, err := io.Copy(w, reader); err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to write content", zap.Error(err))
	}
}
// storageUnpinHandler handles DELETE /v1/storage/unpin/:cid
//
// Removes the cluster pin for the CID in the URL path and reports the
// outcome as a small JSON status object.
func (g *Gateway) storageUnpinHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodDelete {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// The CID is the trailing path segment.
	cid := strings.TrimPrefix(r.URL.Path, "/v1/storage/unpin/")
	if cid == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	if unpinErr := g.ipfsClient.Unpin(r.Context(), cid); unpinErr != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to unpin CID", zap.Error(unpinErr), zap.String("cid", cid))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to unpin: %v", unpinErr))
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{"status": "ok", "cid": cid})
}
// pinAsync pins a CID asynchronously in the background with retry logic
// Retries once if the first attempt fails, then gives up
//
// Runs on its own goroutine (spawned by the upload handler) with a fresh
// background context, so it is not cancelled when the originating HTTP
// request completes.
func (g *Gateway) pinAsync(cid, name string, replicationFactor int) {
	ctx := context.Background()
	// First attempt
	_, err := g.ipfsClient.Pin(ctx, cid, name, replicationFactor)
	if err == nil {
		// NOTE(review): success is logged at Warn level here and below —
		// presumably because no Info-level helper is available; confirm and
		// downgrade these to an info log if one exists.
		g.logger.ComponentWarn(logging.ComponentGeneral, "async pin succeeded", zap.String("cid", cid))
		return
	}
	// Log first failure
	g.logger.ComponentWarn(logging.ComponentGeneral, "async pin failed, retrying once",
		zap.Error(err), zap.String("cid", cid))
	// Retry once after a short delay
	time.Sleep(2 * time.Second)
	_, err = g.ipfsClient.Pin(ctx, cid, name, replicationFactor)
	if err != nil {
		// Final failure - log and give up
		g.logger.ComponentWarn(logging.ComponentGeneral, "async pin retry failed, giving up",
			zap.Error(err), zap.String("cid", cid))
	} else {
		g.logger.ComponentWarn(logging.ComponentGeneral, "async pin succeeded on retry", zap.String("cid", cid))
	}
}
// base64Decode decodes base64 string to bytes
// using the standard (padded) alphabet. Errors from DecodeString are passed
// through unchanged to the caller.
func base64Decode(s string) ([]byte, error) {
	return base64.StdEncoding.DecodeString(s)
}
// getNamespaceFromContext extracts namespace from request context
//
// Returns the namespace override stored under ctxKeyNamespaceOverride, or
// the empty string when the value is absent, not a string, or empty.
func (g *Gateway) getNamespaceFromContext(ctx context.Context) string {
	if ns, ok := ctx.Value(ctxKeyNamespaceOverride).(string); ok && ns != "" {
		return ns
	}
	return ""
}
// Network HTTP handlers
func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) {
if g.client == nil {
@ -36,7 +401,19 @@ func (g *Gateway) networkPeersHandler(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusInternalServerError, err.Error())
return
}
writeJSON(w, http.StatusOK, peers)
// Flatten peer addresses into a list of multiaddr strings
// Each PeerInfo can have multiple addresses, so we collect all of them
peerAddrs := make([]string, 0)
for _, peer := range peers {
// Add peer ID as /p2p/ multiaddr format
if peer.ID != "" {
peerAddrs = append(peerAddrs, "/p2p/"+peer.ID)
}
// Add all addresses for this peer
peerAddrs = append(peerAddrs, peer.Addresses...)
}
// Return peers in expected format: {"peers": ["/p2p/...", "/ip4/...", ...]}
writeJSON(w, http.StatusOK, map[string]any{"peers": peerAddrs})
}
func (g *Gateway) networkConnectHandler(w http.ResponseWriter, r *http.Request) {
@ -84,17 +461,3 @@ func (g *Gateway) networkDisconnectHandler(w http.ResponseWriter, r *http.Reques
}
writeJSON(w, http.StatusOK, map[string]any{"status": "ok"})
}
// validateNamespaceParam checks that an explicit ?namespace= query parameter,
// when present, matches the namespace carried in the request context.
// Requests without the parameter always pass.
func (g *Gateway) validateNamespaceParam(r *http.Request) bool {
	requested := r.URL.Query().Get("namespace")
	if requested == "" {
		// No explicit namespace requested — nothing to validate.
		return true
	}
	if current, ok := r.Context().Value(ctxKeyNamespaceOverride).(string); ok && current != "" {
		return current == requested
	}
	// If no namespace in context, disallow explicit namespace param
	return false
}

View File

@ -0,0 +1,562 @@
package gateway
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// mockIPFSClient is a mock implementation of ipfs.IPFSClient for testing.
// Each func field, when non-nil, overrides the corresponding method;
// otherwise a canned success value is returned.
type mockIPFSClient struct {
	addFunc          func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error)
	pinFunc          func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error)
	pinStatusFunc    func(ctx context.Context, cid string) (*ipfs.PinStatus, error)
	getFunc          func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error)
	unpinFunc        func(ctx context.Context, cid string) error
	getPeerCountFunc func(ctx context.Context) (int, error)
}

// Add delegates to addFunc, or returns a fixed CID/size on success.
func (m *mockIPFSClient) Add(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
	if m.addFunc != nil {
		return m.addFunc(ctx, reader, name)
	}
	return &ipfs.AddResponse{Cid: "QmTest123", Name: name, Size: 100}, nil
}

// Pin delegates to pinFunc, or echoes the requested cid/name.
func (m *mockIPFSClient) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error) {
	if m.pinFunc != nil {
		return m.pinFunc(ctx, cid, name, replicationFactor)
	}
	return &ipfs.PinResponse{Cid: cid, Name: name}, nil
}

// PinStatus delegates to pinStatusFunc, or reports a fully pinned status
// with three peers.
func (m *mockIPFSClient) PinStatus(ctx context.Context, cid string) (*ipfs.PinStatus, error) {
	if m.pinStatusFunc != nil {
		return m.pinStatusFunc(ctx, cid)
	}
	return &ipfs.PinStatus{
		Cid:               cid,
		Name:              "test",
		Status:            "pinned",
		ReplicationMin:    3,
		ReplicationMax:    3,
		ReplicationFactor: 3,
		Peers:             []string{"peer1", "peer2", "peer3"},
	}, nil
}

// Get delegates to getFunc, or returns a reader over fixed test content.
func (m *mockIPFSClient) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) {
	if m.getFunc != nil {
		return m.getFunc(ctx, cid, ipfsAPIURL)
	}
	return io.NopCloser(strings.NewReader("test content")), nil
}

// Unpin delegates to unpinFunc, or succeeds unconditionally.
func (m *mockIPFSClient) Unpin(ctx context.Context, cid string) error {
	if m.unpinFunc != nil {
		return m.unpinFunc(ctx, cid)
	}
	return nil
}

// Health always reports healthy.
func (m *mockIPFSClient) Health(ctx context.Context) error {
	return nil
}

// GetPeerCount delegates to getPeerCountFunc, or reports three peers.
func (m *mockIPFSClient) GetPeerCount(ctx context.Context) (int, error) {
	if m.getPeerCountFunc != nil {
		return m.getPeerCountFunc(ctx)
	}
	return 3, nil
}

// Close is a no-op for the mock.
func (m *mockIPFSClient) Close(ctx context.Context) error {
	return nil
}
// newTestGatewayWithIPFS builds a minimal Gateway wired to the given IPFS
// client for handler tests. Passing nil leaves the gateway without IPFS
// support so "service unavailable" paths can be exercised.
func newTestGatewayWithIPFS(t *testing.T, ipfsClient ipfs.IPFSClient) *Gateway {
	logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true)
	if err != nil {
		t.Fatalf("Failed to create logger: %v", err)
	}
	cfg := &Config{
		ListenAddr:            ":6001",
		ClientNamespace:       "test",
		IPFSReplicationFactor: 3,
		IPFSEnableEncryption:  true,
		IPFSAPIURL:            "http://localhost:5001",
	}
	gw := &Gateway{
		logger: logger,
		cfg:    cfg,
	}
	// Only assign when non-nil so gw.ipfsClient stays a true nil interface
	// in the "no IPFS" configuration.
	if ipfsClient != nil {
		gw.ipfsClient = ipfsClient
	}
	return gw
}
// TestStorageUploadHandler_MissingIPFSClient verifies the upload handler
// returns 503 when the gateway has no IPFS client configured.
func TestStorageUploadHandler_MissingIPFSClient(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, nil)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", nil)
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusServiceUnavailable {
		t.Errorf("Expected status %d, got %d", http.StatusServiceUnavailable, w.Code)
	}
}
// TestStorageUploadHandler_MethodNotAllowed verifies a GET to the upload
// endpoint is rejected with 405.
func TestStorageUploadHandler_MethodNotAllowed(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodGet, "/v1/storage/upload", nil)
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusMethodNotAllowed {
		t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, w.Code)
	}
}
// TestStorageUploadHandler_MissingNamespace verifies uploads without a
// namespace in the request context are rejected with 401.
func TestStorageUploadHandler_MissingNamespace(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", nil)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusUnauthorized {
		t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, w.Code)
	}
}
// TestStorageUploadHandler_MultipartUpload verifies the happy path for a
// multipart/form-data upload: the file content reaches the IPFS client and
// the CID/name/size from the Add response are echoed back.
func TestStorageUploadHandler_MultipartUpload(t *testing.T) {
	expectedCID := "QmTest456"
	expectedName := "test.txt"
	expectedSize := int64(200)
	mockClient := &mockIPFSClient{
		addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
			// Read and verify content
			data, _ := io.ReadAll(reader)
			if len(data) == 0 {
				return nil, io.ErrUnexpectedEOF
			}
			return &ipfs.AddResponse{
				Cid:  expectedCID,
				Name: name,
				Size: expectedSize,
			}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, _ := writer.CreateFormFile("file", expectedName)
	part.Write([]byte("test file content"))
	writer.Close()
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", &buf)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp StorageUploadResponse
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != expectedCID {
		t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
	}
	if resp.Name != expectedName {
		t.Errorf("Expected name %s, got %s", expectedName, resp.Name)
	}
	if resp.Size != expectedSize {
		t.Errorf("Expected size %d, got %d", expectedSize, resp.Size)
	}
}
// TestStorageUploadHandler_JSONUpload verifies the JSON upload path: the
// base64 payload is decoded before reaching the IPFS client and the CID is
// returned.
func TestStorageUploadHandler_JSONUpload(t *testing.T) {
	expectedCID := "QmTest789"
	expectedName := "test.json"
	testData := []byte("test json data")
	base64Data := base64.StdEncoding.EncodeToString(testData)
	mockClient := &mockIPFSClient{
		addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
			data, _ := io.ReadAll(reader)
			if string(data) != string(testData) {
				return nil, io.ErrUnexpectedEOF
			}
			return &ipfs.AddResponse{
				Cid:  expectedCID,
				Name: name,
				Size: int64(len(testData)),
			}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	reqBody := StorageUploadRequest{
		Name: expectedName,
		Data: base64Data,
	}
	bodyBytes, _ := json.Marshal(reqBody)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", bytes.NewReader(bodyBytes))
	req.Header.Set("Content-Type", "application/json")
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp StorageUploadResponse
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != expectedCID {
		t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
	}
}
// TestStorageUploadHandler_InvalidBase64 verifies malformed base64 in a JSON
// upload body yields a 400.
func TestStorageUploadHandler_InvalidBase64(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	reqBody := StorageUploadRequest{
		Name: "test.txt",
		Data: "invalid base64!!!",
	}
	bodyBytes, _ := json.Marshal(reqBody)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", bytes.NewReader(bodyBytes))
	req.Header.Set("Content-Type", "application/json")
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code)
	}
}
// TestStorageUploadHandler_IPFSError verifies an Add failure from the IPFS
// client surfaces as a 500.
func TestStorageUploadHandler_IPFSError(t *testing.T) {
	mockClient := &mockIPFSClient{
		addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
			return nil, io.ErrUnexpectedEOF
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, _ := writer.CreateFormFile("file", "test.txt")
	part.Write([]byte("test"))
	writer.Close()
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", &buf)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusInternalServerError {
		t.Errorf("Expected status %d, got %d", http.StatusInternalServerError, w.Code)
	}
}
// TestStoragePinHandler_Success verifies the pin handler forwards the CID
// and the configured replication factor (3) to the client and echoes the
// CID/name in the response.
func TestStoragePinHandler_Success(t *testing.T) {
	expectedCID := "QmPin123"
	expectedName := "pinned-file"
	mockClient := &mockIPFSClient{
		pinFunc: func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error) {
			if cid != expectedCID {
				return nil, io.ErrUnexpectedEOF
			}
			if replicationFactor != 3 {
				return nil, io.ErrUnexpectedEOF
			}
			return &ipfs.PinResponse{Cid: cid, Name: name}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	reqBody := StoragePinRequest{
		Cid:  expectedCID,
		Name: expectedName,
	}
	bodyBytes, _ := json.Marshal(reqBody)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/pin", bytes.NewReader(bodyBytes))
	w := httptest.NewRecorder()
	gw.storagePinHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp StoragePinResponse
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != expectedCID {
		t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
	}
	if resp.Name != expectedName {
		t.Errorf("Expected name %s, got %s", expectedName, resp.Name)
	}
}
// TestStoragePinHandler_MissingCID verifies a pin request without a CID is
// rejected with 400.
func TestStoragePinHandler_MissingCID(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	reqBody := StoragePinRequest{}
	bodyBytes, _ := json.Marshal(reqBody)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/pin", bytes.NewReader(bodyBytes))
	w := httptest.NewRecorder()
	gw.storagePinHandler(w, req)
	if w.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code)
	}
}
// TestStorageStatusHandler_Success verifies the status handler maps the
// client's PinStatus fields into the HTTP response.
func TestStorageStatusHandler_Success(t *testing.T) {
	expectedCID := "QmStatus123"
	mockClient := &mockIPFSClient{
		pinStatusFunc: func(ctx context.Context, cid string) (*ipfs.PinStatus, error) {
			return &ipfs.PinStatus{
				Cid:               cid,
				Name:              "test-file",
				Status:            "pinned",
				ReplicationMin:    3,
				ReplicationMax:    3,
				ReplicationFactor: 3,
				Peers:             []string{"peer1", "peer2", "peer3"},
			}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	req := httptest.NewRequest(http.MethodGet, "/v1/storage/status/"+expectedCID, nil)
	w := httptest.NewRecorder()
	gw.storageStatusHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp StorageStatusResponse
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != expectedCID {
		t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
	}
	if resp.Status != "pinned" {
		t.Errorf("Expected status 'pinned', got %s", resp.Status)
	}
	if resp.ReplicationFactor != 3 {
		t.Errorf("Expected replication factor 3, got %d", resp.ReplicationFactor)
	}
}
// TestStorageStatusHandler_MissingCID verifies a status request without a
// trailing CID segment yields 400.
func TestStorageStatusHandler_MissingCID(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodGet, "/v1/storage/status/", nil)
	w := httptest.NewRecorder()
	gw.storageStatusHandler(w, req)
	if w.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code)
	}
}
// TestStorageGetHandler_Success verifies the get handler streams the
// client's content back verbatim with an octet-stream content type.
func TestStorageGetHandler_Success(t *testing.T) {
	expectedCID := "QmGet123"
	expectedContent := "test content from IPFS"
	mockClient := &mockIPFSClient{
		getFunc: func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) {
			if cid != expectedCID {
				return nil, io.ErrUnexpectedEOF
			}
			return io.NopCloser(strings.NewReader(expectedContent)), nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	req := httptest.NewRequest(http.MethodGet, "/v1/storage/get/"+expectedCID, nil)
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageGetHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	if w.Body.String() != expectedContent {
		t.Errorf("Expected content %s, got %s", expectedContent, w.Body.String())
	}
	if w.Header().Get("Content-Type") != "application/octet-stream" {
		t.Errorf("Expected Content-Type 'application/octet-stream', got %s", w.Header().Get("Content-Type"))
	}
}
// TestStorageGetHandler_MissingNamespace verifies a get without a namespace
// in the request context is rejected with 401.
func TestStorageGetHandler_MissingNamespace(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodGet, "/v1/storage/get/QmTest123", nil)
	w := httptest.NewRecorder()
	gw.storageGetHandler(w, req)
	if w.Code != http.StatusUnauthorized {
		t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, w.Code)
	}
}
// TestStorageUnpinHandler_Success verifies the unpin handler forwards the
// CID to the client and echoes it back in the JSON status object.
func TestStorageUnpinHandler_Success(t *testing.T) {
	expectedCID := "QmUnpin123"
	mockClient := &mockIPFSClient{
		unpinFunc: func(ctx context.Context, cid string) error {
			if cid != expectedCID {
				return io.ErrUnexpectedEOF
			}
			return nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	req := httptest.NewRequest(http.MethodDelete, "/v1/storage/unpin/"+expectedCID, nil)
	w := httptest.NewRecorder()
	gw.storageUnpinHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp map[string]any
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp["cid"] != expectedCID {
		t.Errorf("Expected CID %s, got %v", expectedCID, resp["cid"])
	}
}
// TestStorageUnpinHandler_MissingCID verifies an unpin request without a
// trailing CID segment yields 400.
func TestStorageUnpinHandler_MissingCID(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodDelete, "/v1/storage/unpin/", nil)
	w := httptest.NewRecorder()
	gw.storageUnpinHandler(w, req)
	if w.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code)
	}
}
// Test helper functions
// TestBase64Decode covers the round-trip and the invalid-input error path
// of the base64Decode helper.
func TestBase64Decode(t *testing.T) {
	testData := []byte("test data")
	encoded := base64.StdEncoding.EncodeToString(testData)
	decoded, err := base64Decode(encoded)
	if err != nil {
		t.Fatalf("Failed to decode: %v", err)
	}
	if string(decoded) != string(testData) {
		t.Errorf("Expected %s, got %s", string(testData), string(decoded))
	}
	// Test invalid base64
	_, err = base64Decode("invalid!!!")
	if err == nil {
		t.Error("Expected error for invalid base64")
	}
}
// TestGetNamespaceFromContext covers both the present and absent namespace
// cases of getNamespaceFromContext.
func TestGetNamespaceFromContext(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, nil)
	// Test with namespace in context
	ctx := context.WithValue(context.Background(), ctxKeyNamespaceOverride, "test-ns")
	ns := gw.getNamespaceFromContext(ctx)
	if ns != "test-ns" {
		t.Errorf("Expected 'test-ns', got %s", ns)
	}
	// Test without namespace
	ctx2 := context.Background()
	ns2 := gw.getNamespaceFromContext(ctx2)
	if ns2 != "" {
		t.Errorf("Expected empty namespace, got %s", ns2)
	}
}

422
pkg/ipfs/client.go Normal file
View File

@ -0,0 +1,422 @@
package ipfs
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"time"
"go.uber.org/zap"
)
// IPFSClient defines the interface for IPFS operations
// (add/pin/status/get/unpin plus health and peer-count queries). Client is
// the production implementation; tests substitute mocks.
type IPFSClient interface {
	Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error)
	Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error)
	PinStatus(ctx context.Context, cid string) (*PinStatus, error)
	Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error)
	Unpin(ctx context.Context, cid string) error
	Health(ctx context.Context) error
	GetPeerCount(ctx context.Context) (int, error)
	Close(ctx context.Context) error
}

// Client wraps an IPFS Cluster HTTP API client for storage operations
type Client struct {
	apiURL     string       // base URL of the cluster REST API
	httpClient *http.Client // shared client; carries the configured timeout
	logger     *zap.Logger
}

// Config holds configuration for the IPFS client
type Config struct {
	// ClusterAPIURL is the base URL for IPFS Cluster HTTP API (e.g., "http://localhost:9094")
	// If empty, defaults to "http://localhost:9094"
	ClusterAPIURL string

	// Timeout is the timeout for client operations
	// If zero, defaults to 60 seconds
	Timeout time.Duration
}

// PinStatus represents the status of a pinned CID
type PinStatus struct {
	Cid               string   `json:"cid"`
	Name              string   `json:"name"`
	Status            string   `json:"status"` // "pinned", "pinning", "queued", "unpinned", "error"
	ReplicationMin    int      `json:"replication_min"`
	ReplicationMax    int      `json:"replication_max"`
	ReplicationFactor int      `json:"replication_factor"`
	Peers             []string `json:"peers"`
	Error             string   `json:"error,omitempty"`
}

// AddResponse represents the response from adding content to IPFS
type AddResponse struct {
	Name string `json:"name"`
	Cid  string `json:"cid"`
	Size int64  `json:"size"` // original byte count, not DAG size (see Add)
}

// PinResponse represents the response from pinning a CID
type PinResponse struct {
	Cid  string `json:"cid"`
	Name string `json:"name"`
}
// NewClient creates a new IPFS Cluster client wrapper.
//
// Empty config fields fall back to the defaults: API URL
// "http://localhost:9094" and a 60-second HTTP timeout.
func NewClient(cfg Config, logger *zap.Logger) (*Client, error) {
	base := cfg.ClusterAPIURL
	if base == "" {
		base = "http://localhost:9094"
	}
	wait := cfg.Timeout
	if wait == 0 {
		wait = 60 * time.Second
	}
	return &Client{
		apiURL:     base,
		httpClient: &http.Client{Timeout: wait},
		logger:     logger,
	}, nil
}
// Health checks if the IPFS Cluster API is healthy by issuing GET /id and
// requiring a 200 response.
func (c *Client) Health(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/id", nil)
	if err != nil {
		return fmt.Errorf("failed to create health check request: %w", err)
	}
	resp, doErr := c.httpClient.Do(req)
	if doErr != nil {
		return fmt.Errorf("health check request failed: %w", doErr)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil
	}
	return fmt.Errorf("health check failed with status: %d", resp.StatusCode)
}
// GetPeerCount returns the number of cluster peers
// by streaming GET /peers and counting the decoded objects.
func (c *Client) GetPeerCount(ctx context.Context) (int, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/peers", nil)
	if err != nil {
		return 0, fmt.Errorf("failed to create peers request: %w", err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return 0, fmt.Errorf("peers request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("peers request failed with status: %d", resp.StatusCode)
	}
	// The /peers endpoint returns NDJSON (newline-delimited JSON), not a JSON array
	// We need to stream-read each peer object
	dec := json.NewDecoder(resp.Body)
	peerCount := 0
	for {
		var peer map[string]interface{}
		err := dec.Decode(&peer)
		if err != nil {
			// io.EOF marks the end of the stream; any other error is real.
			if errors.Is(err, io.EOF) {
				break
			}
			return 0, fmt.Errorf("failed to decode peers response: %w", err)
		}
		peerCount++
	}
	return peerCount, nil
}
// Add adds content to IPFS and returns the CID.
//
// The reader is buffered fully in memory twice (raw bytes + multipart
// body), so very large uploads are memory-bound. The returned Size is the
// original byte count of the input, not the cluster-reported DAG size.
func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) {
	// Track original size by reading into memory first
	// This allows us to return the actual byte count, not the DAG size
	data, err := io.ReadAll(reader)
	if err != nil {
		return nil, fmt.Errorf("failed to read data: %w", err)
	}
	originalSize := int64(len(data))

	// Create multipart form request for IPFS Cluster API
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)

	// Create form file field
	part, err := writer.CreateFormFile("file", name)
	if err != nil {
		return nil, fmt.Errorf("failed to create form file: %w", err)
	}
	if _, err := io.Copy(part, bytes.NewReader(data)); err != nil {
		return nil, fmt.Errorf("failed to copy data: %w", err)
	}
	if err := writer.Close(); err != nil {
		return nil, fmt.Errorf("failed to close writer: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", c.apiURL+"/add", &buf)
	if err != nil {
		return nil, fmt.Errorf("failed to create add request: %w", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("add request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("add failed with status %d: %s", resp.StatusCode, string(body))
	}
	// IPFS Cluster streams NDJSON responses. We need to drain the entire stream
	// to prevent the connection from closing prematurely, which would cancel
	// the cluster's pinning operation. Read all JSON objects and keep the last one.
	dec := json.NewDecoder(resp.Body)
	var last AddResponse
	var hasResult bool
	for {
		var chunk AddResponse
		if err := dec.Decode(&chunk); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, fmt.Errorf("failed to decode add response: %w", err)
		}
		last = chunk
		hasResult = true
	}
	if !hasResult {
		// Stream ended without a single object — treat as a missing CID.
		return nil, fmt.Errorf("add response missing CID")
	}
	// Ensure name is set if provided
	if last.Name == "" && name != "" {
		last.Name = name
	}
	// Override size with original byte count (not DAG size)
	last.Size = originalSize
	return &last, nil
}
// Pin pins a CID with the specified replication factor.
//
// IPFS Cluster expects pin options (including name) as query parameters,
// not in a JSON body. Both replication-min and replication-max are set to
// replicationFactor. A 200 or 202 response counts as success.
func (c *Client) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error) {
	// Build query parameters. The replication options are always present,
	// so the query string is unconditionally appended (the previous
	// `if len(values) > 0` guard was always true).
	values := url.Values{}
	values.Set("replication-min", fmt.Sprintf("%d", replicationFactor))
	values.Set("replication-max", fmt.Sprintf("%d", replicationFactor))
	if name != "" {
		values.Set("name", name)
	}
	reqURL := c.apiURL + "/pins/" + cid + "?" + values.Encode()
	req, err := http.NewRequestWithContext(ctx, "POST", reqURL, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create pin request: %w", err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("pin request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("pin failed with status %d: %s", resp.StatusCode, string(body))
	}
	var result PinResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode pin response: %w", err)
	}
	// If IPFS Cluster doesn't return the name in the response, use the one from the request
	if result.Name == "" && name != "" {
		result.Name = name
	}
	// Ensure CID is set
	if result.Cid == "" {
		result.Cid = cid
	}
	return &result, nil
}
// PinStatus retrieves the status of a pinned CID.
//
// The cluster returns a GlobalPinInfo object; this maps it onto our flat
// PinStatus. ReplicationMin/Max are not present in GlobalPinInfo, so they
// are reported as 0 and ReplicationFactor is approximated by the number of
// peers tracking the pin.
func (c *Client) PinStatus(ctx context.Context, cid string) (*PinStatus, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/pins/"+cid, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create pin status request: %w", err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("pin status request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, fmt.Errorf("pin not found: %s", cid)
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("pin status failed with status %d: %s", resp.StatusCode, string(body))
	}
	// IPFS Cluster returns GlobalPinInfo, we need to map it to our PinStatus
	var gpi struct {
		Cid     string `json:"cid"`
		Name    string `json:"name"`
		PeerMap map[string]struct {
			Status interface{} `json:"status"` // TrackerStatus can be string or int
			Error  string      `json:"error,omitempty"`
		} `json:"peer_map"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&gpi); err != nil {
		return nil, fmt.Errorf("failed to decode pin status response: %w", err)
	}
	// Use name from GlobalPinInfo
	name := gpi.Name
	// Extract status from peer map (use first peer's status, or aggregate)
	// NOTE(review): map iteration order is random, so when peers disagree the
	// reported status depends on iteration order — confirm this is acceptable.
	status := "unknown"
	peers := make([]string, 0, len(gpi.PeerMap))
	var errorMsg string
	for peerID, pinInfo := range gpi.PeerMap {
		peers = append(peers, peerID)
		if pinInfo.Status != nil {
			// Convert status to string
			if s, ok := pinInfo.Status.(string); ok {
				if status == "unknown" || s != "" {
					status = s
				}
			} else if status == "unknown" {
				// If status is not a string, try to convert it
				status = fmt.Sprintf("%v", pinInfo.Status)
			}
		}
		if pinInfo.Error != "" {
			errorMsg = pinInfo.Error
		}
	}
	// Normalize status string (common IPFS Cluster statuses)
	if status == "" || status == "unknown" {
		status = "pinned" // Default to pinned if we have peers
		if len(peers) == 0 {
			status = "unknown"
		}
	}
	result := &PinStatus{
		Cid:               gpi.Cid,
		Name:              name,
		Status:            status,
		ReplicationMin:    0, // Not available in GlobalPinInfo
		ReplicationMax:    0, // Not available in GlobalPinInfo
		ReplicationFactor: len(peers),
		Peers:             peers,
		Error:             errorMsg,
	}
	// Ensure CID is set
	if result.Cid == "" {
		result.Cid = cid
	}
	return result, nil
}
// Unpin removes a pin from a CID via DELETE /pins/{cid}.
// The cluster may answer 200 (done) or 202 (accepted/queued); both succeed.
func (c *Client) Unpin(ctx context.Context, cid string) error {
	req, err := http.NewRequestWithContext(ctx, "DELETE", c.apiURL+"/pins/"+cid, nil)
	if err != nil {
		return fmt.Errorf("failed to create unpin request: %w", err)
	}
	resp, doErr := c.httpClient.Do(req)
	if doErr != nil {
		return fmt.Errorf("unpin request failed: %w", doErr)
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK, http.StatusAccepted:
		return nil
	}
	body, _ := io.ReadAll(resp.Body)
	return fmt.Errorf("unpin failed with status %d: %s", resp.StatusCode, string(body))
}
// Get retrieves content from IPFS by CID.
//
// Note: This uses the IPFS HTTP API (typically on port 5001) via
// /api/v0/cat, not the Cluster API. The caller owns the returned
// ReadCloser and must close it.
func (c *Client) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) {
	if ipfsAPIURL == "" {
		ipfsAPIURL = "http://localhost:5001"
	}
	// Named reqURL (not "url") to avoid shadowing the imported net/url package.
	reqURL := fmt.Sprintf("%s/api/v0/cat?arg=%s", ipfsAPIURL, cid)
	req, err := http.NewRequestWithContext(ctx, "POST", reqURL, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create get request: %w", err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("get request failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		if resp.StatusCode == http.StatusNotFound {
			return nil, fmt.Errorf("content not found (CID: %s). The content may not be available on the IPFS node, or the IPFS API may not be accessible at %s", cid, ipfsAPIURL)
		}
		return nil, fmt.Errorf("get failed with status %d: %s", resp.StatusCode, string(body))
	}
	return resp.Body, nil
}
// Close closes the IPFS client connection.
// It is currently a no-op that always returns nil: the wrapper is backed by a
// net/http.Client, which holds no resources requiring explicit teardown.
// The ctx parameter is accepted for interface symmetry and is unused.
func (c *Client) Close(ctx context.Context) error {
	// HTTP client doesn't need explicit closing
	return nil
}

491
pkg/ipfs/client_test.go Normal file
View File

@ -0,0 +1,491 @@
package ipfs
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
"go.uber.org/zap"
)
// TestNewClient verifies that NewClient applies defaults when the config is
// empty (URL http://localhost:9094, 60s timeout) and honors explicit values.
func TestNewClient(t *testing.T) {
	logger := zap.NewNop()
	t.Run("default_config", func(t *testing.T) {
		c, err := NewClient(Config{}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if got := c.apiURL; got != "http://localhost:9094" {
			t.Errorf("Expected default API URL 'http://localhost:9094', got %s", got)
		}
		if got := c.httpClient.Timeout; got != 60*time.Second {
			t.Errorf("Expected default timeout 60s, got %v", got)
		}
	})
	t.Run("custom_config", func(t *testing.T) {
		c, err := NewClient(Config{
			ClusterAPIURL: "http://custom:9094",
			Timeout:       30 * time.Second,
		}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if got := c.apiURL; got != "http://custom:9094" {
			t.Errorf("Expected API URL 'http://custom:9094', got %s", got)
		}
		if got := c.httpClient.Timeout; got != 30*time.Second {
			t.Errorf("Expected timeout 30s, got %v", got)
		}
	})
}
// TestClient_Add verifies the /add upload path: on success the client must
// report the CID/name from the server but replace the server-reported size
// with the actual number of bytes it uploaded; a 5xx response must surface
// as an error.
func TestClient_Add(t *testing.T) {
	logger := zap.NewNop()
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmTest123"
		expectedName := "test.txt"
		testContent := "test content"
		expectedSize := int64(len(testContent)) // Client overrides server size with actual content length
		// Fake cluster endpoint that checks the multipart upload shape and
		// deliberately returns a wrong size.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/add" {
				t.Errorf("Expected path '/add', got %s", r.URL.Path)
			}
			if r.Method != "POST" {
				t.Errorf("Expected method POST, got %s", r.Method)
			}
			// Verify multipart form
			if err := r.ParseMultipartForm(32 << 20); err != nil {
				t.Errorf("Failed to parse multipart form: %v", err)
				return
			}
			file, header, err := r.FormFile("file")
			if err != nil {
				t.Errorf("Failed to get file: %v", err)
				return
			}
			defer file.Close()
			if header.Filename != expectedName {
				t.Errorf("Expected filename %s, got %s", expectedName, header.Filename)
			}
			// Read file content
			_, _ = io.ReadAll(file)
			// Return a different size to verify the client correctly overrides it
			response := AddResponse{
				Cid:  expectedCID,
				Name: expectedName,
				Size: 999, // Client will override this with actual content size
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: server.URL}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		reader := strings.NewReader(testContent)
		resp, err := client.Add(context.Background(), reader, expectedName)
		if err != nil {
			t.Fatalf("Failed to add content: %v", err)
		}
		if resp.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
		}
		if resp.Name != expectedName {
			t.Errorf("Expected name %s, got %s", expectedName, resp.Name)
		}
		if resp.Size != expectedSize {
			t.Errorf("Expected size %d, got %d", expectedSize, resp.Size)
		}
	})
	t.Run("server_error", func(t *testing.T) {
		// Any non-success status from the cluster API must be reported.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("internal error"))
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: server.URL}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		reader := strings.NewReader("test")
		_, err = client.Add(context.Background(), reader, "test.txt")
		if err == nil {
			t.Error("Expected error for server error")
		}
	})
}
// TestClient_Pin verifies POST /pins/<cid>: the CID must appear in the path,
// the replication factor must be sent as both replication-min and
// replication-max, the pin name must be in the query, and both 200 and 202
// responses count as success.
func TestClient_Pin(t *testing.T) {
	logger := zap.NewNop()
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmPin123"
		expectedName := "pinned-file"
		expectedReplicationFactor := 3
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/pins/") {
				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
			}
			if r.Method != "POST" {
				t.Errorf("Expected method POST, got %s", r.Method)
			}
			if cid := strings.TrimPrefix(r.URL.Path, "/pins/"); cid != expectedCID {
				t.Errorf("Expected CID %s in path, got %s", expectedCID, cid)
			}
			// The single replication factor is expected as both bounds.
			query := r.URL.Query()
			if got := query.Get("replication-min"); got != strconv.Itoa(expectedReplicationFactor) {
				t.Errorf("Expected replication-min %d, got %s", expectedReplicationFactor, got)
			}
			if got := query.Get("replication-max"); got != strconv.Itoa(expectedReplicationFactor) {
				t.Errorf("Expected replication-max %d, got %s", expectedReplicationFactor, got)
			}
			if got := query.Get("name"); got != expectedName {
				t.Errorf("Expected name %s, got %s", expectedName, got)
			}
			response := PinResponse{
				Cid:  expectedCID,
				Name: expectedName,
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: server.URL}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		resp, err := client.Pin(context.Background(), expectedCID, expectedName, expectedReplicationFactor)
		if err != nil {
			t.Fatalf("Failed to pin: %v", err)
		}
		if resp.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
		}
		if resp.Name != expectedName {
			t.Errorf("Expected name %s, got %s", expectedName, resp.Name)
		}
	})
	t.Run("accepted_status", func(t *testing.T) {
		// The cluster may answer 202 Accepted for an async pin; that is success.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusAccepted)
			response := PinResponse{Cid: "QmTest", Name: "test"}
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: server.URL}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		_, err = client.Pin(context.Background(), "QmTest", "test", 3)
		if err != nil {
			t.Errorf("Expected success for Accepted status, got error: %v", err)
		}
	})
}
// TestClient_PinStatus verifies GET /pins/<cid>: a peer_map where every peer
// reports "pinned" must yield status "pinned" with all peers listed, and a
// 404 from the cluster API must surface as an error.
func TestClient_PinStatus(t *testing.T) {
	logger := zap.NewNop()
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmStatus123"
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/pins/") {
				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
			}
			if r.Method != "GET" {
				t.Errorf("Expected method GET, got %s", r.Method)
			}
			// Simulate a GlobalPinInfo-style response with three pinned peers.
			response := map[string]interface{}{
				"cid":  expectedCID,
				"name": "test-file",
				"peer_map": map[string]interface{}{
					"peer1": map[string]interface{}{"status": "pinned"},
					"peer2": map[string]interface{}{"status": "pinned"},
					"peer3": map[string]interface{}{"status": "pinned"},
				},
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: server.URL}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		status, err := client.PinStatus(context.Background(), expectedCID)
		if err != nil {
			t.Fatalf("Failed to get pin status: %v", err)
		}
		if status.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, status.Cid)
		}
		if status.Status != "pinned" {
			t.Errorf("Expected status 'pinned', got %s", status.Status)
		}
		if len(status.Peers) != 3 {
			t.Errorf("Expected 3 peers, got %d", len(status.Peers))
		}
	})
	t.Run("not_found", func(t *testing.T) {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: server.URL}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		_, err = client.PinStatus(context.Background(), "QmNotFound")
		if err == nil {
			t.Error("Expected error for not found")
		}
	})
}
// TestClient_Unpin covers DELETE /pins/<cid>: a 200 response and a 202
// response must both be treated as a successful unpin.
func TestClient_Unpin(t *testing.T) {
	logger := zap.NewNop()
	t.Run("success", func(t *testing.T) {
		const wantCID = "QmUnpin123"
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/pins/") {
				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
			}
			if r.Method != "DELETE" {
				t.Errorf("Expected method DELETE, got %s", r.Method)
			}
			w.WriteHeader(http.StatusOK)
		}))
		defer srv.Close()
		c, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := c.Unpin(context.Background(), wantCID); err != nil {
			t.Fatalf("Failed to unpin: %v", err)
		}
	})
	t.Run("accepted_status", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusAccepted)
		}))
		defer srv.Close()
		c, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := c.Unpin(context.Background(), "QmTest"); err != nil {
			t.Errorf("Expected success for Accepted status, got error: %v", err)
		}
	})
}
// TestClient_Get exercises content retrieval via the IPFS HTTP API
// (/api/v0/cat): successful streaming, a 404 error, and the fallback to the
// default API URL when none is provided.
func TestClient_Get(t *testing.T) {
	logger := zap.NewNop()
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmGet123"
		expectedContent := "test content from IPFS"
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.Contains(r.URL.Path, "/api/v0/cat") {
				t.Errorf("Expected path containing '/api/v0/cat', got %s", r.URL.Path)
			}
			if r.Method != "POST" {
				t.Errorf("Expected method POST, got %s", r.Method)
			}
			// Verify CID parameter
			if !strings.Contains(r.URL.RawQuery, expectedCID) {
				t.Errorf("Expected CID %s in query, got %s", expectedCID, r.URL.RawQuery)
			}
			w.Write([]byte(expectedContent))
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: "http://localhost:9094"}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		reader, err := client.Get(context.Background(), expectedCID, server.URL)
		if err != nil {
			t.Fatalf("Failed to get content: %v", err)
		}
		defer reader.Close()
		data, err := io.ReadAll(reader)
		if err != nil {
			t.Fatalf("Failed to read content: %v", err)
		}
		if string(data) != expectedContent {
			t.Errorf("Expected content %s, got %s", expectedContent, string(data))
		}
	})
	t.Run("not_found", func(t *testing.T) {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}))
		defer server.Close()
		cfg := Config{ClusterAPIURL: "http://localhost:9094"}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		_, err = client.Get(context.Background(), "QmNotFound", server.URL)
		if err == nil {
			t.Error("Expected error for not found")
		}
	})
	t.Run("default_ipfs_api_url", func(t *testing.T) {
		// When the IPFS API URL is empty the client must fall back to
		// http://localhost:5001. The previous version asserted that this
		// request FAILS, which is flaky: it breaks on any machine that
		// actually runs an IPFS daemon on port 5001 (and spun up an unused
		// httptest server). We only require that the call completes without
		// panicking; if a local daemon answered, release the body.
		cfg := Config{ClusterAPIURL: "http://localhost:9094"}
		client, err := NewClient(cfg, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		reader, err := client.Get(context.Background(), "QmDefault", "")
		if err == nil {
			// A local IPFS daemon happens to be listening; just clean up.
			reader.Close()
		}
	})
}
// TestClient_Health checks the /id-based health probe: a 200 response means
// healthy, any other status must be reported as an error.
func TestClient_Health(t *testing.T) {
	logger := zap.NewNop()
	t.Run("success", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/id" {
				t.Errorf("Expected path '/id', got %s", r.URL.Path)
			}
			w.WriteHeader(http.StatusOK)
			w.Write([]byte(`{"id": "test"}`))
		}))
		defer srv.Close()
		c, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := c.Health(context.Background()); err != nil {
			t.Fatalf("Failed health check: %v", err)
		}
	})
	t.Run("unhealthy", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
		}))
		defer srv.Close()
		c, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := c.Health(context.Background()); err == nil {
			t.Error("Expected error for unhealthy status")
		}
	})
}
func TestClient_Close(t *testing.T) {
logger := zap.NewNop()
cfg := Config{ClusterAPIURL: "http://localhost:9094"}
client, err := NewClient(cfg, logger)
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
// Close should not error
err = client.Close(context.Background())
if err != nil {
t.Errorf("Close should not error, got: %v", err)
}
}

1121
pkg/ipfs/cluster.go Normal file

File diff suppressed because it is too large Load Diff

View File

@ -101,8 +101,10 @@ func getLevelColor(level zapcore.Level) string {
// coloredConsoleEncoder creates a custom encoder with colors
func coloredConsoleEncoder(enableColors bool) zapcore.Encoder {
config := zap.NewDevelopmentEncoderConfig()
// Ultra-short timestamp: HH:MM:SS (no milliseconds, no date, no timezone)
config.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
timeStr := t.Format("2006-01-02T15:04:05.000Z0700")
timeStr := t.Format("15:04:05")
if enableColors {
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, timeStr, Reset))
} else {
@ -110,21 +112,41 @@ func coloredConsoleEncoder(enableColors bool) zapcore.Encoder {
}
}
// Single letter level: D, I, W, E
config.EncodeLevel = func(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
levelStr := strings.ToUpper(level.String())
levelMap := map[zapcore.Level]string{
zapcore.DebugLevel: "D",
zapcore.InfoLevel: "I",
zapcore.WarnLevel: "W",
zapcore.ErrorLevel: "E",
}
levelStr := levelMap[level]
if levelStr == "" {
levelStr = "?"
}
if enableColors {
color := getLevelColor(level)
enc.AppendString(fmt.Sprintf("%s%s%-5s%s", color, Bold, levelStr, Reset))
enc.AppendString(fmt.Sprintf("%s%s%s%s", color, Bold, levelStr, Reset))
} else {
enc.AppendString(fmt.Sprintf("%-5s", levelStr))
enc.AppendString(levelStr)
}
}
// Just filename, no line number for cleaner output
config.EncodeCaller = func(caller zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) {
file := caller.File
// Extract just the filename from the path
if idx := strings.LastIndex(file, "/"); idx >= 0 {
file = file[idx+1:]
}
// Remove .go extension for even more compact format
if strings.HasSuffix(file, ".go") {
file = file[:len(file)-3]
}
if enableColors {
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, caller.TrimmedPath(), Reset))
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, file, Reset))
} else {
enc.AppendString(caller.TrimmedPath())
enc.AppendString(file)
}
}

View File

@ -10,6 +10,8 @@ import (
"github.com/mackerelio/go-osstat/cpu"
"github.com/mackerelio/go-osstat/memory"
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/logging"
)
func logPeerStatus(n *Node, currentPeerCount int, lastPeerCount int, firstCheck bool) (int, bool) {
@ -91,13 +93,13 @@ func announceMetrics(n *Node, peers []peer.ID, cpuUsage uint64, memUsage *memory
}
msg := struct {
PeerID string `json:"peer_id"`
PeerCount int `json:"peer_count"`
PeerIDs []string `json:"peer_ids,omitempty"`
CPU uint64 `json:"cpu_usage"`
Memory uint64 `json:"memory_usage"`
Timestamp int64 `json:"timestamp"`
ClusterHealth map[string]interface{} `json:"cluster_health,omitempty"`
PeerID string `json:"peer_id"`
PeerCount int `json:"peer_count"`
PeerIDs []string `json:"peer_ids,omitempty"`
CPU uint64 `json:"cpu_usage"`
Memory uint64 `json:"memory_usage"`
Timestamp int64 `json:"timestamp"`
ClusterHealth map[string]interface{} `json:"cluster_health,omitempty"`
}{
PeerID: n.host.ID().String(),
PeerCount: len(peers),
@ -210,6 +212,40 @@ func (n *Node) startConnectionMonitoring() {
if err := announceMetrics(n, peers, cpuUsage, mem); err != nil {
n.logger.Error("Failed to announce metrics", zap.Error(err))
}
// Periodically update IPFS Cluster peer addresses
// This discovers all cluster peers and updates peer_addresses in service.json
// so IPFS Cluster can automatically connect to all discovered peers
if n.clusterConfigManager != nil {
// First try to discover from LibP2P connections (works even if cluster peers aren't connected yet)
// This runs every minute to discover peers automatically via LibP2P discovery
if time.Now().Unix()%60 == 0 {
if success, err := n.clusterConfigManager.DiscoverClusterPeersFromLibP2P(n.host); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to discover cluster peers from LibP2P", zap.Error(err))
} else if success {
n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses discovered from LibP2P")
}
}
// Also try to update from cluster API (works once peers are connected)
// Update all cluster peers every 2 minutes to discover new peers
if time.Now().Unix()%120 == 0 {
if success, err := n.clusterConfigManager.UpdateAllClusterPeers(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to update cluster peers during monitoring", zap.Error(err))
} else if success {
n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses updated during monitoring")
}
// Also try to repair bootstrap peers if this is not a bootstrap node
if n.config.Node.Type != "bootstrap" {
if success, err := n.clusterConfigManager.RepairBootstrapPeers(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair bootstrap peers during monitoring", zap.Error(err))
} else if success {
n.logger.ComponentInfo(logging.ComponentNode, "Bootstrap peer configuration repaired during monitoring")
}
}
}
}
}
}()

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
mathrand "math/rand"
"net"
"os"
"path/filepath"
"strings"
@ -22,6 +23,7 @@ import (
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/discovery"
"github.com/DeBrosOfficial/network/pkg/encryption"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/pubsub"
database "github.com/DeBrosOfficial/network/pkg/rqlite"
@ -45,6 +47,9 @@ type Node struct {
// Discovery
discoveryManager *discovery.Manager
// IPFS Cluster config manager
clusterConfigManager *ipfs.ClusterConfigManager
}
// NewNode creates a new network node
@ -127,6 +132,51 @@ func (n *Node) startRQLite(ctx context.Context) error {
return nil
}
// extractIPFromMultiaddr extracts the IP address from a bootstrap peer multiaddr.
// Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols. A literal IP component
// wins; otherwise a DNS name is resolved (preferring IPv4 results, falling back
// to the first IPv6). Returns "" when the multiaddr is invalid or nothing
// usable is found.
func extractIPFromMultiaddr(multiaddrStr string) string {
	ma, err := multiaddr.NewMultiaddr(multiaddrStr)
	if err != nil {
		return ""
	}
	var (
		directIP string
		hostname string
	)
	multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
		switch c.Protocol().Code {
		case multiaddr.P_IP4, multiaddr.P_IP6:
			directIP = c.Value()
			return false // literal IP found — stop walking components
		case multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_DNSADDR:
			hostname = c.Value() // remember as fallback; keep looking for an IP
		}
		return true
	})
	if directIP != "" {
		return directIP
	}
	if hostname == "" {
		return ""
	}
	resolved, err := net.LookupIP(hostname)
	if err != nil || len(resolved) == 0 {
		return ""
	}
	// Prefer IPv4 addresses, but accept IPv6 if that's all we have.
	for _, addr := range resolved {
		if addr.To4() != nil {
			return addr.String()
		}
	}
	return resolved[0].String()
}
// bootstrapPeerSource returns a PeerSource that yields peers from BootstrapPeers.
func bootstrapPeerSource(bootstrapAddrs []string, logger *zap.Logger) func(context.Context, int) <-chan peer.AddrInfo {
return func(ctx context.Context, num int) <-chan peer.AddrInfo {
@ -321,8 +371,8 @@ func (n *Node) startLibP2P() error {
// For localhost/development, disable NAT services
// For production, these would be enabled
isLocalhost := len(n.config.Node.ListenAddresses) > 0 &&
(strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1") ||
strings.Contains(n.config.Node.ListenAddresses[0], "localhost"))
(strings.Contains(n.config.Node.ListenAddresses[0], "localhost") ||
strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1"))
if isLocalhost {
n.logger.ComponentInfo(logging.ComponentLibP2P, "Localhost detected - disabling NAT services for local development")
@ -631,6 +681,14 @@ func (n *Node) Start(ctx context.Context) error {
return fmt.Errorf("failed to start LibP2P: %w", err)
}
// Initialize IPFS Cluster configuration if enabled
if n.config.Database.IPFS.ClusterAPIURL != "" {
if err := n.startIPFSClusterConfig(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to initialize IPFS Cluster config", zap.Error(err))
// Don't fail node startup if cluster config fails
}
}
// Start RQLite with cluster discovery
if err := n.startRQLite(ctx); err != nil {
return fmt.Errorf("failed to start RQLite: %w", err)
@ -651,3 +709,41 @@ func (n *Node) Start(ctx context.Context) error {
return nil
}
// startIPFSClusterConfig initializes and ensures IPFS Cluster configuration.
// It creates the cluster config manager, normalizes IPFS addresses, ensures
// the cluster config exists, and (for non-bootstrap nodes) attempts an initial
// bootstrap-peer repair; the monitoring loop retries the repair periodically.
func (n *Node) startIPFSClusterConfig() error {
	n.logger.ComponentInfo(logging.ComponentNode, "Initializing IPFS Cluster configuration")

	manager, err := ipfs.NewClusterConfigManager(n.config, n.logger.Logger)
	if err != nil {
		return fmt.Errorf("failed to create cluster config manager: %w", err)
	}
	n.clusterConfigManager = manager

	// Normalize IPFS config addresses (localhost -> 127.0.0.1) before ensuring
	// the cluster config. Failure here is non-fatal: EnsureConfig handles it.
	if err := manager.FixIPFSConfigAddresses(); err != nil {
		n.logger.ComponentWarn(logging.ComponentNode, "Failed to fix IPFS config addresses", zap.Error(err))
	}

	if err := manager.EnsureConfig(); err != nil {
		return fmt.Errorf("failed to ensure cluster config: %w", err)
	}

	if n.config.Node.Type != "bootstrap" {
		switch repaired, err := manager.RepairBootstrapPeers(); {
		case err != nil:
			n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair bootstrap peers, will retry later", zap.Error(err))
		case repaired:
			n.logger.ComponentInfo(logging.ComponentNode, "Bootstrap peer configuration repaired successfully")
		default:
			n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap peer not available yet, will retry periodically")
		}
	}

	n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized")
	return nil
}

103
pkg/olric/client.go Normal file
View File

@ -0,0 +1,103 @@
package olric
import (
"context"
"fmt"
"time"
olriclib "github.com/olric-data/olric"
"go.uber.org/zap"
)
// Client wraps an Olric cluster client for distributed cache operations.
type Client struct {
	client olriclib.Client // underlying Olric cluster client
	logger *zap.Logger     // structured logger; not used by the methods in this file yet
}

// Config holds configuration for the Olric client.
type Config struct {
	// Servers is a list of Olric server addresses (e.g., ["localhost:3320"])
	// If empty, defaults to ["localhost:3320"]
	Servers []string

	// Timeout is the timeout for client operations
	// If zero, defaults to 10 seconds
	// NOTE(review): NewClient computes this default but never applies it to
	// the client — confirm whether the value is meant to be wired in.
	Timeout time.Duration
}
// NewClient creates a new Olric client wrapper.
//
// If cfg.Servers is empty it falls back to ["localhost:3320"].
//
// NOTE(review): cfg.Timeout is accepted but not applied anywhere — the
// previous implementation computed a 10s default and then discarded it
// (dead code, removed here). Wiring the timeout into client operations is
// left as a TODO so the fix doesn't silently change connection behavior.
func NewClient(cfg Config, logger *zap.Logger) (*Client, error) {
	servers := cfg.Servers
	if len(servers) == 0 {
		servers = []string{"localhost:3320"}
	}
	client, err := olriclib.NewClusterClient(servers)
	if err != nil {
		return nil, fmt.Errorf("failed to create Olric cluster client: %w", err)
	}
	// TODO: honor cfg.Timeout (defaulting to 10s) once it can be plumbed
	// into the cluster client or per-operation contexts.
	return &Client{
		client: client,
		logger: logger,
	}, nil
}
// Health checks if the Olric client is healthy by performing a full
// put/get/compare round-trip against a dedicated "_health_check" DMap,
// bounded by a 5-second timeout. The probe key is best-effort deleted
// afterwards.
func (c *Client) Health(ctx context.Context) error {
	dm, err := c.client.NewDMap("_health_check")
	if err != nil {
		return fmt.Errorf("failed to create DMap for health check: %w", err)
	}
	const probeValue = "ok"
	// Unique key per probe so concurrent health checks don't collide.
	probeKey := fmt.Sprintf("_health_%d", time.Now().UnixNano())
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	if err := dm.Put(ctx, probeKey, probeValue); err != nil {
		return fmt.Errorf("health check put failed: %w", err)
	}
	result, err := dm.Get(ctx, probeKey)
	if err != nil {
		return fmt.Errorf("health check get failed: %w", err)
	}
	got, err := result.String()
	if err != nil {
		return fmt.Errorf("health check value decode failed: %w", err)
	}
	if got != probeValue {
		return fmt.Errorf("health check value mismatch: expected %q, got %q", probeValue, got)
	}
	// Best-effort cleanup of the probe key; failure is ignored.
	_, _ = dm.Delete(ctx, probeKey)
	return nil
}
// Close closes the Olric client connection. A nil underlying client is
// treated as already closed, so calling Close on a zero-value wrapper is safe.
func (c *Client) Close(ctx context.Context) error {
	if c.client != nil {
		return c.client.Close(ctx)
	}
	return nil
}
// GetClient returns the underlying Olric client, giving callers direct access
// to APIs this wrapper does not expose. May be nil if the wrapper was not
// constructed via NewClient.
func (c *Client) GetClient() olriclib.Client {
	return c.client
}

View File

@ -3,6 +3,7 @@ package pubsub
import (
"context"
"fmt"
"time"
)
// Publish publishes a message to a topic
@ -27,6 +28,29 @@ func (m *Manager) Publish(ctx context.Context, topic string, data []byte) error
return fmt.Errorf("failed to get topic for publishing: %w", err)
}
// Wait briefly for mesh formation if no peers are in the mesh yet
// GossipSub needs time to discover peers and form a mesh
// With FloodPublish enabled, messages will be flooded to all connected peers
// but we still want to give the mesh a chance to form for better delivery
waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
defer waitCancel()
// Check if we have peers in the mesh, wait up to 2 seconds for mesh formation
meshFormed := false
for i := 0; i < 20 && !meshFormed; i++ {
peers := libp2pTopic.ListPeers()
if len(peers) > 0 {
meshFormed = true
break // Mesh has formed, proceed with publish
}
select {
case <-waitCtx.Done():
meshFormed = true // Timeout, proceed anyway (FloodPublish will handle it)
case <-time.After(100 * time.Millisecond):
// Continue waiting
}
}
// Publish message
if err := libp2pTopic.Publish(ctx, data); err != nil {
return fmt.Errorf("failed to publish message: %w", err)

View File

@ -24,24 +24,21 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
}
namespacedTopic := fmt.Sprintf("%s.%s", ns, topic)
m.mu.Lock()
defer m.mu.Unlock()
// Check if we already have a subscription for this topic
topicSub, exists := m.subscriptions[namespacedTopic]
if exists {
// Add handler to existing subscription
// Fast path: we already have a subscription for this topic
m.mu.RLock()
if existing := m.subscriptions[namespacedTopic]; existing != nil {
m.mu.RUnlock()
handlerID := generateHandlerID()
topicSub.mu.Lock()
topicSub.handlers[handlerID] = handler
topicSub.refCount++
topicSub.mu.Unlock()
existing.mu.Lock()
existing.handlers[handlerID] = handler
existing.refCount++
existing.mu.Unlock()
return nil
}
m.mu.RUnlock()
// Create new subscription
// Get or create topic
// Create the underlying libp2p subscription without holding the manager lock
// to avoid re-entrant lock attempts
libp2pTopic, err := m.getOrCreateTopic(namespacedTopic)
if err != nil {
return fmt.Errorf("failed to get topic: %w", err)
@ -58,26 +55,44 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
// Create topic subscription with initial handler
handlerID := generateHandlerID()
topicSub = &topicSubscription{
newSub := &topicSubscription{
sub: sub,
cancel: cancel,
handlers: map[HandlerID]MessageHandler{handlerID: handler},
refCount: 1,
}
m.subscriptions[namespacedTopic] = topicSub
// Install the subscription (or merge if another goroutine beat us)
m.mu.Lock()
if existing := m.subscriptions[namespacedTopic]; existing != nil {
m.mu.Unlock()
// Another goroutine already created a subscription while we were working
// Clean up our resources and add to theirs
cancel()
sub.Cancel()
handlerID := generateHandlerID()
existing.mu.Lock()
existing.handlers[handlerID] = handler
existing.refCount++
existing.mu.Unlock()
return nil
}
m.subscriptions[namespacedTopic] = newSub
m.mu.Unlock()
// Announce topic interest to help with peer discovery
go m.announceTopicInterest(namespacedTopic)
// Start message handler goroutine (fan-out to all handlers)
go func() {
defer func() {
sub.Cancel()
}()
go func(ts *topicSubscription) {
defer ts.sub.Cancel()
for {
select {
case <-subCtx.Done():
return
default:
msg, err := sub.Next(subCtx)
msg, err := ts.sub.Next(subCtx)
if err != nil {
if subCtx.Err() != nil {
return // Context cancelled
@ -85,13 +100,18 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
continue
}
// Filter out internal discovery messages
if string(msg.Data) == "PEER_DISCOVERY_PING" {
continue
}
// Broadcast to all handlers
topicSub.mu.RLock()
handlers := make([]MessageHandler, 0, len(topicSub.handlers))
for _, h := range topicSub.handlers {
ts.mu.RLock()
handlers := make([]MessageHandler, 0, len(ts.handlers))
for _, h := range ts.handlers {
handlers = append(handlers, h)
}
topicSub.mu.RUnlock()
ts.mu.RUnlock()
// Call each handler (don't block on individual handler errors)
for _, h := range handlers {
@ -102,7 +122,7 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
}
}
}
}()
}(newSub)
return nil
}

View File

@ -4,6 +4,8 @@ import (
"context"
"encoding/json"
"fmt"
"net"
"net/netip"
"os"
"path/filepath"
"strings"
@ -13,19 +15,21 @@ import (
"github.com/DeBrosOfficial/network/pkg/discovery"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
)
// ClusterDiscoveryService bridges LibP2P discovery with RQLite cluster management
type ClusterDiscoveryService struct {
host host.Host
discoveryMgr *discovery.Manager
rqliteManager *RQLiteManager
nodeID string
nodeType string
raftAddress string
httpAddress string
dataDir string
host host.Host
discoveryMgr *discovery.Manager
rqliteManager *RQLiteManager
nodeID string
nodeType string
raftAddress string
httpAddress string
dataDir string
minClusterSize int // Minimum cluster size required
knownPeers map[string]*discovery.RQLiteNodeMetadata // NodeID -> Metadata
peerHealth map[string]*PeerHealth // NodeID -> Health
@ -51,6 +55,11 @@ func NewClusterDiscoveryService(
dataDir string,
logger *zap.Logger,
) *ClusterDiscoveryService {
minClusterSize := 1
if rqliteManager != nil && rqliteManager.config != nil {
minClusterSize = rqliteManager.config.MinClusterSize
}
return &ClusterDiscoveryService{
host: h,
discoveryMgr: discoveryMgr,
@ -60,6 +69,7 @@ func NewClusterDiscoveryService(
raftAddress: raftAddress,
httpAddress: httpAddress,
dataDir: dataDir,
minClusterSize: minClusterSize,
knownPeers: make(map[string]*discovery.RQLiteNodeMetadata),
peerHealth: make(map[string]*PeerHealth),
updateInterval: 30 * time.Second,
@ -156,21 +166,34 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
connectedPeers := c.host.Network().Peers()
var metadata []*discovery.RQLiteNodeMetadata
c.logger.Debug("Collecting peer metadata from LibP2P",
zap.Int("connected_libp2p_peers", len(connectedPeers)))
// Metadata collection is routine - no need to log every occurrence
c.mu.RLock()
currentRaftAddr := c.raftAddress
currentHTTPAddr := c.httpAddress
c.mu.RUnlock()
// Add ourselves
ourMetadata := &discovery.RQLiteNodeMetadata{
NodeID: c.raftAddress, // RQLite uses raft address as node ID
RaftAddress: c.raftAddress,
HTTPAddress: c.httpAddress,
NodeID: currentRaftAddr, // RQLite uses raft address as node ID
RaftAddress: currentRaftAddr,
HTTPAddress: currentHTTPAddr,
NodeType: c.nodeType,
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
LastSeen: time.Now(),
ClusterVersion: "1.0",
}
if c.adjustSelfAdvertisedAddresses(ourMetadata) {
c.logger.Debug("Adjusted self-advertised RQLite addresses",
zap.String("raft_address", ourMetadata.RaftAddress),
zap.String("http_address", ourMetadata.HTTPAddress))
}
metadata = append(metadata, ourMetadata)
staleNodeIDs := make([]string, 0)
// Query connected peers for their RQLite metadata
// For now, we'll use a simple approach - store metadata in peer metadata store
// In a full implementation, this would use a custom protocol to exchange RQLite metadata
@ -181,6 +204,9 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
if jsonData, ok := val.([]byte); ok {
var peerMeta discovery.RQLiteNodeMetadata
if err := json.Unmarshal(jsonData, &peerMeta); err == nil {
if updated, stale := c.adjustPeerAdvertisedAddresses(peerID, &peerMeta); updated && stale != "" {
staleNodeIDs = append(staleNodeIDs, stale)
}
peerMeta.LastSeen = time.Now()
metadata = append(metadata, &peerMeta)
}
@ -188,6 +214,16 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
}
}
// Clean up stale entries if NodeID changed
if len(staleNodeIDs) > 0 {
c.mu.Lock()
for _, id := range staleNodeIDs {
delete(c.knownPeers, id)
delete(c.peerHealth, id)
}
c.mu.Unlock()
}
return metadata
}
@ -203,9 +239,6 @@ type membershipUpdateResult struct {
func (c *ClusterDiscoveryService) updateClusterMembership() {
metadata := c.collectPeerMetadata()
c.logger.Debug("Collected peer metadata",
zap.Int("metadata_count", len(metadata)))
// Compute membership changes while holding lock
c.mu.Lock()
result := c.computeMembershipChangesLocked(metadata)
@ -215,35 +248,30 @@ func (c *ClusterDiscoveryService) updateClusterMembership() {
if result.changed {
// Log state changes (peer added/removed) at Info level
if len(result.added) > 0 || len(result.updated) > 0 {
c.logger.Info("Cluster membership changed",
c.logger.Info("Membership changed",
zap.Int("added", len(result.added)),
zap.Int("updated", len(result.updated)),
zap.Strings("added_ids", result.added),
zap.Strings("updated_ids", result.updated))
zap.Strings("added", result.added),
zap.Strings("updated", result.updated))
}
// Write peers.json without holding lock
if err := c.writePeersJSONWithData(result.peersJSON); err != nil {
c.logger.Error("CRITICAL: Failed to write peers.json",
c.logger.Error("Failed to write peers.json",
zap.Error(err),
zap.String("data_dir", c.dataDir),
zap.Int("peer_count", len(result.peersJSON)))
zap.Int("peers", len(result.peersJSON)))
} else {
c.logger.Debug("peers.json updated",
zap.Int("peer_count", len(result.peersJSON)))
zap.Int("peers", len(result.peersJSON)))
}
// Update lastUpdate timestamp
c.mu.Lock()
c.lastUpdate = time.Now()
c.mu.Unlock()
} else {
c.mu.RLock()
totalPeers := len(c.knownPeers)
c.mu.RUnlock()
c.logger.Debug("No changes to cluster membership",
zap.Int("total_peers", totalPeers))
}
// No changes - don't log (reduces noise)
}
// computeMembershipChangesLocked computes membership changes and returns snapshot data
@ -268,10 +296,10 @@ func (c *ClusterDiscoveryService) computeMembershipChangesLocked(metadata []*dis
} else {
// New peer discovered
added = append(added, meta.NodeID)
c.logger.Info("Node added to cluster",
zap.String("node_id", meta.NodeID),
zap.String("raft_address", meta.RaftAddress),
zap.String("node_type", meta.NodeType),
c.logger.Info("Node added",
zap.String("node", meta.NodeID),
zap.String("raft", meta.RaftAddress),
zap.String("type", meta.NodeType),
zap.Uint64("log_index", meta.RaftLogIndex))
}
@ -293,18 +321,56 @@ func (c *ClusterDiscoveryService) computeMembershipChangesLocked(metadata []*dis
}
}
// CRITICAL FIX: Count remote peers (excluding self)
remotePeerCount := 0
for _, peer := range c.knownPeers {
if peer.NodeID != c.raftAddress {
remotePeerCount++
}
}
// Get peers JSON snapshot (for checking if it would be empty)
peers := c.getPeersJSONUnlocked()
// Determine if we should write peers.json
shouldWrite := len(added) > 0 || len(updated) > 0 || c.lastUpdate.IsZero()
// CRITICAL FIX: Don't write peers.json until we have minimum cluster size
// This prevents RQLite from starting as a single-node cluster
// For min_cluster_size=3, we need at least 2 remote peers (plus self = 3 total)
if shouldWrite {
// Log initial sync if this is the first time
// For initial sync, wait until we have at least (MinClusterSize - 1) remote peers
// This ensures peers.json contains enough peers for proper cluster formation
if c.lastUpdate.IsZero() {
c.logger.Info("Initial cluster membership sync",
zap.Int("total_peers", len(c.knownPeers)))
requiredRemotePeers := c.minClusterSize - 1
if remotePeerCount < requiredRemotePeers {
c.logger.Info("Waiting for peers",
zap.Int("have", remotePeerCount),
zap.Int("need", requiredRemotePeers),
zap.Int("min_size", c.minClusterSize))
return membershipUpdateResult{
changed: false,
}
}
}
// Additional safety check: don't write empty peers.json (would cause single-node cluster)
if len(peers) == 0 && c.lastUpdate.IsZero() {
c.logger.Info("No remote peers - waiting")
return membershipUpdateResult{
changed: false,
}
}
// Log initial sync if this is the first time
if c.lastUpdate.IsZero() {
c.logger.Info("Initial sync",
zap.Int("total", len(c.knownPeers)),
zap.Int("remote", remotePeerCount),
zap.Int("in_json", len(peers)))
}
// Get peers JSON snapshot
peers := c.getPeersJSONUnlocked()
return membershipUpdateResult{
peersJSON: peers,
added: added,
@ -331,8 +397,8 @@ func (c *ClusterDiscoveryService) removeInactivePeers() {
if inactiveDuration > c.inactivityLimit {
// Mark as inactive and remove
c.logger.Warn("Node removed from cluster",
zap.String("node_id", nodeID),
c.logger.Warn("Node removed",
zap.String("node", nodeID),
zap.String("reason", "inactive"),
zap.Duration("inactive_duration", inactiveDuration))
@ -344,9 +410,9 @@ func (c *ClusterDiscoveryService) removeInactivePeers() {
// Regenerate peers.json if any peers were removed
if len(removed) > 0 {
c.logger.Info("Removed inactive nodes, regenerating peers.json",
zap.Int("removed", len(removed)),
zap.Strings("node_ids", removed))
c.logger.Info("Removed inactive",
zap.Int("count", len(removed)),
zap.Strings("nodes", removed))
if err := c.writePeersJSON(); err != nil {
c.logger.Error("Failed to write peers.json after cleanup", zap.Error(err))
@ -366,6 +432,11 @@ func (c *ClusterDiscoveryService) getPeersJSONUnlocked() []map[string]interface{
peers := make([]map[string]interface{}, 0, len(c.knownPeers))
for _, peer := range c.knownPeers {
// CRITICAL FIX: Include ALL peers (including self) in peers.json
// When using bootstrap-expect with recovery, RQLite needs the complete
// expected cluster configuration to properly form consensus.
// The peers.json file is used by RQLite's recovery mechanism to know
// what the full cluster membership should be, including the local node.
peerEntry := map[string]interface{}{
"id": peer.RaftAddress, // RQLite uses raft address as node ID
"address": peer.RaftAddress,
@ -401,11 +472,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
// Get the RQLite raft directory
rqliteDir := filepath.Join(dataDir, "rqlite", "raft")
c.logger.Debug("Writing peers.json",
zap.String("data_dir", c.dataDir),
zap.String("expanded_path", dataDir),
zap.String("raft_dir", rqliteDir),
zap.Int("peer_count", len(peers)))
// Writing peers.json - routine operation, no need to log details
if err := os.MkdirAll(rqliteDir, 0755); err != nil {
return fmt.Errorf("failed to create raft directory %s: %w", rqliteDir, err)
@ -416,7 +483,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
// Backup existing peers.json if it exists
if _, err := os.Stat(peersFile); err == nil {
c.logger.Debug("Backing up existing peers.json", zap.String("backup_file", backupFile))
// Backup existing peers.json if it exists - routine operation
data, err := os.ReadFile(peersFile)
if err == nil {
_ = os.WriteFile(backupFile, data, 0644)
@ -429,7 +496,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
return fmt.Errorf("failed to marshal peers.json: %w", err)
}
c.logger.Debug("Marshaled peers.json", zap.Int("data_size", len(data)))
// Marshaled peers.json - routine operation
// Write atomically using temp file + rename
tempFile := peersFile + ".tmp"
@ -449,9 +516,8 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
}
c.logger.Info("peers.json written",
zap.String("file", peersFile),
zap.Int("node_count", len(peers)),
zap.Strings("node_ids", nodeIDs))
zap.Int("peers", len(peers)),
zap.Strings("nodes", nodeIDs))
return nil
}
@ -567,17 +633,58 @@ func (c *ClusterDiscoveryService) WaitForDiscoverySettling(ctx context.Context)
// TriggerSync manually triggers a cluster membership sync.
// It runs the full metadata-collection + peers.json update path once,
// outside the normal periodic update loop.
func (c *ClusterDiscoveryService) TriggerSync() {
	c.logger.Info("Manually triggering cluster membership sync")

	// For bootstrap nodes, wait a bit for peer discovery to stabilize so the
	// membership snapshot is not computed from an empty/partial peer set.
	// NOTE(review): the 5s delay is a fixed heuristic with no backing signal;
	// confirm it is sufficient on slow networks, or consider making it
	// configurable / event-driven.
	if c.nodeType == "bootstrap" {
		c.logger.Info("Bootstrap node: waiting for peer discovery to complete")
		time.Sleep(5 * time.Second)
	}

	c.updateClusterMembership()
}
// ForceWritePeersJSON forces writing peers.json regardless of membership changes.
// This is useful after clearing raft state when we need to recreate peers.json.
// It refreshes peer metadata first so the written file reflects the current
// cluster view, then writes the file unconditionally.
func (c *ClusterDiscoveryService) ForceWritePeersJSON() error {
	c.logger.Info("Force writing peers.json")

	// Collect the latest peer metadata so we are not writing stale addresses.
	freshMeta := c.collectPeerMetadata()

	// Merge the fresh metadata into our known-peer and health maps, then take
	// a snapshot of the peers.json payload — all under one lock acquisition.
	c.mu.Lock()
	for _, m := range freshMeta {
		c.knownPeers[m.NodeID] = m
		if m.NodeID == c.raftAddress {
			continue // we do not health-track ourselves
		}
		now := time.Now()
		if health, ok := c.peerHealth[m.NodeID]; ok {
			health.LastSeen = now
			health.Status = "active"
		} else {
			c.peerHealth[m.NodeID] = &PeerHealth{
				LastSeen:       now,
				LastSuccessful: now,
				Status:         "active",
			}
		}
	}
	snapshot := c.getPeersJSONUnlocked()
	c.mu.Unlock()

	// Write the file without holding the lock.
	if err := c.writePeersJSONWithData(snapshot); err != nil {
		c.logger.Error("Failed to force write peers.json",
			zap.Error(err),
			zap.String("data_dir", c.dataDir),
			zap.Int("peers", len(snapshot)))
		return err
	}

	c.logger.Info("peers.json written",
		zap.Int("peers", len(snapshot)))
	return nil
}
// TriggerPeerExchange actively exchanges peer information with connected peers
// This populates the peerstore with RQLite metadata from other nodes
func (c *ClusterDiscoveryService) TriggerPeerExchange(ctx context.Context) error {
@ -585,25 +692,36 @@ func (c *ClusterDiscoveryService) TriggerPeerExchange(ctx context.Context) error
return fmt.Errorf("discovery manager not available")
}
c.logger.Info("Triggering peer exchange via discovery manager")
collected := c.discoveryMgr.TriggerPeerExchange(ctx)
c.logger.Info("Peer exchange completed", zap.Int("peers_with_metadata", collected))
c.logger.Debug("Exchange completed", zap.Int("with_metadata", collected))
return nil
}
// UpdateOwnMetadata updates our own RQLite metadata in the peerstore
func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
c.mu.RLock()
currentRaftAddr := c.raftAddress
currentHTTPAddr := c.httpAddress
c.mu.RUnlock()
metadata := &discovery.RQLiteNodeMetadata{
NodeID: c.raftAddress, // RQLite uses raft address as node ID
RaftAddress: c.raftAddress,
HTTPAddress: c.httpAddress,
NodeID: currentRaftAddr, // RQLite uses raft address as node ID
RaftAddress: currentRaftAddr,
HTTPAddress: currentHTTPAddr,
NodeType: c.nodeType,
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
LastSeen: time.Now(),
ClusterVersion: "1.0",
}
// Adjust addresses if needed
if c.adjustSelfAdvertisedAddresses(metadata) {
c.logger.Debug("Adjusted self-advertised RQLite addresses in UpdateOwnMetadata",
zap.String("raft_address", metadata.RaftAddress),
zap.String("http_address", metadata.HTTPAddress))
}
// Store in our own peerstore for peer exchange
data, err := json.Marshal(metadata)
if err != nil {
@ -616,13 +734,28 @@ func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
return
}
c.logger.Debug("Updated own RQLite metadata",
zap.String("node_id", metadata.NodeID),
c.logger.Debug("Metadata updated",
zap.String("node", metadata.NodeID),
zap.Uint64("log_index", metadata.RaftLogIndex))
}
// StoreRemotePeerMetadata stores metadata received from a remote peer
func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metadata *discovery.RQLiteNodeMetadata) error {
if metadata == nil {
return fmt.Errorf("metadata is nil")
}
// Adjust addresses if needed (replace localhost with actual IP)
if updated, stale := c.adjustPeerAdvertisedAddresses(peerID, metadata); updated && stale != "" {
// Clean up stale entry if NodeID changed
c.mu.Lock()
delete(c.knownPeers, stale)
delete(c.peerHealth, stale)
c.mu.Unlock()
}
metadata.LastSeen = time.Now()
data, err := json.Marshal(metadata)
if err != nil {
return fmt.Errorf("failed to marshal metadata: %w", err)
@ -632,9 +765,240 @@ func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metada
return fmt.Errorf("failed to store metadata: %w", err)
}
c.logger.Debug("Stored remote peer metadata",
zap.String("peer_id", peerID.String()[:8]+"..."),
zap.String("node_id", metadata.NodeID))
c.logger.Debug("Metadata stored",
zap.String("peer", shortPeerID(peerID)),
zap.String("node", metadata.NodeID))
return nil
}
// adjustPeerAdvertisedAddresses adjusts peer metadata addresses by replacing
// localhost/loopback with the actual IP address observed on the LibP2P
// connection. Returns (updated, staleNodeID); staleNodeID is non-empty when
// the rewrite changed NodeID, meaning the old map entry should be removed.
func (c *ClusterDiscoveryService) adjustPeerAdvertisedAddresses(peerID peer.ID, meta *discovery.RQLiteNodeMetadata) (bool, string) {
	resolvedIP := c.selectPeerIP(peerID)
	if resolvedIP == "" {
		// No usable address known for this peer; leave metadata untouched.
		return false, ""
	}
	updated, staleID := rewriteAdvertisedAddresses(meta, resolvedIP, true)
	if updated {
		c.logger.Debug("Addresses normalized",
			zap.String("peer", shortPeerID(peerID)),
			zap.String("raft", meta.RaftAddress),
			zap.String("http_address", meta.HTTPAddress))
	}
	return updated, staleID
}
// adjustSelfAdvertisedAddresses adjusts our own metadata addresses by replacing
// localhost/loopback with the actual IP address taken from the LibP2P host.
// When a rewrite happens, the cached raft/HTTP addresses are updated too so
// later metadata snapshots start from the corrected values.
func (c *ClusterDiscoveryService) adjustSelfAdvertisedAddresses(meta *discovery.RQLiteNodeMetadata) bool {
	selfIP := c.selectSelfIP()
	if selfIP == "" {
		return false
	}
	if changed, _ := rewriteAdvertisedAddresses(meta, selfIP, true); !changed {
		return false
	}
	// Keep internal state in sync with the corrected metadata.
	c.mu.Lock()
	defer c.mu.Unlock()
	c.raftAddress = meta.RaftAddress
	c.httpAddress = meta.HTTPAddress
	return true
}
// selectPeerIP selects the best IP address for a peer from LibP2P connections.
// A public IP wins immediately; otherwise the first acceptable private IP is
// kept as a fallback. Loopback/unspecified hosts are never returned.
func (c *ClusterDiscoveryService) selectPeerIP(peerID peer.ID) string {
	var privateFallback string

	// consider inspects a single multiaddr: it returns a public IP right away,
	// records the first acceptable private IP as fallback, and skips the rest.
	consider := func(addr multiaddr.Multiaddr) string {
		ip, public := ipFromMultiaddr(addr)
		if ip == "" || shouldReplaceHost(ip) {
			return ""
		}
		if public {
			return ip
		}
		if privateFallback == "" {
			privateFallback = ip
		}
		return ""
	}

	// Active connections are the most reliable source of reachable addresses.
	for _, conn := range c.host.Network().ConnsToPeer(peerID) {
		if ip := consider(conn.RemoteMultiaddr()); ip != "" {
			return ip
		}
	}

	// Fall back to whatever addresses the peerstore has recorded.
	for _, addr := range c.host.Peerstore().Addrs(peerID) {
		if ip := consider(addr); ip != "" {
			return ip
		}
	}

	return privateFallback
}
// selectSelfIP selects the best IP address for ourselves from the LibP2P host
// addresses. A public IP wins immediately; otherwise the first acceptable
// private IP is returned. Loopback/unspecified hosts are skipped entirely.
func (c *ClusterDiscoveryService) selectSelfIP() string {
	var privateFallback string
	for _, addr := range c.host.Addrs() {
		ip, public := ipFromMultiaddr(addr)
		if ip == "" || shouldReplaceHost(ip) {
			continue
		}
		if public {
			return ip
		}
		if privateFallback == "" {
			privateFallback = ip
		}
	}
	return privateFallback
}
// rewriteAdvertisedAddresses rewrites RaftAddress and HTTPAddress in metadata,
// replacing localhost/loopback hosts with the provided IP.
// Returns (changed, staleNodeID). staleNodeID is non-empty if NodeID changed,
// so callers can evict the entry keyed by the old NodeID.
func rewriteAdvertisedAddresses(meta *discovery.RQLiteNodeMetadata, newHost string, allowNodeIDRewrite bool) (bool, string) {
	if meta == nil || newHost == "" {
		return false, ""
	}

	originalNodeID := meta.NodeID
	changed := false
	nodeIDChanged := false

	// Replace host in RaftAddress if it's localhost/loopback
	if newAddr, replaced := replaceAddressHost(meta.RaftAddress, newHost); replaced {
		if meta.RaftAddress != newAddr {
			meta.RaftAddress = newAddr
			changed = true
		}
	}

	// Replace host in HTTPAddress if it's localhost/loopback
	if newAddr, replaced := replaceAddressHost(meta.HTTPAddress, newHost); replaced {
		if meta.HTTPAddress != newAddr {
			meta.HTTPAddress = newAddr
			changed = true
		}
	}

	// Update NodeID to match RaftAddress if it changed.
	// NOTE(review): `meta.NodeID == originalNodeID` is always true here —
	// NodeID has not been modified above — so the condition effectively
	// reduces to `meta.RaftAddress != ""`, and NodeID is unconditionally
	// synced to RaftAddress whenever they differ. Confirm this is intended
	// before tightening the check.
	if allowNodeIDRewrite {
		if meta.RaftAddress != "" && (meta.NodeID == "" || meta.NodeID == originalNodeID || shouldReplaceHost(hostFromAddress(meta.NodeID))) {
			if meta.NodeID != meta.RaftAddress {
				meta.NodeID = meta.RaftAddress
				nodeIDChanged = meta.NodeID != originalNodeID
				if nodeIDChanged {
					changed = true
				}
			}
		}
	}

	if nodeIDChanged {
		return changed, originalNodeID
	}
	return changed, ""
}
// replaceAddressHost replaces the host part of a host:port address when the
// current host is localhost/loopback/unspecified.
// Returns (newAddress, replaced); replaced reports whether a swap occurred.
func replaceAddressHost(address, newHost string) (string, bool) {
	if address == "" || newHost == "" {
		return address, false
	}
	currentHost, port, err := net.SplitHostPort(address)
	if err != nil {
		// Not a valid host:port pair; leave it untouched.
		return address, false
	}
	if !shouldReplaceHost(currentHost) {
		return address, false
	}
	return net.JoinHostPort(newHost, port), true
}
// shouldReplaceHost returns true if the host should be replaced with a real
// routable address: empty, "localhost" (any case), loopback, or unspecified.
// Non-IP strings (e.g. DNS names) and routable IPs are kept as-is.
func shouldReplaceHost(host string) bool {
	switch {
	case host == "":
		return true
	case strings.EqualFold(host, "localhost"):
		return true
	}
	// Loopback (127.0.0.1 / ::1) and unspecified (0.0.0.0 / ::) addresses are
	// never reachable from a remote peer, so they must be replaced too.
	addr, err := netip.ParseAddr(host)
	if err != nil {
		// Not an IP literal: keep it.
		return false
	}
	return addr.IsLoopback() || addr.IsUnspecified()
}
// hostFromAddress extracts the host part from a host:port address.
// Returns "" when the address is not a valid host:port pair.
func hostFromAddress(address string) string {
	if host, _, err := net.SplitHostPort(address); err == nil {
		return host
	}
	return ""
}
// ipFromMultiaddr extracts an IP address from a multiaddr and reports whether
// it is public. IPv4 is preferred over IPv6 when both components exist.
func ipFromMultiaddr(addr multiaddr.Multiaddr) (string, bool) {
	if addr == nil {
		return "", false
	}
	// Check IPv4 first, then IPv6 — mirrors the preference for v4 transports.
	for _, proto := range []int{multiaddr.P_IP4, multiaddr.P_IP6} {
		if ip, err := addr.ValueForProtocol(proto); err == nil {
			return ip, isPublicIP(ip)
		}
	}
	return "", false
}
// isPublicIP returns true if the IP string parses as a globally routable
// address — i.e. not loopback, unspecified, link-local, multicast-link-local,
// or RFC1918/ULA private. Unparseable strings are never public.
func isPublicIP(ip string) bool {
	parsed, err := netip.ParseAddr(ip)
	if err != nil {
		return false
	}
	nonPublic := parsed.IsLoopback() ||
		parsed.IsUnspecified() ||
		parsed.IsLinkLocalUnicast() ||
		parsed.IsLinkLocalMulticast() ||
		parsed.IsPrivate()
	return !nonPublic
}
// shortPeerID returns a shortened version of a peer ID for logging:
// the first 8 characters followed by "...", or the full ID if it is short.
func shortPeerID(id peer.ID) string {
	full := id.String()
	if len(full) > 8 {
		return full[:8] + "..."
	}
	return full
}

View File

@ -5,26 +5,98 @@ import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"time"
"go.uber.org/zap"
)
// getRaftLogIndex returns the current Raft log index for this node
// It first tries to get the index from the running RQLite instance via /status endpoint.
// If that fails or returns 0, it falls back to reading persisted snapshot metadata from disk.
// This ensures accurate log index reporting even before RQLite is fully started.
func (r *RQLiteManager) getRaftLogIndex() uint64 {
status, err := r.getRQLiteStatus()
if err != nil {
r.logger.Debug("Failed to get Raft log index", zap.Error(err))
if err == nil {
// Return the highest index we have from runtime status
maxIndex := status.Store.Raft.LastLogIndex
if status.Store.Raft.AppliedIndex > maxIndex {
maxIndex = status.Store.Raft.AppliedIndex
}
if status.Store.Raft.CommitIndex > maxIndex {
maxIndex = status.Store.Raft.CommitIndex
}
// If runtime status reports a valid index, use it
if maxIndex > 0 {
return maxIndex
}
// Runtime status returned 0, fall back to persisted snapshot metadata
// This handles the case where RQLite is running but hasn't applied any logs yet
if persisted := r.getPersistedRaftLogIndex(); persisted > 0 {
r.logger.Debug("Using persisted Raft log index because runtime status reported zero",
zap.Uint64("persisted_index", persisted))
return persisted
}
return 0
}
// Return the highest index we have
maxIndex := status.Store.Raft.LastLogIndex
if status.Store.Raft.AppliedIndex > maxIndex {
maxIndex = status.Store.Raft.AppliedIndex
// RQLite status endpoint is not available (not started yet or unreachable)
// Fall back to reading persisted snapshot metadata from disk
persisted := r.getPersistedRaftLogIndex()
if persisted > 0 {
r.logger.Debug("Using persisted Raft log index before RQLite is reachable",
zap.Uint64("persisted_index", persisted),
zap.Error(err))
return persisted
}
if status.Store.Raft.CommitIndex > maxIndex {
maxIndex = status.Store.Raft.CommitIndex
r.logger.Debug("Failed to get Raft log index", zap.Error(err))
return 0
}
// getPersistedRaftLogIndex reads the highest Raft log index from snapshot metadata files
// This allows us to report accurate log indexes even before RQLite is started
func (r *RQLiteManager) getPersistedRaftLogIndex() uint64 {
rqliteDataDir, err := r.rqliteDataDirPath()
if err != nil {
return 0
}
snapshotsDir := filepath.Join(rqliteDataDir, "rsnapshots")
entries, err := os.ReadDir(snapshotsDir)
if err != nil {
return 0
}
var maxIndex uint64
for _, entry := range entries {
// Only process directories (snapshot directories)
if !entry.IsDir() {
continue
}
// Read meta.json from the snapshot directory
metaPath := filepath.Join(snapshotsDir, entry.Name(), "meta.json")
raw, err := os.ReadFile(metaPath)
if err != nil {
continue
}
// Parse the metadata JSON to extract the Index field
var meta struct {
Index uint64 `json:"Index"`
}
if err := json.Unmarshal(raw, &meta); err != nil {
continue
}
// Track the highest index found
if meta.Index > maxIndex {
maxIndex = meta.Index
}
}
return maxIndex

View File

@ -210,10 +210,11 @@ type txOp struct {
}
type transactionRequest struct {
Ops []txOp `json:"ops"`
ReturnResults bool `json:"return_results"` // if true, returns per-op results
StopOnError bool `json:"stop_on_error"` // default true in tx
PartialResults bool `json:"partial_results"` // ignored for actual TX (atomic); kept for API symmetry
Ops []txOp `json:"ops"`
Statements []string `json:"statements"` // legacy format: array of SQL strings (treated as exec ops)
ReturnResults bool `json:"return_results"` // if true, returns per-op results
StopOnError bool `json:"stop_on_error"` // default true in tx
PartialResults bool `json:"partial_results"` // ignored for actual TX (atomic); kept for API symmetry
}
// --------------------
@ -427,8 +428,21 @@ func (g *HTTPGateway) handleTransaction(w http.ResponseWriter, r *http.Request)
return
}
var body transactionRequest
if err := json.NewDecoder(r.Body).Decode(&body); err != nil || len(body.Ops) == 0 {
writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?}")
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?} or {statements:[sql...]}")
return
}
// Support legacy "statements" format by converting to ops
if len(body.Statements) > 0 && len(body.Ops) == 0 {
body.Ops = make([]txOp, len(body.Statements))
for i, stmt := range body.Statements {
body.Ops[i] = txOp{Kind: "exec", SQL: stmt}
}
}
if len(body.Ops) == 0 {
writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?} or {statements:[sql...]}")
return
}
ctx, cancel := g.withTimeout(r.Context())
@ -501,8 +515,8 @@ func (g *HTTPGateway) handleSchema(w http.ResponseWriter, r *http.Request) {
return
}
writeJSON(w, http.StatusOK, map[string]any{
"objects": rows,
"count": len(rows),
"tables": rows,
"count": len(rows),
})
}

File diff suppressed because it is too large Load Diff

View File

@ -6,13 +6,16 @@ import "time"
type RQLiteStatus struct {
Store struct {
Raft struct {
AppliedIndex uint64 `json:"applied_index"`
CommitIndex uint64 `json:"commit_index"`
LastLogIndex uint64 `json:"last_log_index"`
AppliedIndex uint64 `json:"applied_index"`
CommitIndex uint64 `json:"commit_index"`
LastLogIndex uint64 `json:"last_log_index"`
LastSnapshotIndex uint64 `json:"last_snapshot_index"`
State string `json:"state"`
LeaderID string `json:"leader_id"`
LeaderAddr string `json:"leader_addr"`
State string `json:"state"`
LeaderID string `json:"leader_id"`
LeaderAddr string `json:"leader_addr"`
Term uint64 `json:"term"`
NumPeers int `json:"num_peers"`
Voter bool `json:"voter"`
} `json:"raft"`
DBConf struct {
DSN string `json:"dsn"`
@ -20,30 +23,30 @@ type RQLiteStatus struct {
} `json:"db_conf"`
} `json:"store"`
Runtime struct {
GOARCH string `json:"GOARCH"`
GOOS string `json:"GOOS"`
GOMAXPROCS int `json:"GOMAXPROCS"`
NumCPU int `json:"num_cpu"`
NumGoroutine int `json:"num_goroutine"`
Version string `json:"version"`
GOARCH string `json:"GOARCH"`
GOOS string `json:"GOOS"`
GOMAXPROCS int `json:"GOMAXPROCS"`
NumCPU int `json:"num_cpu"`
NumGoroutine int `json:"num_goroutine"`
Version string `json:"version"`
} `json:"runtime"`
HTTP struct {
Addr string `json:"addr"`
Auth string `json:"auth"`
Addr string `json:"addr"`
Auth string `json:"auth"`
} `json:"http"`
Node struct {
Uptime string `json:"uptime"`
StartTime string `json:"start_time"`
Uptime string `json:"uptime"`
StartTime string `json:"start_time"`
} `json:"node"`
}
// RQLiteNode represents a node in the RQLite cluster
type RQLiteNode struct {
ID string `json:"id"`
Address string `json:"address"`
Leader bool `json:"leader"`
Voter bool `json:"voter"`
Reachable bool `json:"reachable"`
ID string `json:"id"`
Address string `json:"address"`
Leader bool `json:"leader"`
Voter bool `json:"voter"`
Reachable bool `json:"reachable"`
}
// RQLiteNodes represents the response from RQLite's /nodes endpoint
@ -68,4 +71,3 @@ type ClusterMetrics struct {
CurrentLeader string
AveragePeerHealth float64
}

101
scripts/dev-kill-all.sh Executable file
View File

@ -0,0 +1,101 @@
#!/bin/bash
# dev-kill-all.sh — force-kill every process bound to the local dev ports of
# the 5-node dev topology (bootstrap, bootstrap2, node2, node3, node4), then
# clean up stale PID files and verify the ports are actually free.
set -euo pipefail

echo "Force killing all processes on dev ports..."

# Define all dev ports (5 nodes topology: bootstrap, bootstrap2, node2, node3, node4)
PORTS=(
  # LibP2P
  4001 4011 4002 4003 4004
  # IPFS Swarm
  4101 4111 4102 4103 4104
  # IPFS API
  4501 4511 4502 4503 4504
  # RQLite HTTP
  5001 5011 5002 5003 5004
  # RQLite Raft
  7001 7011 7002 7003 7004
  # IPFS Gateway
  7501 7511 7502 7503 7504
  # Gateway
  6001
  # Olric
  3320 3322
  # Anon SOCKS
  9050
  # IPFS Cluster REST API
  9094 9104 9114 9124 9134
  # IPFS Cluster P2P
  9096 9106 9116 9126 9136
)

killed_count=0
killed_pids=()

# Kill all processes using these ports (LISTEN, ESTABLISHED, or any state).
# lsof -iTCP:<port> matches the exact port, so no regex boundary issues here.
for port in "${PORTS[@]}"; do
  # Get all PIDs using this port in ANY TCP state
  pids=$(lsof -nP -iTCP:"$port" -t 2>/dev/null || true)
  if [[ -n "$pids" ]]; then
    echo "Killing processes on port $port: $pids"
    for pid in $pids; do
      # Kill the process and all its children
      kill -9 "$pid" 2>/dev/null || true
      # Also kill any children of this process
      pkill -9 -P "$pid" 2>/dev/null || true
      killed_pids+=("$pid")
    done
    killed_count=$((killed_count + 1))
  fi
done

# Also kill processes by command name patterns (in case they're orphaned).
# This catches processes that might be using debros ports but not showing up
# in the per-port lsof scan above.
COMMANDS=("node" "ipfs" "ipfs-cluster-service" "rqlited" "olric-server" "gateway")
for cmd in "${COMMANDS[@]}"; do
  # Find all processes with this command name
  all_pids=$(pgrep -f "^.*$cmd.*" 2>/dev/null || true)
  if [[ -n "$all_pids" ]]; then
    for pid in $all_pids; do
      # Check if this process is using any of our dev ports.
      # The alternation lists exactly the ports in PORTS, and the trailing
      # ([^0-9]|$) boundary prevents substring false positives — without it
      # ":5001" would match inside ":50012" and kill an unrelated process.
      port_match=$(lsof -nP -p "$pid" -iTCP 2>/dev/null | grep -E ":(400[1-4]|4011|410[1-4]|4111|450[1-4]|4511|500[1-4]|5011|6001|700[1-4]|7011|750[1-4]|7511|332[02]|9050|9094|9104|9114|9124|9134|9096|9106|9116|9126|9136)([^0-9]|$)" || true)
      if [[ -n "$port_match" ]]; then
        echo "Killing orphaned $cmd process (PID: $pid) using dev ports"
        kill -9 "$pid" 2>/dev/null || true
        pkill -9 -P "$pid" 2>/dev/null || true
        killed_pids+=("$pid")
      fi
    done
  fi
done

# Clean up PID files left behind by the dev launcher
PIDS_DIR="$HOME/.debros/.pids"
if [[ -d "$PIDS_DIR" ]]; then
  rm -f "$PIDS_DIR"/*.pid || true
fi

# Remove duplicates and report
if [[ ${#killed_pids[@]} -gt 0 ]]; then
  unique_pids=($(printf '%s\n' "${killed_pids[@]}" | sort -u))
  echo "✓ Killed ${#unique_pids[@]} unique process(es) on $killed_count port(s)"
else
  echo "✓ No processes found on dev ports"
fi

# Final verification: check if any ports are still in use
still_in_use=0
for port in "${PORTS[@]}"; do
  pids=$(lsof -nP -iTCP:"$port" -t 2>/dev/null || true)
  if [[ -n "$pids" ]]; then
    echo "⚠️ Warning: Port $port still in use by: $pids"
    still_in_use=$((still_in_use + 1))
  fi
done

if [[ $still_in_use -eq 0 ]]; then
  echo "✓ All dev ports are now free"
else
  echo "⚠️ $still_in_use port(s) still in use - you may need to manually kill processes"
fi

View File

@ -1,17 +1,22 @@
#!/bin/bash
# DeBros Network Installation Script
# Downloads network-cli from GitHub releases and runs interactive setup
# Downloads dbn from GitHub releases and runs the new 'dbn prod install' flow
#
# Supported: Ubuntu 18.04+, Debian 10+
# Supported: Ubuntu 20.04+, Debian 11+
#
# Usage:
# curl -fsSL https://install.debros.network | bash
# OR
# bash scripts/install-debros-network.sh
# OR with specific flags:
# bash scripts/install-debros-network.sh --bootstrap
# bash scripts/install-debros-network.sh --vps-ip 1.2.3.4 --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...
# bash scripts/install-debros-network.sh --domain example.com
set -e
trap 'echo -e "${RED}An error occurred. Installation aborted.${NOCOLOR}"; exit 1' ERR
set -o pipefail
trap 'error "An error occurred. Installation aborted."; exit 1' ERR
# Color codes
RED='\033[0;31m'
@ -27,33 +32,9 @@ GITHUB_API="https://api.github.com/repos/$GITHUB_REPO"
INSTALL_DIR="/usr/local/bin"
log() { echo -e "${CYAN}[$(date '+%Y-%m-%d %H:%M:%S')]${NOCOLOR} $1"; }
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1"; }
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1" >&2; }
success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; }
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1"; }
# REQUIRE INTERACTIVE MODE
if [ ! -t 0 ]; then
error "This script requires an interactive terminal."
echo -e ""
echo -e "${YELLOW}Please run this script directly:${NOCOLOR}"
echo -e "${CYAN} bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
echo -e ""
exit 1
fi
# Check if running as root
if [[ $EUID -eq 0 ]]; then
error "This script should NOT be run as root"
echo -e "${YELLOW}Run as a regular user with sudo privileges:${NOCOLOR}"
echo -e "${CYAN} bash $0${NOCOLOR}"
exit 1
fi
# Check for sudo
if ! command -v sudo &>/dev/null; then
error "sudo command not found. Please ensure you have sudo privileges."
exit 1
fi
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1" >&2; }
display_banner() {
echo -e "${BLUE}========================================================================${NOCOLOR}"
@ -65,7 +46,7 @@ display_banner() {
|____/ \\___|____/|_| \\___/|___/ |_| \\_|\\___|\\__| \\_/\\_/ \\___/|_| |_|\\_\\
${NOCOLOR}"
echo -e "${BLUE}========================================================================${NOCOLOR}"
echo -e "${GREEN} Quick Install Script ${NOCOLOR}"
echo -e "${GREEN} Production Installation ${NOCOLOR}"
echo -e "${BLUE}========================================================================${NOCOLOR}"
}
@ -79,15 +60,13 @@ detect_os() {
OS=$ID
VERSION=$VERSION_ID
# Only support Debian and Ubuntu
# Support Debian and Ubuntu
case $OS in
ubuntu|debian)
log "Detected OS: $OS ${VERSION:-unknown}"
;;
*)
error "Unsupported operating system: $OS"
echo -e "${YELLOW}This script only supports Ubuntu 18.04+ and Debian 10+${NOCOLOR}"
exit 1
warning "Unsupported operating system: $OS (may not work)"
;;
esac
}
@ -110,385 +89,134 @@ check_architecture() {
log "Architecture: $ARCH (using $GITHUB_ARCH)"
}
check_dependencies() {
log "Checking required tools..."
local missing_deps=()
for cmd in curl tar; do
if ! command -v $cmd &>/dev/null; then
missing_deps+=("$cmd")
fi
done
if [ ${#missing_deps[@]} -gt 0 ]; then
log "Installing missing dependencies: ${missing_deps[*]}"
sudo apt update
sudo apt install -y "${missing_deps[@]}"
check_root() {
if [[ $EUID -ne 0 ]]; then
error "This script must be run as root"
echo -e "${YELLOW}Please run with sudo:${NOCOLOR}"
echo -e "${CYAN} sudo bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
exit 1
fi
success "All required tools available"
}
get_latest_release() {
log "Fetching latest release information..."
log "Fetching latest release..."
# Get latest release (exclude pre-releases and nightly)
LATEST_RELEASE=$(curl -fsSL "$GITHUB_API/releases" | \
grep -v "prerelease.*true" | \
grep -v "draft.*true" | \
grep '"tag_name"' | \
head -1 | \
cut -d'"' -f4)
# Try to get latest release with better error handling
RELEASE_DATA=""
if command -v jq &>/dev/null; then
# Get the latest release (including pre-releases/nightly)
RELEASE_DATA=$(curl -fsSL -H "Accept: application/vnd.github+json" "$GITHUB_API/releases" 2>&1)
if [ $? -ne 0 ]; then
error "Failed to fetch release data from GitHub API"
error "Response: $RELEASE_DATA"
exit 1
fi
LATEST_RELEASE=$(echo "$RELEASE_DATA" | jq -r '.[0] | .tag_name' 2>/dev/null)
else
RELEASE_DATA=$(curl -fsSL "$GITHUB_API/releases" 2>&1)
if [ $? -ne 0 ]; then
error "Failed to fetch release data from GitHub API"
error "Response: $RELEASE_DATA"
exit 1
fi
LATEST_RELEASE=$(echo "$RELEASE_DATA" | grep '"tag_name"' | head -1 | cut -d'"' -f4)
fi
if [ -z "$LATEST_RELEASE" ]; then
error "Could not determine latest release"
if [ -z "$LATEST_RELEASE" ] || [ "$LATEST_RELEASE" = "null" ]; then
error "Could not determine latest release version"
error "GitHub API response may be empty or rate-limited"
exit 1
fi
log "Latest release: $LATEST_RELEASE"
}
download_and_install() {
log "Downloading network-cli..."
download_and_install_cli() {
BINARY_NAME="debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz"
DOWNLOAD_URL="$GITHUB_REPO/releases/download/$LATEST_RELEASE/$BINARY_NAME"
# Construct download URL
DOWNLOAD_URL="https://github.com/$GITHUB_REPO/releases/download/$LATEST_RELEASE/debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz"
log "Downloading dbn from GitHub releases..."
log "URL: https://github.com/$DOWNLOAD_URL"
# Create temporary directory
TEMP_DIR=$(mktemp -d)
trap "rm -rf $TEMP_DIR" EXIT
# Clean up any stale binaries
rm -f /tmp/network-cli /tmp/dbn.tar.gz "$INSTALL_DIR/dbn"
# Download
log "Downloading from: $DOWNLOAD_URL"
if ! curl -fsSL -o "$TEMP_DIR/network-cli.tar.gz" "$DOWNLOAD_URL"; then
error "Failed to download network-cli"
if ! curl -fsSL -o /tmp/dbn.tar.gz "https://github.com/$DOWNLOAD_URL"; then
error "Failed to download dbn"
exit 1
fi
# Extract
log "Extracting network-cli..."
cd "$TEMP_DIR"
tar xzf network-cli.tar.gz
# Install
log "Installing to $INSTALL_DIR..."
sudo cp network-cli "$INSTALL_DIR/"
sudo chmod +x "$INSTALL_DIR/network-cli"
success "network-cli installed successfully"
}
verify_installation() {
if command -v network-cli &>/dev/null; then
INSTALLED_VERSION=$(network-cli version 2>/dev/null || echo "unknown")
success "network-cli is ready: $INSTALLED_VERSION"
return 0
else
error "network-cli not found in PATH"
return 1
fi
}
#######################################
# Install the Anyone (Anon) relay via the official APT repository.
# If `anon` is already on PATH, only re-applies log and firewall config.
# Pre-seeds debconf and drops several terms-agreement marker files so the
# package installs without an interactive terms prompt.
# Globals:   log/success/warning helpers; BLUE/GREEN/NOCOLOR color vars
# Returns:   0 on success or already-installed, 1 on any failure
#######################################
install_anon() {
  echo -e ""
  echo -e "${BLUE}========================================${NOCOLOR}"
  echo -e "${GREEN}Step 1.5: Install Anyone Relay (Anon)${NOCOLOR}"
  echo -e "${BLUE}========================================${NOCOLOR}"
  echo -e ""

  log "Installing Anyone relay for anonymous networking..."

  # Fast path: anon already installed — refresh config only, skip APT work.
  if command -v anon &>/dev/null; then
    success "Anon already installed"
    configure_anon_logs
    configure_firewall_for_anon
    return 0
  fi

  # Install via APT (official method from docs.anyone.io)
  log "Adding Anyone APT repository..."

  # Add the repository signing key; on failure, print manual recovery steps
  # and return non-zero (caller treats Anon as optional).
  if ! curl -fsSL https://deb.anyone.io/gpg.key | sudo gpg --dearmor -o /usr/share/keyrings/anyone-archive-keyring.gpg 2>/dev/null; then
    warning "Failed to add Anyone GPG key"
    log "You can manually install later with:"
    log " curl -fsSL https://deb.anyone.io/gpg.key | sudo gpg --dearmor -o /usr/share/keyrings/anyone-archive-keyring.gpg"
    log " echo 'deb [signed-by=/usr/share/keyrings/anyone-archive-keyring.gpg] https://deb.anyone.io/ anyone main' | sudo tee /etc/apt/sources.list.d/anyone.list"
    log " sudo apt update && sudo apt install -y anon"
    return 1
  fi

  # Register the APT source, signed by the key installed above.
  echo "deb [signed-by=/usr/share/keyrings/anyone-archive-keyring.gpg] https://deb.anyone.io/ anyone main" | sudo tee /etc/apt/sources.list.d/anyone.list >/dev/null

  # Preseed terms acceptance to avoid an interactive prompt during install.
  log "Pre-accepting Anon terms and conditions..."

  # Try multiple debconf question formats — the exact template name/type
  # used by the package is not known for certain, so seed all variants.
  echo "anon anon/terms boolean true" | sudo debconf-set-selections
  echo "anon anon/terms seen true" | sudo debconf-set-selections

  # Also try with select/string format
  echo "anon anon/terms select true" | sudo debconf-set-selections || true

  # Query debconf to verify the question exists and set it properly.
  # Some packages use different question formats.
  sudo debconf-get-selections | grep -i anon || true

  # Create the anonrc with AgreeToTerms BEFORE installation, in case the
  # package post-install script checks the config file for acceptance.
  sudo mkdir -p /etc/anon
  if [ ! -f /etc/anon/anonrc ]; then
    echo "AgreeToTerms 1" | sudo tee /etc/anon/anonrc >/dev/null
  fi

  # Also drop terms-agreement marker files in every location Anon might
  # check — presumably only one of these is actually read (TODO confirm
  # against the anon package sources); the extras are harmless.
  sudo mkdir -p /var/lib/anon
  echo "agreed" | sudo tee /var/lib/anon/terms-agreement >/dev/null 2>&1 || true

  sudo mkdir -p /usr/share/anon
  echo "agreed" | sudo tee /usr/share/anon/terms-agreement >/dev/null 2>&1 || true

  # Also create near the GPG keyring directory (as the user suggested)
  sudo mkdir -p /usr/share/keyrings/anon
  echo "agreed" | sudo tee /usr/share/keyrings/anon/terms-agreement >/dev/null 2>&1 || true

  # Create in the keyring directory itself as a marker file
  echo "agreed" | sudo tee /usr/share/keyrings/anyone-terms-agreed >/dev/null 2>&1 || true

  # Update and install with the non-interactive frontend.
  log "Installing Anon package..."
  sudo apt update -qq

  # DEBIAN_FRONTEND=noninteractive plus Dpkg force-conf* options keeps dpkg
  # from prompting about config files; more reliable than debconf seeding alone.
  if ! sudo DEBIAN_FRONTEND=noninteractive \
    apt-get install -y \
    -o Dpkg::Options::="--force-confdef" \
    -o Dpkg::Options::="--force-confold" \
    anon; then
    warning "Anon installation failed"
    return 1
  fi

  # Verify the binary actually landed on PATH.
  if ! command -v anon &>/dev/null; then
    warning "Anon installation may have failed"
    return 1
  fi

  success "Anon installed successfully"

  # Configure with sensible defaults
  configure_anon_defaults

  # Configure log directory
  configure_anon_logs

  # Configure firewall if present
  configure_firewall_for_anon

  # Enable and start the systemd service; failures are non-fatal and
  # surfaced as a warning below instead.
  log "Enabling Anon service..."
  sudo systemctl enable anon 2>/dev/null || true
  sudo systemctl start anon 2>/dev/null || true

  if systemctl is-active --quiet anon; then
    success "Anon service is running"
  else
    warning "Anon service may not be running. Check: sudo systemctl status anon"
  fi

  return 0
}
#######################################
# Append default settings (Nickname, ControlPort, SocksPort, AgreeToTerms)
# to /etc/anon/anonrc, without overwriting values that are already set.
# No-op if /etc/anon/anonrc does not exist (install_anon creates it first).
# Globals: writes HOSTNAME (not local); uses the log helper.
#######################################
configure_anon_defaults() {
  log "Configuring Anon with default settings..."

  # Short hostname as relay nickname; falls back when hostname(1) fails.
  HOSTNAME=$(hostname -s 2>/dev/null || echo "debros-node")

  # Create or update anonrc with our defaults
  if [ -f /etc/anon/anonrc ]; then
    # Backup existing config
    sudo cp /etc/anon/anonrc /etc/anon/anonrc.bak 2>/dev/null || true

    # Update key settings if not already set (grep ^anchor = line start).
    if ! grep -q "^Nickname" /etc/anon/anonrc; then
      echo "Nickname ${HOSTNAME}" | sudo tee -a /etc/anon/anonrc >/dev/null
    fi

    if ! grep -q "^ControlPort" /etc/anon/anonrc; then
      echo "ControlPort 9051" | sudo tee -a /etc/anon/anonrc >/dev/null
    fi

    if ! grep -q "^SocksPort" /etc/anon/anonrc; then
      echo "SocksPort 9050" | sudo tee -a /etc/anon/anonrc >/dev/null
    fi

    # Auto-accept terms in config file
    if ! grep -q "^AgreeToTerms" /etc/anon/anonrc; then
      echo "AgreeToTerms 1" | sudo tee -a /etc/anon/anonrc >/dev/null
    fi

    # NOTE(review): ORPort is never written here — the log line below only
    # reports the presumed package default (9001); confirm against anonrc.
    log " Nickname: ${HOSTNAME}"
    log " ORPort: 9001 (default)"
    log " ControlPort: 9051"
    log " SOCKSPort: 9050"
    log " AgreeToTerms: 1 (auto-accepted)"
  fi
}
#######################################
# Point Anon's notice log at the DeBros log directory.
# Creates the log dir, hands ownership to the debian-anon service user,
# then rewrites — or appends, when absent — the "Log notice file"
# directive in /etc/anon/anonrc.
#######################################
configure_anon_logs() {
  log "Configuring Anon logs..."

  # Create log directory
  sudo mkdir -p /home/debros/.debros/logs/anon

  # Change ownership to debian-anon (the user anon runs as)
  sudo chown -R debian-anon:debian-anon /home/debros/.debros/logs/anon 2>/dev/null || true

  # Update anonrc to point logs to our directory
  if [ -f /etc/anon/anonrc ]; then
    if grep -q "Log notice file" /etc/anon/anonrc; then
      # Rewrite the existing directive in place (.bak keeps the original).
      sudo sed -i.bak 's|Log notice file.*|Log notice file /home/debros/.debros/logs/anon/notices.log|g' /etc/anon/anonrc
    else
      # Fix: previously a missing directive meant sed matched nothing and
      # logging stayed unconfigured while success was still reported.
      echo "Log notice file /home/debros/.debros/logs/anon/notices.log" | sudo tee -a /etc/anon/anonrc >/dev/null
    fi
    success "Anon logs configured to /home/debros/.debros/logs/anon"
  fi
}
#######################################
# Open the Anon ports (9001 ORPort, 9051 ControlPort) in whichever
# firewall is active. Detection order matters: UFW first, then firewalld,
# then raw iptables; the first active firewall wins and the function
# returns. All rule additions are best-effort (|| true).
#######################################
configure_firewall_for_anon() {
  log "Checking firewall configuration..."

  # Check for UFW
  if command -v ufw &>/dev/null && sudo ufw status | grep -q "Status: active"; then
    log "UFW detected and active, adding Anon ports..."
    sudo ufw allow 9001/tcp comment 'Anon ORPort' 2>/dev/null || true
    sudo ufw allow 9051/tcp comment 'Anon ControlPort' 2>/dev/null || true
    success "UFW rules added for Anon"
    return 0
  fi

  # Check for firewalld
  if command -v firewall-cmd &>/dev/null && sudo firewall-cmd --state 2>/dev/null | grep -q "running"; then
    log "firewalld detected and active, adding Anon ports..."
    sudo firewall-cmd --permanent --add-port=9001/tcp 2>/dev/null || true
    sudo firewall-cmd --permanent --add-port=9051/tcp 2>/dev/null || true
    sudo firewall-cmd --reload 2>/dev/null || true
    success "firewalld rules added for Anon"
    return 0
  fi

  # Check for iptables
  if command -v iptables &>/dev/null; then
    # Check if iptables has any rules (indicating it's in use).
    # NOTE(review): "Chain INPUT" appears even in an empty ruleset, so this
    # really tests that iptables is usable, not that rules exist — confirm.
    if sudo iptables -L -n | grep -q "Chain INPUT"; then
      log "iptables detected, adding Anon ports..."
      sudo iptables -A INPUT -p tcp --dport 9001 -j ACCEPT -m comment --comment "Anon ORPort" 2>/dev/null || true
      sudo iptables -A INPUT -p tcp --dport 9051 -j ACCEPT -m comment --comment "Anon ControlPort" 2>/dev/null || true

      # Persist rules across reboots when a persistence mechanism exists.
      if command -v netfilter-persistent &>/dev/null; then
        sudo netfilter-persistent save 2>/dev/null || true
      elif command -v iptables-save &>/dev/null; then
        sudo iptables-save | sudo tee /etc/iptables/rules.v4 >/dev/null 2>&1 || true
      fi

      success "iptables rules added for Anon"
      return 0
    fi
  fi

  log "No active firewall detected, skipping firewall configuration"
}
#######################################
# Interactively confirm and run `network-cli setup` (requires sudo).
# Prints what setup will do, prompts for confirmation, and either runs
# the setup or tells the user how to run it later.
# Returns: 0 when declined; otherwise the exit status of the setup command.
#######################################
run_setup() {
  echo -e ""
  echo -e "${BLUE}========================================${NOCOLOR}"
  echo -e "${GREEN}Step 2: Run Interactive Setup${NOCOLOR}"
  echo -e "${BLUE}========================================${NOCOLOR}"
  echo -e ""

  log "The setup command will:"
  log " • Create system user and directories"
  log " • Install dependencies (RQLite, etc.)"
  log " • Build DeBros binaries"
  log " • Configure network settings"
  log " • Create and start systemd services"
  echo -e ""

  echo -e "${YELLOW}Ready to run setup? This will prompt for configuration details.${NOCOLOR}"
  echo -n "Continue? (yes/no): "
  read -r CONTINUE_SETUP

  # Fix: accept the answer case-insensitively (Yes/Y/YES previously
  # counted as a decline). Anything other than yes/y declines.
  case "$(printf '%s' "$CONTINUE_SETUP" | tr '[:upper:]' '[:lower:]')" in
    yes | y) ;;
    *)
      echo -e ""
      success "network-cli installed successfully!"
      echo -e ""
      echo -e "${CYAN}To complete setup later, run:${NOCOLOR}"
      echo -e "${GREEN} sudo network-cli setup${NOCOLOR}"
      echo -e ""
      return 0
      ;;
  esac

  echo -e ""
  log "Running setup (requires sudo)..."
  sudo network-cli setup
}
#######################################
# Print the post-install summary: verification commands, environment
# switching, Anon relay hints, and documentation link.
# Output only — no side effects.
#######################################
show_completion() {
  echo -e ""
  echo -e "${BLUE}========================================================================${NOCOLOR}"
  success "DeBros Network installation complete!"
  echo -e "${BLUE}========================================================================${NOCOLOR}"
  echo -e ""
  echo -e "${GREEN}Next Steps:${NOCOLOR}"
  echo -e " • Verify installation: ${CYAN}curl http://localhost:6001/health${NOCOLOR}"
  echo -e " • Check services: ${CYAN}sudo network-cli service status all${NOCOLOR}"
  echo -e " • View logs: ${CYAN}sudo network-cli service logs node --follow${NOCOLOR}"
  echo -e " • Authenticate: ${CYAN}network-cli auth login${NOCOLOR}"
  echo -e ""
  echo -e "${CYAN}Environment Management:${NOCOLOR}"
  echo -e " • Switch to devnet: ${CYAN}network-cli devnet enable${NOCOLOR}"
  echo -e " • Switch to testnet: ${CYAN}network-cli testnet enable${NOCOLOR}"
  echo -e " • Show environment: ${CYAN}network-cli env current${NOCOLOR}"
  echo -e ""
  echo -e "${CYAN}Anyone Relay (Anon):${NOCOLOR}"
  echo -e " • Check Anon status: ${CYAN}sudo systemctl status anon${NOCOLOR}"
  echo -e " • View Anon logs: ${CYAN}sudo tail -f /home/debros/.debros/logs/anon/notices.log${NOCOLOR}"
  echo -e " • Proxy endpoint: ${CYAN}POST http://localhost:6001/v1/proxy/anon${NOCOLOR}"
  echo -e ""
  echo -e "${CYAN}Documentation: https://docs.debros.io${NOCOLOR}"
  echo -e ""
}
main() {
display_banner
echo -e ""
log "Starting DeBros Network installation..."
echo -e ""
detect_os
check_architecture
check_dependencies
echo -e ""
echo -e "${BLUE}========================================${NOCOLOR}"
echo -e "${GREEN}Step 1: Install network-cli${NOCOLOR}"
echo -e "${BLUE}========================================${NOCOLOR}"
echo -e ""
get_latest_release
download_and_install
# Verify installation
if ! verify_installation; then
# Verify the download was successful
if [ ! -f /tmp/dbn.tar.gz ]; then
error "Download file not found"
exit 1
fi
# Install Anon (optional but recommended)
install_anon || warning "Anon installation skipped or failed"
log "Extracting dbn..."
# Extract to /tmp
tar -xzf /tmp/dbn.tar.gz -C /tmp/
# Run setup
run_setup
# Check for extracted binary (could be named network-cli or dbn)
EXTRACTED_BINARY=""
if [ -f /tmp/network-cli ]; then
EXTRACTED_BINARY="/tmp/network-cli"
elif [ -f /tmp/dbn ]; then
EXTRACTED_BINARY="/tmp/dbn"
else
error "Failed to extract binary (neither network-cli nor dbn found)"
ls -la /tmp/ | grep -E "(network|cli|dbn)"
exit 1
fi
# Show completion message
show_completion
chmod +x "$EXTRACTED_BINARY"
log "Installing dbn to $INSTALL_DIR..."
# Always rename to dbn during installation
mv "$EXTRACTED_BINARY" "$INSTALL_DIR/dbn"
# Sanity check: verify the installed binary is functional and reports correct version
if ! "$INSTALL_DIR/dbn" version &>/dev/null; then
error "Installed dbn failed sanity check (version command failed)"
rm -f "$INSTALL_DIR/dbn"
exit 1
fi
# Clean up
rm -f /tmp/dbn.tar.gz
success "dbn installed successfully"
}
main "$@"
# Main flow: banner → prerequisite checks → download/install the dbn CLI,
# then print the `dbn prod install` variants the operator runs next.
display_banner

# Check prerequisites (must be root; OS and CPU architecture supported)
check_root
detect_os
check_architecture

# Download and install the latest released dbn binary
get_latest_release
download_and_install_cli

# Show next steps
echo ""
echo -e "${GREEN}Installation complete!${NOCOLOR}"
echo ""
echo -e "${CYAN}Next, run the production setup:${NOCOLOR}"
echo ""
echo "Bootstrap node (first node, main branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap${NOCOLOR}"
echo ""
echo "Bootstrap node (nightly branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --branch nightly${NOCOLOR}"
echo ""
echo "Secondary node (join existing cluster):"
echo -e " ${BLUE}sudo dbn prod install --vps-ip <bootstrap_ip> --peers <multiaddr>${NOCOLOR}"
echo ""
echo "With HTTPS/domain:"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --domain example.com${NOCOLOR}"
echo ""
echo "For more help:"
echo -e " ${BLUE}dbn prod --help${NOCOLOR}"
echo ""

379
scripts/test-cluster-health.sh Executable file
View File

@ -0,0 +1,379 @@
#!/bin/bash
# Production Cluster Health Check Script
# Tests RQLite, IPFS, and IPFS Cluster connectivity and replication
# Note: We don't use 'set -e' here because we want to continue testing even if individual checks fail

# Colors for output (ANSI escapes, expanded by echo -e / printf %b)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Node IPs - override via environment, e.g. BOOTSTRAP=10.0.0.1 ./test-cluster-health.sh
BOOTSTRAP="${BOOTSTRAP:-51.83.128.181}"
NODE1="${NODE1:-57.128.223.92}"
NODE2="${NODE2:-185.185.83.89}"
# Fix (SC2206): quote each element — the previous unquoted expansion
# word-split and glob-expanded the values.
ALL_NODES=("$BOOTSTRAP" "$NODE1" "$NODE2")

# Counters accumulated by the print_pass/print_fail/print_warn helpers
PASSED=0
FAILED=0
WARNINGS=0
# Helper functions
# Print a section banner: a blank line, then the title framed by blue rules.
print_header() {
  local rule="${BLUE}========================================${NC}"
  printf '\n%b\n%b\n%b\n' "$rule" "${BLUE}$1${NC}" "$rule"
}
# Announce the test about to run, in yellow.
print_test() {
  printf '%b\n' "${YELLOW}$1${NC}"
}
# Report a passing check in green and bump the global PASSED counter.
print_pass() {
  printf '%b\n' "${GREEN}$1${NC}"
  PASSED=$((PASSED + 1))
}
# Report a failing check in red and bump the global FAILED counter.
print_fail() {
  printf '%b\n' "${RED}$1${NC}"
  FAILED=$((FAILED + 1))
}
# Report a non-fatal issue in yellow and bump the global WARNINGS counter.
print_warn() {
  printf '%b\n' "${YELLOW}$1${NC}"
  WARNINGS=$((WARNINGS + 1))
}
# Indented informational detail; does not touch any counter.
print_info() {
  printf '%b\n' "   $1"
}
# Test functions
#######################################
# 1. RQLite cluster status: queries /status on every node and checks
#    raft state (exactly 1 Leader + 2 Followers), peer counts, cluster
#    membership size, and that commit indices agree across nodes.
# Globals: reads ALL_NODES; updates PASSED/FAILED/WARNINGS via helpers.
#######################################
test_rqlite_status() {
  print_header "1. RQLITE CLUSTER STATUS"

  local leader_found=false
  local follower_count=0
  local commit_indices=()

  for i in "${!ALL_NODES[@]}"; do
    local node="${ALL_NODES[$i]}"
    print_test "Testing RQLite on $node"

    # NOTE(review): curl -s without -f returns 0 even on HTTP errors, so
    # this branch only catches connection-level failures — confirm intended.
    if ! response=$(curl -s --max-time 5 http://$node:5001/status 2>/dev/null); then
      print_fail "Cannot connect to RQLite on $node:5001"
      continue
    fi

    # Pull the raft fields we care about; '//' supplies a default when absent.
    local state=$(echo "$response" | jq -r '.store.raft.state // "unknown"')
    local num_peers=$(echo "$response" | jq -r '.store.raft.num_peers // 0')
    local commit_index=$(echo "$response" | jq -r '.store.raft.commit_index // 0')
    local last_contact=$(echo "$response" | jq -r '.store.raft.last_contact // "N/A"')
    local config=$(echo "$response" | jq -r '.store.raft.latest_configuration // "[]"')
    # Count cluster members by counting "Address" occurrences in the raft config string.
    local node_count=$(echo "$config" | grep -o "Address" | wc -l | tr -d ' ')

    commit_indices+=($commit_index)

    print_info "State: $state | Peers: $num_peers | Commit Index: $commit_index | Cluster Nodes: $node_count"

    # Check state: Leader / Follower / anything else is a failure.
    if [ "$state" = "Leader" ]; then
      leader_found=true
      print_pass "Node $node is the Leader"
    elif [ "$state" = "Follower" ]; then
      follower_count=$((follower_count + 1))
      # Check last contact — a follower that never heard from the leader is suspect.
      if [ "$last_contact" != "N/A" ] && [ "$last_contact" != "0" ]; then
        print_pass "Node $node is a Follower (last contact: $last_contact)"
      else
        print_warn "Node $node is Follower but last_contact is $last_contact"
      fi
    else
      print_fail "Node $node has unexpected state: $state"
    fi

    # Check peer count — each node should see the other 2 of the 3-node cluster.
    if [ "$num_peers" = "2" ]; then
      print_pass "Node $node has correct peer count: 2"
    else
      print_fail "Node $node has incorrect peer count: $num_peers (expected 2)"
    fi

    # Check cluster configuration size.
    if [ "$node_count" = "3" ]; then
      print_pass "Node $node sees all 3 cluster members"
    else
      print_fail "Node $node only sees $node_count cluster members (expected 3)"
    fi

    echo ""
  done

  # Check for exactly 1 leader and 2 followers.
  if [ "$leader_found" = true ] && [ "$follower_count" = "2" ]; then
    print_pass "Cluster has 1 Leader and 2 Followers ✓"
  else
    print_fail "Invalid cluster state (Leader found: $leader_found, Followers: $follower_count)"
  fi

  # Check commit index sync — only meaningful if all 3 nodes responded.
  if [ ${#commit_indices[@]} -eq 3 ]; then
    local first="${commit_indices[0]}"
    local all_same=true
    for idx in "${commit_indices[@]}"; do
      if [ "$idx" != "$first" ]; then
        all_same=false
        break
      fi
    done

    if [ "$all_same" = true ]; then
      print_pass "All nodes have synced commit index: $first"
    else
      print_warn "Commit indices differ: ${commit_indices[*]} (might be normal if writes are happening)"
    fi
  fi
}
#######################################
# 2. RQLite replication: creates a scratch table on the bootstrap node,
#    inserts a timestamped row, waits 2s, then reads the row back from
#    every node to confirm raft replication.
# Globals: reads BOOTSTRAP and ALL_NODES; updates counters via helpers.
#######################################
test_rqlite_replication() {
  print_header "2. RQLITE REPLICATION TEST"

  print_test "Creating test table and inserting data on leader ($BOOTSTRAP)"

  # Create table (idempotent: IF NOT EXISTS)
  if ! response=$(curl -s --max-time 5 -XPOST "http://$BOOTSTRAP:5001/db/execute" \
    -H "Content-Type: application/json" \
    -d '[["CREATE TABLE IF NOT EXISTS test_cluster_health (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp TEXT, node TEXT, value TEXT)"]]' 2>/dev/null); then
    print_fail "Failed to create table"
    return
  fi

  # Tolerate the "already exists" error; any other error aborts this test group.
  if echo "$response" | jq -e '.results[0].error' >/dev/null 2>&1; then
    local error=$(echo "$response" | jq -r '.results[0].error')
    if [[ "$error" != "table test_cluster_health already exists" ]]; then
      print_fail "Table creation error: $error"
      return
    fi
  fi
  print_pass "Table exists"

  # Insert test data — unix timestamp makes the value unique per run.
  local test_value="test_$(date +%s)"
  if ! response=$(curl -s --max-time 5 -XPOST "http://$BOOTSTRAP:5001/db/execute" \
    -H "Content-Type: application/json" \
    -d "[
      [\"INSERT INTO test_cluster_health (timestamp, node, value) VALUES (datetime('now'), 'bootstrap', '$test_value')\"]
    ]" 2>/dev/null); then
    print_fail "Failed to insert data"
    return
  fi

  if echo "$response" | jq -e '.results[0].error' >/dev/null 2>&1; then
    local error=$(echo "$response" | jq -r '.results[0].error')
    print_fail "Insert error: $error"
    return
  fi
  print_pass "Data inserted: $test_value"

  # Wait for replication to reach the followers.
  print_info "Waiting 2 seconds for replication..."
  sleep 2

  # Query from all nodes; level=weak lets each node answer from its own
  # state rather than forwarding to the leader, which is what we want to test.
  for node in "${ALL_NODES[@]}"; do
    print_test "Reading from $node"

    if ! response=$(curl -s --max-time 5 -XPOST "http://$node:5001/db/query?level=weak" \
      -H "Content-Type: application/json" \
      -d "[\"SELECT * FROM test_cluster_health WHERE value = '$test_value' LIMIT 1\"]" 2>/dev/null); then
      print_fail "Failed to query from $node"
      continue
    fi

    if echo "$response" | jq -e '.results[0].error' >/dev/null 2>&1; then
      local error=$(echo "$response" | jq -r '.results[0].error')
      print_fail "Query error on $node: $error"
      continue
    fi

    local row_count=$(echo "$response" | jq -r '.results[0].values | length // 0')
    if [ "$row_count" = "1" ]; then
      # Column 3 is 'value' (0=id, 1=timestamp, 2=node, 3=value).
      local retrieved_value=$(echo "$response" | jq -r '.results[0].values[0][3] // ""')
      if [ "$retrieved_value" = "$test_value" ]; then
        print_pass "Data replicated correctly to $node"
      else
        print_fail "Data mismatch on $node (got: $retrieved_value, expected: $test_value)"
      fi
    else
      print_fail "Expected 1 row from $node, got $row_count"
    fi
  done
}
# 3. Verify the IPFS daemon answers /api/v0/id on every node and report
#    its peer ID, agent version, and advertised address count.
test_ipfs_status() {
  print_header "3. IPFS DAEMON STATUS"

  local host reply peer_id addr_count agent
  for host in "${ALL_NODES[@]}"; do
    print_test "Testing IPFS on $host"

    reply=$(curl -s --max-time 5 -X POST "http://$host:4501/api/v0/id" 2>/dev/null) || {
      print_fail "Cannot connect to IPFS on $host:4501"
      continue
    }

    peer_id=$(jq -r '.ID // "unknown"' <<<"$reply")
    addr_count=$(jq -r '.Addresses | length // 0' <<<"$reply")
    agent=$(jq -r '.AgentVersion // "unknown"' <<<"$reply")

    if [ "$peer_id" = "unknown" ]; then
      print_fail "IPFS not responding correctly on $host"
    else
      print_pass "IPFS running on $host (ID: ${peer_id:0:12}...)"
      print_info "Agent: $agent | Addresses: $addr_count"
    fi
  done
}
# 4. Check that each node's IPFS daemon is connected to the other two
#    peers via /api/v0/swarm/peers.
test_ipfs_swarm() {
  print_header "4. IPFS SWARM CONNECTIVITY"

  local host reply peers
  for host in "${ALL_NODES[@]}"; do
    print_test "Checking IPFS swarm peers on $host"

    reply=$(curl -s --max-time 5 -X POST "http://$host:4501/api/v0/swarm/peers" 2>/dev/null) || {
      print_fail "Failed to get swarm peers from $host"
      continue
    }

    peers=$(jq -r '.Peers | length // 0' <<<"$reply")
    if [ "$peers" = "2" ]; then
      print_pass "Node $host connected to 2 IPFS peers"
    elif [ "$peers" -gt "0" ]; then
      print_warn "Node $host connected to $peers IPFS peers (expected 2)"
    else
      print_fail "Node $host has no IPFS swarm peers"
    fi
  done
}
# 5. Query the IPFS Cluster REST API /id endpoint on every node and check
#    each peer sees the full 3-node cluster.
test_ipfs_cluster_status() {
  print_header "5. IPFS CLUSTER STATUS"

  local host reply cluster_id peers ver
  for host in "${ALL_NODES[@]}"; do
    print_test "Testing IPFS Cluster on $host"

    reply=$(curl -s --max-time 5 "http://$host:9094/id" 2>/dev/null) || {
      print_fail "Cannot connect to IPFS Cluster on $host:9094"
      continue
    }

    cluster_id=$(jq -r '.id // "unknown"' <<<"$reply")
    peers=$(jq -r '.cluster_peers | length // 0' <<<"$reply")
    ver=$(jq -r '.version // "unknown"' <<<"$reply")

    if [ "$cluster_id" = "unknown" ]; then
      print_fail "IPFS Cluster not responding correctly on $host"
      continue
    fi

    print_pass "IPFS Cluster running on $host (ID: ${cluster_id:0:12}...)"
    print_info "Version: $ver | Cluster Peers: $peers"

    if [ "$peers" = "3" ]; then
      print_pass "Node $host sees all 3 cluster peers"
    else
      print_warn "Node $host sees $peers cluster peers (expected 3)"
    fi
  done
}
#######################################
# 6. Pin consistency: fetches /pins from every node's IPFS Cluster API and
#    warns if the pin counts disagree (a node that failed to respond is
#    counted as 0 so the consistency check still runs over 3 entries).
# NOTE(review): some ipfs-cluster versions stream /pins as newline-delimited
# JSON objects rather than one array; jq 'length' would then emit one number
# per object and break the comparison — confirm against the deployed version.
#######################################
test_ipfs_cluster_pins() {
  print_header "6. IPFS CLUSTER PIN CONSISTENCY"

  local pin_counts=()

  for node in "${ALL_NODES[@]}"; do
    print_test "Checking pins on $node"

    if ! response=$(curl -s --max-time 5 http://$node:9094/pins 2>/dev/null); then
      print_fail "Failed to get pins from $node"
      pin_counts+=(0)
      continue
    fi

    local pin_count=$(echo "$response" | jq -r 'length // 0')
    pin_counts+=($pin_count)
    print_pass "Node $node has $pin_count pins"
  done

  # Check if all nodes have same pin count
  if [ ${#pin_counts[@]} -eq 3 ]; then
    local first="${pin_counts[0]}"
    local all_same=true
    for count in "${pin_counts[@]}"; do
      if [ "$count" != "$first" ]; then
        all_same=false
        break
      fi
    done

    if [ "$all_same" = true ]; then
      print_pass "All nodes have consistent pin count: $first"
    else
      print_warn "Pin counts differ: ${pin_counts[*]} (might be syncing)"
    fi
  fi
}
# Print final tallies and terminate the script with a health-graded exit
# code: 0 = all passed, 1 = minor failures (<= 2), 2 = multiple failures.
print_summary() {
  print_header "TEST SUMMARY"

  printf '\n%b\n%b\n%b\n\n' \
    "${GREEN}Passed: $PASSED${NC}" \
    "${YELLOW}Warnings: $WARNINGS${NC}" \
    "${RED}Failed: $FAILED${NC}"

  if [ "$FAILED" -eq 0 ]; then
    printf '%b\n' "${GREEN}🎉 All critical tests passed! Cluster is healthy.${NC}"
    exit 0
  fi
  if [ "$FAILED" -le 2 ]; then
    printf '%b\n' "${YELLOW}⚠️ Some tests failed. Review the output above.${NC}"
    exit 1
  fi
  printf '%b\n' "${RED}❌ Multiple failures detected. Cluster needs attention.${NC}"
  exit 2
}
# Main execution: prints the banner and target list, runs the six test
# groups in order (counters accumulate across groups), then print_summary
# exits with a code reflecting overall cluster health.
main() {
  echo ""
  echo -e "${BLUE}╔════════════════════════════════════════════╗${NC}"
  echo -e "${BLUE}║ DEBROS Production Cluster Health Check ║${NC}"
  echo -e "${BLUE}╚════════════════════════════════════════════╝${NC}"
  echo ""
  echo "Testing cluster:"
  echo " Bootstrap: $BOOTSTRAP"
  echo " Node 1: $NODE1"
  echo " Node 2: $NODE2"

  test_rqlite_status
  test_rqlite_replication
  test_ipfs_status
  test_ipfs_swarm
  test_ipfs_cluster_status
  test_ipfs_cluster_pins

  # print_summary never returns — it exits with 0/1/2.
  print_summary
}

# Run main
main

Some files were not shown because too many files have changed in this diff Show More