diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 74f323d..d9e2bad 100644 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -30,6 +30,15 @@ if [ -z "$OTHER_FILES" ]; then exit 0 fi +# Check for skip flag +# To skip changelog generation, set SKIP_CHANGELOG=1 before committing: +# SKIP_CHANGELOG=1 git commit -m "your message" +# SKIP_CHANGELOG=1 git commit +if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then + echo -e "${YELLOW}Skipping changelog update (SKIP_CHANGELOG is set)${NOCOLOR}" + exit 0 +fi + # Update changelog before commit if [ -f "$CHANGELOG_SCRIPT" ]; then echo -e "\n${CYAN}Updating changelog...${NOCOLOR}" diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 5a1f0ff..6cebf4a 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,6 +1,6 @@ # GoReleaser Configuration for DeBros Network -# Builds and releases the network-cli binary for multiple platforms -# Other binaries (node, gateway, identity) are installed via: network-cli setup +# Builds and releases the dbn binary for multiple platforms +# Other binaries (node, gateway, identity) are installed via: dbn setup project_name: debros-network @@ -8,10 +8,10 @@ env: - GO111MODULE=on builds: - # network-cli binary - only build the CLI - - id: network-cli + # dbn binary - only build the CLI + - id: dbn main: ./cmd/cli - binary: network-cli + binary: dbn goos: - linux - darwin @@ -23,10 +23,10 @@ builds: - -X main.version={{.Version}} - -X main.commit={{.ShortCommit}} - -X main.date={{.Date}} - mod_timestamp: '{{ .CommitTimestamp }}' + mod_timestamp: "{{ .CommitTimestamp }}" archives: - # Tar.gz archives for network-cli + # Tar.gz archives for dbn - id: binaries format: tar.gz name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" @@ -50,10 +50,10 @@ changelog: abbrev: -1 filters: exclude: - - '^docs:' - - '^test:' - - '^chore:' - - '^ci:' + - "^docs:" + - "^test:" + - "^chore:" + - "^ci:" - Merge pull request - Merge branch diff --git 
a/.zed/debug.json b/.zed/debug.json index 6418b00..4119f7a 100644 --- a/.zed/debug.json +++ b/.zed/debug.json @@ -11,7 +11,7 @@ "program": "./cmd/gateway", "env": { "GATEWAY_ADDR": ":6001", - "GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", + "GATEWAY_BOOTSTRAP_PEERS": "/dns4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", "GATEWAY_NAMESPACE": "default", "GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default" } @@ -36,7 +36,7 @@ "program": "./cmd/gateway", "env": { "GATEWAY_ADDR": ":6001", - "GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", + "GATEWAY_BOOTSTRAP_PEERS": "/dns4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", "GATEWAY_NAMESPACE": "default", "GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 62ed13e..0b57bb7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,11 +13,756 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed -## [0.53.18] - 2025-11-03 +## [0.69.13] - 2025-11-14 ### Added \n ### Changed +- The Gateway service now waits for the Olric cache service to start before attempting initialization. +- Improved robustness of Olric cache client initialization with retry logic and exponential backoff. + +### Deprecated + +### Removed + +### Fixed +- Corrected the default path logic for 'gateway.yaml' to prioritize the production data directory while maintaining fallback to legacy paths. + +## [0.69.12] - 2025-11-14 + +### Added +- The `prod install` command now requires the `--cluster-secret` flag for all non-bootstrap nodes to ensure correct IPFS Cluster configuration. + +### Changed +- Updated IPFS configuration to bind API and Gateway addresses to `0.0.0.0` instead of `127.0.0.1` for better network accessibility. 
+ +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.11] - 2025-11-13 + +### Added +- Added a new comprehensive shell script (`scripts/test-cluster-health.sh`) for checking the health and replication status of RQLite, IPFS, and IPFS Cluster across production environments. + +### Changed +- Improved RQLite cluster discovery logic to ensure `peers.json` is correctly generated and includes the local node, which is crucial for reliable cluster recovery. +- Refactored logging across discovery and RQLite components for cleaner, more concise output, especially for routine operations. +- Updated the installation and upgrade process to correctly configure IPFS Cluster bootstrap peers using the node's public IP, improving cluster formation reliability. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue where RQLite recovery operations (like clearing Raft state) did not correctly force the regeneration of `peers.json`, preventing successful cluster rejoin. +- Corrected the port calculation logic for IPFS Cluster to ensure the correct LibP2P listen port (9098) is used for bootstrap peer addressing. + +## [0.69.10] - 2025-11-13 + +### Added +- Automatic health monitoring and recovery for RQLite cluster split-brain scenarios. +- RQLite now waits indefinitely for the minimum cluster size to be met before starting, preventing single-node cluster formation. + +### Changed +- Updated default IPFS swarm port from 4001 to 4101 to avoid conflicts with LibP2P. + +### Deprecated + +### Removed + +### Fixed +- Resolved an issue where RQLite could start as a single-node cluster if peer discovery was slow, by enforcing minimum cluster size before startup. +- Improved cluster recovery logic to correctly use `bootstrap-expect` for new clusters and ensure proper process restart during recovery. 
+ +## [0.69.9] - 2025-11-12 + +### Added +- Added automatic recovery logic for RQLite (database) nodes stuck in a configuration mismatch, which attempts to clear stale Raft state if peers have more recent data. +- Added logic to discover IPFS Cluster peers directly from the LibP2P host's peerstore, improving peer discovery before the Cluster API is fully operational. + +### Changed +- Improved the IPFS Cluster configuration update process to prioritize writing to the `peerstore` file before updating `service.json`, ensuring the source of truth is updated first. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.8] - 2025-11-12 + +### Added +- Improved `dbn prod start` to automatically unmask and re-enable services if they were previously masked or disabled. +- Added automatic discovery and configuration of all IPFS Cluster peers during runtime to improve cluster connectivity. + +### Changed +- Enhanced `dbn prod start` and `dbn prod stop` reliability by adding service state resets, retries, and ensuring services are disabled when stopped. +- Filtered peer exchange addresses in LibP2P discovery to only include the standard LibP2P port (4001), preventing exposure of internal service ports. + +### Deprecated + +### Removed + +### Fixed +- Improved IPFS Cluster bootstrap configuration repair logic to automatically infer and update bootstrap peer addresses if the bootstrap node is available. + +## [0.69.7] - 2025-11-12 + +### Added +\n +### Changed +- Improved logic for determining Olric server addresses during configuration generation, especially for bootstrap and non-bootstrap nodes. +- Enhanced IPFS cluster configuration to correctly handle IPv6 addresses when updating bootstrap peers. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.6] - 2025-11-12 + +### Added +- Improved production service health checks and port availability validation during install, upgrade, start, and restart commands. 
+- Added service aliases (node, ipfs, cluster, gateway, olric) to `dbn prod logs` command for easier log viewing. + +### Changed +- Updated node configuration logic to correctly advertise public IP addresses in multiaddrs (for P2P discovery) and RQLite addresses, improving connectivity for nodes behind NAT/firewalls. +- Enhanced `dbn prod install` and `dbn prod upgrade` to automatically detect and preserve existing VPS IP, domain, and cluster join information. +- Improved RQLite cluster discovery to automatically replace localhost/loopback addresses with the actual public IP when exchanging metadata between peers. +- Updated `dbn prod install` to require `--vps-ip` for all node types (bootstrap and regular) for proper network configuration. +- Improved error handling and robustness in the installation script when fetching the latest release from GitHub. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue where the RQLite process would wait indefinitely for a join target; now uses a 5-minute timeout. +- Corrected the location of the gateway configuration file reference in the README. + +## [0.69.5] - 2025-11-11 + +### Added +\n +### Changed +- Moved the default location for `gateway.yaml` configuration file from `configs/` to the new `data/` directory for better organization. +- Updated configuration path logic to search for `gateway.yaml` in the new `data/` directory first. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.4] - 2025-11-11 + +### Added +\n +### Changed +- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-*). +- Improved log file provisioning to only create necessary log files based on the node type being installed (bootstrap or node). + +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.3] - 2025-11-11 + +### Added +- Added `--ignore-resource-checks` flag to the install command to skip disk, RAM, and CPU prerequisite validation. 
+ +### Changed +\n +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.2] - 2025-11-11 + +### Added +- Added `--no-pull` flag to `dbn prod upgrade` to skip git repository updates and use existing source code. + +### Changed +- Removed deprecated environment management commands (`env`, `devnet`, `testnet`, `local`). +- Removed deprecated network commands (`health`, `peers`, `status`, `peer-id`, `connect`, `query`, `pubsub`) from the main CLI interface. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.1] - 2025-11-11 + +### Added +- Added automatic service stopping before binary upgrades during the `prod upgrade` process to ensure a clean update. +- Added logic to preserve existing configuration settings (like `bootstrap_peers`, `domain`, and `rqlite_join_address`) when regenerating configurations during `prod upgrade`. + +### Changed +- Improved the `prod upgrade` process to be more robust by preserving critical configuration details and gracefully stopping services. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.69.0] - 2025-11-11 + +### Added +- Added comprehensive documentation for setting up HTTPS using a domain name, including configuration steps for both installation and existing setups. +- Added the `--force` flag to the `install` command for reconfiguring all settings. +- Added new log targets (`ipfs-cluster`, `rqlite`, `olric`) and improved the `dbn prod logs` command documentation. + +### Changed +- Improved the IPFS Cluster configuration logic to ensure the cluster secret and IPFS API port are correctly synchronized during updates. +- Refined the directory structure creation process to ensure node-specific data directories are created only when initializing services. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.68.1] - 2025-11-11 + +### Added +- Pre-create log files during setup to ensure correct permissions for systemd logging. 
+ +### Changed +- Improved binary installation process to handle copying files individually, preventing potential shell wildcard issues. +- Enhanced ownership fixing logic during installation to ensure all files created by root (especially during service initialization) are correctly owned by the 'debros' user. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.68.0] - 2025-11-11 + +### Added +- Added comprehensive documentation for production deployment, including installation, upgrade, service management, and troubleshooting. +- Added new CLI commands (`dbn prod start`, `dbn prod stop`, `dbn prod restart`) for convenient management of production systemd services. + +### Changed +- Updated IPFS configuration during production installation to use port 4501 for the API (to avoid conflicts with RQLite on port 5001) and port 8080 for the Gateway. + +### Deprecated + +### Removed + +### Fixed +- Ensured that IPFS configuration automatically disables AutoConf when a private swarm key is present during installation and upgrade, preventing startup errors. + +## [0.67.7] - 2025-11-11 + +### Added +- Added support for specifying the Git branch (main or nightly) during `prod install` and `prod upgrade`. +- The chosen branch is now saved and automatically used for future upgrades unless explicitly overridden. + +### Changed +- Updated help messages and examples for production commands to include branch options. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.67.6] - 2025-11-11 + +### Added +\n +### Changed +- The binary installer now updates the source repository if it already exists, instead of only cloning it if missing. + +### Deprecated + +### Removed + +### Fixed +- Resolved an issue where disabling AutoConf in the IPFS repository could leave 'auto' placeholders in the config, causing startup errors. + +## [0.67.5] - 2025-11-11 + +### Added +- Added `--restart` option to `dbn prod upgrade` to automatically restart services after upgrade. 
+- The gateway now supports an optional `--config` flag to specify the configuration file path. + +### Changed +- Improved `dbn prod upgrade` process to better handle existing installations, including detecting node type and ensuring configurations are updated to the latest format. +- Configuration loading logic for `node` and `gateway` commands now correctly handles absolute paths passed via command line or systemd. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue during production upgrades where IPFS repositories in private swarms might fail to start due to `AutoConf` not being disabled. + +## [0.67.4] - 2025-11-11 + +### Added +\n +### Changed +- Improved configuration file loading logic to support absolute paths for config files. +- Updated IPFS Cluster initialization during setup to run `ipfs-cluster-service init` and automatically configure the cluster secret. +- IPFS repositories initialized with a private swarm key will now automatically disable AutoConf. + +### Deprecated + +### Removed + +### Fixed +- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.debros/`) and production (`~/.debros/configs/`) directories. + +## [0.67.3] - 2025-11-11 + +### Added +\n +### Changed +- Improved reliability of IPFS (Kubo) installation by switching from a single install script to the official step-by-step download and extraction process. +- Updated IPFS (Kubo) installation to use version v0.38.2. +- Enhanced binary installation routines (RQLite, IPFS, Go) to ensure the installed binaries are immediately available in the current process's PATH. + +### Deprecated + +### Removed + +### Fixed +- Fixed potential installation failures for RQLite by adding error checking to the binary copy command. + +## [0.67.2] - 2025-11-11 + +### Added +- Added a new utility function to reliably resolve the full path of required external binaries (like ipfs, rqlited, etc.). 
+ +### Changed +- Improved service initialization by validating the availability and path of all required external binaries before creating systemd service units. +- Updated systemd service generation logic to use the resolved, fully-qualified paths for external binaries instead of relying on hardcoded paths. + +### Deprecated + +### Removed + +### Fixed +- Changed IPFS initialization from a warning to a fatal error if the repo fails to initialize, ensuring setup stops on critical failures. + +## [0.67.1] - 2025-11-11 + +### Added +\n +### Changed +- Improved disk space check logic to correctly check the parent directory if the specified path does not exist. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue in the installation script where the extracted CLI binary might be named 'dbn' instead of 'network-cli', ensuring successful installation regardless of the extracted filename. + +## [0.67.0] - 2025-11-11 + +### Added +- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag. +- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining. + +### Changed +- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive. +- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`. + +## [0.67.0] - 2025-11-11 + +### Added +- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag. +- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining. 
+ +### Changed +- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive. +- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`. + +## [0.66.1] - 2025-11-11 + +### Added +\n +### Changed +- Allow bootstrap nodes to optionally define a join address to synchronize with another bootstrap cluster. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.66.0] - 2025-11-11 + +### Added +- Pre-installation checks for minimum system resources (10GB disk space, 2GB RAM, 2 CPU cores) are now performed during setup. +- All systemd services (IPFS, RQLite, Olric, Node, Gateway) now log directly to dedicated files in the logs directory instead of using the system journal. + +### Changed +- Improved logging instructions in the setup completion message to reference the new dedicated log files. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.65.0] - 2025-11-11 + +### Added +- Expanded the local development environment (`dbn dev up`) from 3 nodes to 5 nodes (2 bootstraps and 3 regular nodes) for better testing of cluster resilience and quorum. +- Added a new `bootstrap2` node configuration and service to the development topology. + +### Changed +- Updated the `dbn dev up` command to configure and start all 5 nodes and associated services (IPFS, RQLite, IPFS Cluster). +- Modified RQLite and LibP2P health checks in the development environment to require a quorum of 3 out of 5 nodes. +- Refactored development environment configuration logic using a new `Topology` structure for easier management of node ports and addresses. + +### Deprecated + +### Removed + +### Fixed +- Ensured that secondary bootstrap nodes can correctly join the primary RQLite cluster in the development environment. 
+ +## [0.64.1] - 2025-11-10 + +### Added +\n +### Changed +- Improved the accuracy of the Raft log index reporting by falling back to reading persisted snapshot metadata from disk if the running RQLite instance is not yet reachable or reports a zero index. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.64.0] - 2025-11-10 + +### Added +- Comprehensive End-to-End (E2E) test suite for Gateway API endpoints (Cache, RQLite, Storage, Network, Auth). +- New E2E tests for concurrent operations and TTL expiry in the distributed cache. +- New E2E tests for LibP2P peer connectivity and discovery. + +### Changed +- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.debros` configuration files, removing the need for environment variables. +- The `/v1/network/peers` endpoint now returns a flattened list of multiaddresses for all connected peers. +- Improved robustness of Cache API handlers to correctly identify and return 404 (Not Found) errors when keys are missing, even when wrapped by underlying library errors. +- The RQLite transaction handler now supports the legacy `statements` array format in addition to the `ops` array format for easier use. +- The RQLite schema endpoint now returns tables under the `tables` key instead of `objects`. + +### Deprecated + +### Removed + +### Fixed +- Corrected IPFS Add operation to return the actual file size (byte count) instead of the DAG size in the response. + +## [0.63.3] - 2025-11-10 + +### Added +\n +### Changed +- Improved RQLite cluster stability by automatically clearing stale Raft state on startup if peers have a higher log index, allowing the node to join cleanly. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.63.2] - 2025-11-10 + +### Added +\n +### Changed +- Improved process termination logic in development environments to ensure child processes are also killed. 
+- Enhanced the `dev-kill-all.sh` script to reliably kill all processes using development ports, including orphaned processes and their children. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.63.1] - 2025-11-10 + +### Added +\n +### Changed +- Increased the default minimum cluster size for database environments from 1 to 3. + +### Deprecated + +### Removed + +### Fixed +- Prevented unnecessary cluster recovery attempts when a node starts up as the first node (fresh bootstrap). + +## [0.63.0] - 2025-11-10 + +### Added +- Added a new `kill` command to the Makefile for forcefully shutting down all development processes. +- Introduced a new `stop` command in the Makefile for graceful shutdown of development processes. + +### Changed +- The `kill` command now performs a graceful shutdown attempt followed by a force kill of any lingering processes and verifies that development ports are free. + +### Deprecated + +### Removed + +### Fixed +\n +## [0.62.0] - 2025-11-10 + +### Added +- The `prod status` command now correctly checks for both 'bootstrap' and 'node' service variants. + +### Changed +- The production installation process now generates secrets (like the cluster secret and peer ID) before initializing services. This ensures all necessary secrets are available when services start. +- The `prod install` command now displays the actual Peer ID upon completion instead of a placeholder. + +### Deprecated + +### Removed + +### Fixed +- Fixed an issue where IPFS Cluster initialization was using a hardcoded configuration file instead of relying on the standard `ipfs-cluster-service init` process. + +## [0.61.0] - 2025-11-10 + +### Added +- Introduced a new simplified authentication flow (`dbn auth login`) that allows users to generate an API key directly from a wallet address without signature verification (for development/testing purposes). +- Added a new `PRODUCTION_INSTALL.md` guide for production deployment using the `dbn prod` command suite. 
+ +### Changed +- Renamed the primary CLI binary from `network-cli` to `dbn` across all configurations, documentation, and source code. +- Refactored the IPFS configuration logic in the development environment to directly modify the IPFS config file instead of relying on shell commands, improving stability. +- Improved the IPFS Cluster peer count logic to correctly handle NDJSON streaming responses from the `/peers` endpoint. +- Enhanced RQLite connection logic to retry connecting to the database if the store is not yet open, particularly for joining nodes during recovery, improving cluster stability. + +### Deprecated + +### Removed + +### Fixed +\n + +## [0.60.1] - 2025-11-09 + +### Added + +- Improved IPFS Cluster startup logic in development environment to ensure proper peer discovery and configuration. + +### Changed + +- Refactored IPFS Cluster initialization in the development environment to use a multi-phase startup (bootstrap first, then followers) and explicitly clean stale cluster state (pebble, peerstore) before initialization. + +### Deprecated + +### Removed + +### Fixed + +- Fixed an issue where IPFS Cluster nodes in the development environment might fail to join due to incorrect bootstrap configuration or stale state. + +## [0.60.0] - 2025-11-09 + +### Added + +- Introduced comprehensive `dbn dev` commands for managing the local development environment (start, stop, status, logs). +- Added `dbn prod` commands for streamlined production installation, upgrade, and service management on Linux systems (requires root). + +### Changed + +- Refactored `Makefile` targets (`dev` and `kill`) to use the new `dbn dev up` and `dbn dev down` commands, significantly simplifying the development workflow. +- Removed deprecated `dbn config`, `dbn setup`, `dbn service`, and `dbn rqlite` commands, consolidating functionality under `dev` and `prod`. 
+ +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.59.2] - 2025-11-08 + +### Added + +- Added health checks to the installation script to verify the gateway and node services are running after setup or upgrade. +- The installation script now attempts to verify the downloaded binary using checksums.txt if available. +- Added checks in the CLI setup to ensure systemd is available before attempting to create service files. + +### Changed + +- Improved the installation script to detect existing installations, stop services before upgrading, and restart them afterward to minimize downtime. +- Enhanced the CLI setup process by detecting the VPS IP address earlier and improving validation feedback for cluster secrets and swarm keys. +- Modified directory setup to log warnings instead of exiting if `chown` fails, providing manual instructions for fixing ownership issues. +- Improved the HTTPS configuration flow to check for port 80/443 availability before prompting for a domain name. + +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.59.1] - 2025-11-08 + +### Added + +\n + +### Changed + +- Improved interactive setup to prompt for existing IPFS Cluster secret and Swarm key, allowing easier joining of existing private networks. +- Updated default IPFS API URL in configuration files from `http://localhost:9105` to the standard `http://localhost:5001`. +- Updated systemd service files (debros-ipfs.service and debros-ipfs-cluster.service) to correctly determine and use the IPFS and Cluster repository paths. + +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.59.0] - 2025-11-08 + +### Added + +- Added support for asynchronous pinning of uploaded files, improving upload speed. +- Added an optional `pin` flag to the storage upload endpoint to control whether content is pinned (defaults to true). + +### Changed + +- Improved handling of IPFS Cluster responses during the Add operation to correctly process streaming NDJSON output. 
+ +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.58.0] - 2025-11-07 + +### Added + +- Added default configuration for IPFS Cluster and IPFS API settings in node and gateway configurations. +- Added `ipfs` configuration section to node configuration, including settings for cluster API URL, replication factor, and encryption. + +### Changed + +- Improved error logging for cache operations in the Gateway. + +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.57.0] - 2025-11-07 + +### Added + +- Added a new endpoint `/v1/cache/mget` to retrieve multiple keys from the distributed cache in a single request. + +### Changed + +- Improved API key extraction logic to prioritize the `X-API-Key` header and better handle different authorization schemes (Bearer, ApiKey) while avoiding confusion with JWTs. +- Refactored cache retrieval logic to use a dedicated function for decoding values from the distributed cache. + +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.56.0] - 2025-11-05 + +### Added + +- Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning. +- Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints. +- Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup. + +### Changed + +- Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor. +- Refactored Olric configuration generation to use a simpler, local-environment focused setup. +- Improved IPFS content retrieval (`Get`) to fall back to the IPFS Gateway (port 8080) if the IPFS API (port 5001) returns a 404. + +### Deprecated + +### Removed + +### Fixed + +## [0.54.0] - 2025-11-03 + +### Added + +- Integrated Olric distributed cache for high-speed key-value storage and caching. 
+- Added new HTTP Gateway endpoints for cache operations (GET, PUT, DELETE, SCAN) via `/v1/cache/`. +- Added `olric_servers` and `olric_timeout` configuration options to the Gateway. +- Updated the automated installation script (`install-debros-network.sh`) to include Olric installation, configuration, and firewall rules (ports 3320, 3322). + +### Changed + +- Refactored README for better clarity and organization, focusing on quick start and core features. + +### Deprecated + +### Removed + +### Fixed + +\n + +## [0.53.18] - 2025-11-03 + +### Added + +\n + +### Changed + - Increased the connection timeout during peer discovery from 15 seconds to 20 seconds to improve connection reliability. - Removed unnecessary debug logging related to filtering out ephemeral port addresses during peer exchange. @@ -26,13 +771,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.17] - 2025-11-03 ### Added + - Added a new Git `pre-commit` hook to automatically update the changelog and version before committing, ensuring version consistency. ### Changed + - Refactored the `update_changelog.sh` script to support different execution contexts (pre-commit vs. pre-push), allowing it to analyze only staged changes during commit. - The Git `pre-push` hook was simplified by removing the changelog update logic, which is now handled by the `pre-commit` hook. @@ -41,12 +790,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.16] - 2025-11-03 ### Added + \n + ### Changed + - Improved the changelog generation script to prevent infinite loops when the only unpushed commit is a previous changelog update. 
### Deprecated @@ -54,12 +808,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.15] - 2025-11-03 ### Added + \n + ### Changed + - Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation. - Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security. - Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility. @@ -71,12 +830,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.15] - 2025-11-03 ### Added + \n + ### Changed + - Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation. - Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security. - Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility. @@ -88,14 +852,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.14] - 2025-11-03 ### Added + - Added a new `install-hooks` target to the Makefile to easily set up git hooks. - Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`. ### Changed + - Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog. - Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration. - Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments. 
@@ -107,14 +875,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n + ## [0.53.14] - 2025-11-03 ### Added + - Added a new `install-hooks` target to the Makefile to easily set up git hooks. - Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`. ### Changed + - Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog. - Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration. - Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments. @@ -124,6 +896,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed ### Fixed + \n ## [0.53.8] - 2025-10-31 @@ -131,7 +904,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Added - **HTTPS/ACME Support**: Gateway now supports automatic HTTPS with Let's Encrypt certificates via ACME - - Interactive domain configuration during `network-cli setup` command + - Interactive domain configuration during `dbn setup` command - Automatic port availability checking for ports 80 and 443 before enabling HTTPS - DNS resolution verification to ensure domain points to the server IP - TLS certificate cache directory management (`~/.debros/tls-cache`) @@ -177,8 +950,8 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Changed -- **GoReleaser**: Updated to build only `network-cli` binary (v0.52.2+) - - Other binaries (node, gateway, identity) now installed via `network-cli setup` +- **GoReleaser**: Updated to build only `dbn` binary (v0.52.2+) + - Other binaries (node, gateway, identity) now installed via `dbn setup` - Cleaner, smaller release packages - Resolves 
archive mismatch errors - **GitHub Actions**: Updated artifact actions from v3 to v4 (deprecated versions) @@ -196,7 +969,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant - **CLI Refactor**: Modularized monolithic CLI into `pkg/cli/` package structure for better maintainability - New `environment.go`: Multi-environment management system (local, devnet, testnet) - New `env_commands.go`: Environment switching commands (`env list`, `env switch`, `devnet enable`, `testnet enable`) - - New `setup.go`: Interactive VPS installation command (`network-cli setup`) that replaces bash install script + - New `setup.go`: Interactive VPS installation command (`dbn setup`) that replaces bash install script - New `service.go`: Systemd service management commands (`service start|stop|restart|status|logs`) - New `auth_commands.go`, `config_commands.go`, `basic_commands.go`: Refactored commands into modular pkg/cli - **Release Pipeline**: Complete automated release infrastructure via `.goreleaser.yaml` and GitHub Actions @@ -218,7 +991,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant - All business logic moved to modular `pkg/cli/` functions - Easier to test, maintain, and extend individual commands - **Installation**: `scripts/install-debros-network.sh` now APT-ready with fallback to source build -- **Setup Process**: Consolidated all installation logic into `network-cli setup` command +- **Setup Process**: Consolidated all installation logic into `dbn setup` command - Single unified installation regardless of installation method - Interactive user experience with clear progress indicators @@ -229,7 +1002,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Added - One-command `make dev` target to start full development stack (bootstrap + node2 + node3 + gateway in background) -- New `network-cli config init` (no --type) generates complete development stack with all configs 
and identities +- New `dbn config init` (no --type) generates complete development stack with all configs and identities - Full stack initialization with auto-generated peer identities for bootstrap and all nodes - Explicit control over LibP2P listen addresses for better localhost/development support - Production/development mode detection for NAT services (disabled for localhost, enabled for production) @@ -240,8 +1013,8 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant - Simplified Makefile: removed legacy dev commands, replaced with unified `make dev` target - Updated README with clearer getting started instructions (single `make dev` command) -- Simplified `network-cli config init` behavior: defaults to generating full stack instead of single node -- `network-cli config init` now handles bootstrap peer discovery and join addresses automatically +- Simplified `dbn config init` behavior: defaults to generating full stack instead of single node +- `dbn config init` now handles bootstrap peer discovery and join addresses automatically - LibP2P configuration: removed always-on NAT services for development environments - Code formatting in pkg/node/node.go (indentation fixes in bootstrapPeerSource) @@ -437,7 +1210,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Removed -- Removed cli, network-cli binaries from project +- Removed cli, dbn binaries from project - Removed AI_CONTEXT.md - Removed Network.md - Removed unused log from monitoring.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index efff5c6..f93d30f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,19 +22,19 @@ make deps - Test: `make test` - Format/Vet: `make fmt vet` (or `make lint`) -``` +```` Useful CLI commands: ```bash -./bin/network-cli health -./bin/network-cli peers -./bin/network-cli status -``` +./bin/dbn health +./bin/dbn peers +./bin/dbn status +```` ## Versioning -- The CLI reports its version via 
`network-cli version`. +- The CLI reports its version via `dbn version`. - Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser. ## Pull Requests diff --git a/Makefile b/Makefile index 22f1d5c..28d2509 100644 --- a/Makefile +++ b/Makefile @@ -6,22 +6,20 @@ test: go test -v $(TEST) # Gateway-focused E2E tests assume gateway and nodes are already running -# Configure via env: -# GATEWAY_BASE_URL (default http://127.0.0.1:6001) -# GATEWAY_API_KEY (required for auth-protected routes) +# Auto-discovers configuration from ~/.debros and queries database for API key +# No environment variables required .PHONY: test-e2e test-e2e: - @echo "Running gateway E2E tests (HTTP/WS only)..." - @echo "Base URL: $${GATEWAY_BASE_URL:-http://127.0.0.1:6001}" - @test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1) + @echo "Running comprehensive E2E tests..." + @echo "Auto-discovering configuration from ~/.debros..." go test -v -tags e2e ./e2e # Network - Distributed P2P Database System # Makefile for development and build tasks -.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks +.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.53.18 +VERSION := 0.69.13 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' @@ -32,10 +30,10 @@ build: deps @mkdir -p bin go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node - go build -ldflags "$(LDFLAGS)" -o bin/network-cli cmd/cli/main.go + go build -ldflags "$(LDFLAGS)" -o bin/dbn cmd/cli/main.go # Inject gateway build metadata via pkg path variables go build -ldflags "$(LDFLAGS) -X 
'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway - @echo "Build complete! Run ./bin/network-cli version" + @echo "Build complete! Run ./bin/dbn version" # Install git hooks install-hooks: @@ -53,105 +51,46 @@ clean: run-node: @echo "Starting bootstrap node..." @echo "Config: ~/.debros/bootstrap.yaml" - @echo "Generate it with: network-cli config init --type bootstrap" + @echo "Generate it with: dbn config init --type bootstrap" go run ./cmd/node --config node.yaml # Run second node (regular) - requires join address of bootstrap node -# Usage: make run-node2 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002 +# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002 run-node2: @echo "Starting regular node (node.yaml)..." @echo "Config: ~/.debros/node.yaml" - @echo "Generate it with: network-cli config init --type node --join localhost:5001 --bootstrap-peers ''" + @echo "Generate it with: dbn config init --type node --join localhost:5001 --bootstrap-peers ''" go run ./cmd/node --config node2.yaml # Run third node (regular) - requires join address of bootstrap node -# Usage: make run-node3 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003 +# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003 run-node3: @echo "Starting regular node (node2.yaml)..." 
@echo "Config: ~/.debros/node2.yaml" - @echo "Generate it with: network-cli config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers ''" + @echo "Generate it with: dbn config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers ''" go run ./cmd/node --config node3.yaml # Run gateway HTTP server # Usage examples: # make run-gateway # uses ~/.debros/gateway.yaml -# Config generated with: network-cli config init --type gateway +# Config generated with: dbn config init --type gateway run-gateway: @echo "Starting gateway HTTP server..." @echo "Note: Config must be in ~/.debros/gateway.yaml" - @echo "Generate it with: network-cli config init --type gateway" + @echo "Generate it with: dbn config init --type gateway" go run ./cmd/gateway -# One-command dev: Start bootstrap, node2, node3, gateway, and anon in background -# Requires: configs already exist in ~/.debros +# Development environment target +# Uses dbn dev up to start full stack with dependency and port checking dev: build - @echo "🚀 Starting development network stack..." - @mkdir -p .dev/pids - @mkdir -p $$HOME/.debros/logs - @echo "Starting Anyone client (anon proxy)..." - @if [ "$$(uname)" = "Darwin" ]; then \ - echo " Detected macOS - using npx anyone-client"; \ - if command -v npx >/dev/null 2>&1; then \ - nohup npx anyone-client > $$HOME/.debros/logs/anon.log 2>&1 & echo $$! 
> .dev/pids/anon.pid; \ - echo " Anyone client started (PID: $$(cat .dev/pids/anon.pid))"; \ - else \ - echo " ⚠️ npx not found - skipping Anyone client"; \ - echo " Install with: npm install -g npm"; \ - fi; \ - elif [ "$$(uname)" = "Linux" ]; then \ - echo " Detected Linux - checking systemctl"; \ - if systemctl is-active --quiet anon 2>/dev/null; then \ - echo " ✓ Anon service already running"; \ - elif command -v systemctl >/dev/null 2>&1; then \ - echo " Starting anon service..."; \ - sudo systemctl start anon 2>/dev/null || echo " ⚠️ Failed to start anon service"; \ - else \ - echo " ⚠️ systemctl not found - skipping Anon"; \ - fi; \ - fi - @sleep 2 - @echo "Starting bootstrap node..." - @nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid - @sleep 2 - @echo "Starting node2..." - @nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid - @sleep 1 - @echo "Starting node3..." - @nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid - @sleep 1 - @echo "Starting gateway..." - @nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid - @echo "" - @echo "============================================================" - @echo "✅ Development stack started!" 
- @echo "============================================================" - @echo "" - @echo "Processes:" - @if [ -f .dev/pids/anon.pid ]; then \ - echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \ - fi - @echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)" - @echo " Node2: PID=$$(cat .dev/pids/node2.pid)" - @echo " Node3: PID=$$(cat .dev/pids/node3.pid)" - @echo " Gateway: PID=$$(cat .dev/pids/gateway.pid)" - @echo "" - @echo "Ports:" - @echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)" - @echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001" - @echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002" - @echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003" - @echo " Gateway: 6001" - @echo "" - @echo "Press Ctrl+C to stop all processes" - @echo "============================================================" - @echo "" - @if [ -f .dev/pids/anon.pid ]; then \ - trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ - tail -f $$HOME/.debros/logs/anon.log $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \ - else \ - trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ - tail -f $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \ - fi + @./bin/dbn dev up + +# Kill all processes (graceful shutdown + force kill stray processes) +kill: + @bash scripts/dev-kill-all.sh + +stop: + @./bin/dbn dev down # Help help: @@ -160,42 +99,25 @@ help: @echo " clean - Clean build artifacts" @echo " test - Run tests" @echo "" - @echo "Development:" - @echo " dev - Start full dev stack (bootstrap + 2 nodes + gateway)" - @echo " Requires: configs in ~/.debros (run 'network-cli config init' first)" + @echo "Local Development (Recommended):" + @echo " make dev - Start full development stack with one 
command" + @echo " - Checks dependencies and available ports" + @echo " - Generates configs (2 bootstraps + 3 nodes + gateway)" + @echo " - Starts IPFS, RQLite, Olric, all nodes, and gateway" + @echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)" + @echo " - Stops all services if health checks fail" + @echo " - Includes comprehensive logging" + @echo " make kill - Stop all development services" @echo "" - @echo "Configuration (NEW):" - @echo " First, generate config files in ~/.debros with:" - @echo " make build # Build CLI first" - @echo " ./bin/network-cli config init # Generate full stack" + @echo "Development Management (via dbn):" + @echo " ./bin/dbn dev status - Show status of all dev services" + @echo " ./bin/dbn dev logs [--follow]" @echo "" - @echo "Network Targets (requires config files in ~/.debros):" - @echo " run-node - Start bootstrap node" - @echo " run-node2 - Start second node" - @echo " run-node3 - Start third node" - @echo " run-gateway - Start HTTP gateway" - @echo " run-example - Run usage example" - @echo "" - @echo "Running Multiple Nodes:" - @echo " Nodes use --config flag to select which YAML file in ~/.debros to load:" - @echo " go run ./cmd/node --config bootstrap.yaml" - @echo " go run ./cmd/node --config node.yaml" - @echo " go run ./cmd/node --config node2.yaml" - @echo " Generate configs with: ./bin/network-cli config init --name " - @echo "" - @echo "CLI Commands:" - @echo " run-cli - Run network CLI help" - @echo " cli-health - Check network health" - @echo " cli-peers - List network peers" - @echo " cli-status - Get network status" - @echo " cli-storage-test - Test storage operations" - @echo " cli-pubsub-test - Test pub/sub operations" - @echo "" - @echo "Development:" - @echo " test-multinode - Full multi-node test with 1 bootstrap + 2 nodes" - @echo " test-peer-discovery - Test peer discovery (requires running nodes)" - @echo " test-replication - Test data replication (requires running nodes)" - @echo " 
test-consensus - Test database consensus (requires running nodes)" + @echo "Individual Node Targets (advanced):" + @echo " run-node - Start bootstrap node directly" + @echo " run-node2 - Start second node directly" + @echo " run-node3 - Start third node directly" + @echo " run-gateway - Start HTTP gateway directly" @echo "" @echo "Maintenance:" @echo " deps - Download dependencies" @@ -203,8 +125,4 @@ help: @echo " fmt - Format code" @echo " vet - Vet code" @echo " lint - Lint code (fmt + vet)" - @echo " clear-ports - Clear common dev ports" - @echo " dev-setup - Setup development environment" - @echo " dev-cluster - Show cluster startup commands" - @echo " dev - Full development workflow" @echo " help - Show this help" diff --git a/PRODUCTION_INSTALL.md b/PRODUCTION_INSTALL.md new file mode 100644 index 0000000..de0ac93 --- /dev/null +++ b/PRODUCTION_INSTALL.md @@ -0,0 +1,175 @@ +# Production Installation Guide - DeBros Network + +This guide covers production deployment of the DeBros Network using the `dbn prod` command suite. + +## System Requirements + +- **OS**: Ubuntu 20.04 LTS or later, Debian 11+, or other Linux distributions +- **Architecture**: x86_64 (amd64) or ARM64 (aarch64) +- **RAM**: Minimum 4GB, recommended 8GB+ +- **Storage**: Minimum 50GB SSD recommended +- **Ports**: + - 4001 (P2P networking) + - 4501 (IPFS HTTP API - bootstrap), 4502/4503 (node2/node3) + - 5001-5003 (RQLite HTTP - one per node) + - 6001 (Gateway) + - 7001-7003 (RQLite Raft - one per node) + - 9094 (IPFS Cluster API - bootstrap), 9104/9114 (node2/node3) + - 3320/3322 (Olric) + - 80, 443 (for HTTPS with Let's Encrypt) + +## Installation + +### Prerequisites + +1. **Root access required**: All production operations require sudo/root privileges +2. **Supported distros**: Ubuntu, Debian, Fedora (via package manager) +3. 
**Basic tools**: `curl`, `git`, `make`, `build-essential`, `wget` + +### Single-Node Bootstrap Installation + +Deploy the first node (bootstrap node) on a VPS: + +```bash +sudo dbn prod install --bootstrap +``` + +This will: + +1. Check system prerequisites (OS, arch, root privileges, basic tools) +2. Provision the `debros` system user and filesystem structure at `~/.debros` +3. Download and install all required binaries (Go, RQLite, IPFS, IPFS Cluster, Olric, DeBros) +4. Generate secrets (cluster secret, swarm key, node identity) +5. Initialize repositories (IPFS, IPFS Cluster, RQLite) +6. Generate configurations for bootstrap node +7. Create and start systemd services + +All files will be under `/home/debros/.debros`: + +``` +~/.debros/ +├── bin/ # Compiled binaries +├── configs/ # YAML configurations +├── data/ +│ ├── ipfs/ # IPFS repository +│ ├── ipfs-cluster/ # IPFS Cluster state +│ └── rqlite/ # RQLite database +├── logs/ # Service logs +└── secrets/ # Keys and certificates +``` + +### Joining Additional Nodes + +Every non-bootstrap node must use the exact same IPFS Cluster secret as the bootstrap host. When you provision a follower node: + +1. Copy the secret from the bootstrap machine: + ```bash + scp debros@:/home/debros/.debros/secrets/cluster-secret ./cluster-secret + ``` +2. Run the installer with the `--cluster-secret` flag: + ```bash + sudo dbn prod install --vps-ip \ + --peers /ip4//tcp/4001/p2p/ \ + --cluster-secret $(cat ./cluster-secret) + ``` + +The installer now enforces `--cluster-secret` for all non-bootstrap nodes, which prevents mismatched cluster PSKs during deployment. 
+ +## Service Management + +### Check Service Status + +```bash +sudo systemctl status debros-node-bootstrap +sudo systemctl status debros-gateway +sudo systemctl status debros-rqlite-bootstrap +``` + +### View Service Logs + +```bash +# Bootstrap node logs +sudo journalctl -u debros-node-bootstrap -f + +# Gateway logs +sudo journalctl -u debros-gateway -f + +# All services +sudo journalctl -u "debros-*" -f +``` + +## Health Checks + +After installation, verify services are running: + +```bash +# Check IPFS +curl http://localhost:4501/api/v0/id + +# Check RQLite cluster +curl http://localhost:5001/status + +# Check Gateway +curl http://localhost:6001/health + +# Check Olric +curl http://localhost:3320/ping +``` + +## Port Reference + +### Development Environment (via `make dev`) + +- IPFS API: 4501 (bootstrap), 4502 (node2), 4503 (node3) +- RQLite HTTP: 5001, 5002, 5003 +- RQLite Raft: 7001, 7002, 7003 +- IPFS Cluster: 9094, 9104, 9114 +- P2P: 4001, 4002, 4003 +- Gateway: 6001 +- Olric: 3320, 3322 + +### Production Environment (via `sudo dbn prod install`) + +- Same port assignments as development for consistency + +## Configuration Files + +Key configuration files are located in `~/.debros/configs/`: + +- **bootstrap.yaml**: Bootstrap node configuration +- **node.yaml**: Regular node configuration +- **gateway.yaml**: HTTP gateway configuration +- **olric.yaml**: In-memory cache configuration + +Edit these files directly for advanced configuration, then restart services: + +```bash +sudo systemctl restart debros-node-bootstrap +``` + +## Troubleshooting + +### Port already in use + +Check which process is using the port: + +```bash +sudo lsof -i :4501 +sudo lsof -i :5001 +sudo lsof -i :7001 +``` + +Kill conflicting processes or change ports in config. + +### RQLite cluster not forming + +Ensure: + +1. Bootstrap node is running: `systemctl status debros-rqlite-bootstrap` +2. Network connectivity between nodes on ports 5001+ (HTTP) and 7001+ (Raft) +3. 
Check logs: `journalctl -u debros-rqlite-bootstrap -f` + +--- + +**Last Updated**: November 2025 +**Compatible with**: Network v1.0.0+ diff --git a/README.md b/README.md index f0826fd..ddc4c79 100644 --- a/README.md +++ b/README.md @@ -1,966 +1,605 @@ # DeBros Network - Distributed P2P Database System -A robust, decentralized peer-to-peer network built in Go, providing distributed SQL database, key-value storage, pub/sub messaging, and resilient peer management. Designed for applications needing reliable, scalable, and secure data sharing without centralized infrastructure. - ---- +DeBros Network is a decentralized peer-to-peer data platform built in Go. It combines distributed SQL (RQLite), pub/sub messaging, and resilient peer discovery so applications can share state without central infrastructure. ## Table of Contents -- [Features](#features) -- [Architecture Overview](#architecture-overview) -- [System Requirements](#system-requirements) +- [At a Glance](#at-a-glance) - [Quick Start](#quick-start) -- [Deployment & Installation](#deployment--installation) -- [Configuration](#configuration) -- [CLI Usage](#cli-usage) +- [Production Deployment](#production-deployment) +- [Components & Ports](#components--ports) +- [Configuration Cheatsheet](#configuration-cheatsheet) +- [CLI Highlights](#cli-highlights) - [HTTP Gateway](#http-gateway) -- [Development](#development) -- [Database Client (Go ORM-like)](#database-client-go-orm-like) - [Troubleshooting](#troubleshooting) -- [License](#license) +- [Resources](#resources) ---- +## At a Glance -## Features - -- **Distributed SQL Database:** RQLite-backed, Raft-consensus, ACID transactions, automatic failover. -- **Pub/Sub Messaging:** Topic-based, real-time, namespaced, automatic cleanup. -- **Peer Discovery & Management:** Nodes discover peers, bootstrap support, health monitoring. -- **Application Isolation:** Namespace-based multi-tenancy, per-app config.
-- **Secure by Default:** Noise/TLS transport, peer identity, systemd hardening. -- **Simple Client API:** Lightweight Go client for apps and CLI tools. - ---- - -## Architecture Overview - -``` -┌─────────────────────────────────────────────────────────────┐ -│ DeBros Network Cluster │ -├─────────────────────────────────────────────────────────────┤ -│ Application Layer │ -│ ┌─────────────┐ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ Anchat │ │ Custom App │ │ CLI Tools │ │ -│ └─────────────┘ └─────────────┘ └────────────────────────┘ │ -├─────────────────────────────────────────────────────────────┤ -│ Client API │ -│ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ Database │ │ PubSub │ │ -│ │ Client │ │ Client │ │ -│ └─────────────┘ └────────────────────────┘ │ -├─────────────────────────────────────────────────────────────┤ -│ Network Node Layer │ -│ ┌─────────────┐ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ Discovery │ │ PubSub │ │ Database │ │ -│ │ Manager │ │ Manager │ │ (RQLite) │ │ -│ └─────────────┘ └─────────────┘ └────────────────────────┘ │ -├─────────────────────────────────────────────────────────────┤ -│ Transport Layer │ -│ ┌─────────────┐ ┌─────────────┐ ┌────────────────────────┐ │ -│ │ LibP2P │ │ Noise/TLS │ │ RQLite │ │ -│ │ Host │ │ Encryption │ │ Database │ │ -│ └─────────────┘ └─────────────┘ └────────────────────────┘ │ -└─────────────────────────────────────────────────────────────┘ -``` - -- **Node:** Full P2P participant, runs services, handles peer discovery, database, pubsub. -- **Client:** Lightweight, connects only to bootstrap peers, consumes services, no peer discovery. 
- ---- - -## System Requirements - -### Software - -- **Go:** 1.21+ (recommended) -- **RQLite:** 8.x (distributed SQLite) -- **Git:** For source management -- **Make:** For build automation (recommended) - -### Hardware - -- **Minimum:** 2 CPU cores, 4GB RAM, 10GB disk, stable internet -- **Recommended:** 4+ cores, 8GB+ RAM, 50GB+ SSD, low-latency network - -### Network Ports - -- **4001:** LibP2P P2P communication -- **5001:** RQLite HTTP API -- **7001:** RQLite Raft consensus - -### Filesystem Permissions - -DeBros Network stores all configuration and data in `~/.debros/` directory. Ensure you have: - -- **Read/Write access** to your home directory (`~`) -- **Available disk space**: At least 10GB for database and logs -- **No restrictive mount options**: The home directory must not be mounted read-only -- **Unix permissions**: Standard user permissions are sufficient (no root/sudo required) - -#### Directory Structure - -DeBros automatically creates the following directory structure: - -``` -~/.debros/ -├── bootstrap.yaml # Bootstrap node config -├── node.yaml # Node config -├── gateway.yaml # Gateway config -├── bootstrap/ # Bootstrap node data (auto-created) -│ ├── rqlite/ # RQLite database files -│ │ ├── db.sqlite # Main database -│ │ ├── raft/ # Raft consensus data -│ │ └── rsnapshots/ # Raft snapshots -│ ├── peer.info # Node multiaddr (created at startup) -│ └── identity.key # Node private key (created at startup) -├── node/ # Node data (auto-created) -│ ├── rqlite/ # RQLite database files -│ ├── raft/ # Raft data -│ ├── peer.info # Node multiaddr (created at startup) -│ └── identity.key # Node private key (created at startup) -└── node2/ # Additional node configs (if running multiple) - └── rqlite/ # RQLite database files -``` - -**Files Created at Startup:** -- `identity.key` - LibP2P private key for the node (generated once, reused) -- `peer.info` - The node's multiaddr (e.g., `/ip4/0.0.0.0/tcp/4001/p2p/12D3KooW...`) - -**Automatic Creation**: The node 
automatically creates all necessary data directories when started. You only need to ensure: -1. `~/.debros/` is writable -2. Sufficient disk space available -3. Correct config files exist - -**Permission Check:** - -```bash -# Verify home directory is writable -touch ~/test-write && rm ~/test-write && echo "✓ Home directory is writable" - -# Check available disk space -df -h ~ -``` - -**If you get permission errors:** - -``` -Error: Failed to create/access config directory -Please ensure: - 1. Home directory is accessible - 2. You have write permissions to home directory - 3. Disk space is available -``` - -**Solution:** - -- Ensure you're not running with overly restrictive umask: `umask` should show `0022` or similar -- Check home directory permissions: `ls -ld ~` should show your user as owner -- For sandboxed/containerized environments: Ensure `/home/` is writable - ---- +- Distributed SQL backed by RQLite and Raft consensus +- Topic-based pub/sub with automatic cleanup +- Namespace isolation for multi-tenant apps +- Secure transport using libp2p plus Noise/TLS +- Lightweight Go client and CLI tooling ## Quick Start -### 1. Clone and Setup +1. Clone and build the project: + + ```bash + git clone https://github.com/DeBrosOfficial/network.git + cd network + make build + ``` + +2. Generate local configuration (bootstrap, node2, node3, gateway): + + ```bash + ./bin/dbn config init + ``` + +3. Launch the full development stack: + + ```bash + make dev + ``` + + This starts three nodes and the HTTP gateway. **The command will not complete successfully until all services pass health checks** (IPFS peer connectivity, RQLite cluster formation, and LibP2P connectivity). If health checks fail, all services are stopped automatically. Stop with `Ctrl+C`. + +4. 
Validate the network from another terminal: + + ```bash + ./bin/dbn health + ./bin/dbn peers + ./bin/dbn pubsub publish notifications "Hello World" + ./bin/dbn pubsub subscribe notifications 10s + ``` + +## Production Deployment + +DeBros Network can be deployed as production systemd services on Linux servers. The production installer handles all dependencies, configuration, and service management automatically. + +### Prerequisites + +- **OS**: Ubuntu 20.04+, Debian 11+, or compatible Linux distribution +- **Architecture**: `amd64` (x86_64) or `arm64` (aarch64) +- **Permissions**: Root access (use `sudo`) +- **Resources**: Minimum 2GB RAM, 10GB disk space, 2 CPU cores + +### Installation + +#### Quick Install + +Install the CLI tool first: ```bash -git clone https://github.com/DeBrosOfficial/network.git -cd network +curl -fsSL https://install.debros.network | sudo bash ``` -### 2. Build All Executables +Or download manually from [GitHub Releases](https://github.com/DeBrosOfficial/network/releases). + +#### Bootstrap Node (First Node) + +Install the first node in your cluster: ```bash -make build +# Main branch (stable releases) +sudo dbn prod install --bootstrap + +# Nightly branch (latest development) +sudo dbn prod install --bootstrap --branch nightly ``` -### 3. Generate Configuration Files +The bootstrap node initializes the cluster and serves as the primary peer for other nodes to join. 
+ +#### Secondary Node (Join Existing Cluster) + +Join an existing cluster by providing the bootstrap node's IP and peer multiaddr: ```bash -# Generate all configs (bootstrap, node2, node3, gateway) with one command -./bin/network-cli config init +sudo dbn prod install \ + --vps-ip \ + --peers /ip4//tcp/4001/p2p/ \ + --branch nightly ``` -This creates: -- `~/.debros/bootstrap.yaml` - Bootstrap node -- `~/.debros/node2.yaml` - Regular node 2 -- `~/.debros/node3.yaml` - Regular node 3 -- `~/.debros/gateway.yaml` - HTTP Gateway +**Required flags for secondary nodes:** -Plus auto-generated identities for each node. +- `--vps-ip`: Your server's public IP address +- `--peers`: Comma-separated list of bootstrap peer multiaddrs -### 4. Start the Complete Network Stack +**Optional flags:** + +- `--branch`: Git branch to use (`main` or `nightly`, default: `main`) +- `--domain`: Domain name for HTTPS (enables ACME/Let's Encrypt) - see [HTTPS Setup](#https-setup-with-domain) below +- `--bootstrap-join`: Raft join address for secondary bootstrap nodes +- `--force`: Reconfigure all settings (use with caution) + +#### Secondary Bootstrap Node + +Create a secondary bootstrap node that joins an existing Raft cluster: ```bash -make dev +sudo dbn prod install \ + --bootstrap \ + --vps-ip \ + --bootstrap-join :7001 \ + --branch nightly ``` -This starts: -- Bootstrap node (P2P: 4001, RQLite HTTP: 5001, Raft: 7001) -- Node 2 (P2P: 4002, RQLite HTTP: 5002, Raft: 7002) -- Node 3 (P2P: 4003, RQLite HTTP: 5003, Raft: 7003) -- Gateway (HTTP: 6001) +### Branch Selection -Logs stream to terminal. Press **Ctrl+C** to stop all processes. +DeBros Network supports two branches: -### 5. Test with CLI (in another terminal) +- **`main`**: Stable releases (default). Recommended for production. +- **`nightly`**: Latest development builds. Use for testing new features. + +**Branch preference is saved automatically** during installation. 
Future upgrades will use the same branch unless you override it with `--branch`. + +**Examples:** ```bash -./bin/network-cli health -./bin/network-cli peers -./bin/network-cli pubsub publish notifications "Hello World" -./bin/network-cli pubsub subscribe notifications 10s +# Install with nightly branch +sudo dbn prod install --bootstrap --branch nightly + +# Upgrade using saved branch preference +sudo dbn prod upgrade --restart + +# Upgrade and switch to main branch +sudo dbn prod upgrade --restart --branch main ``` ---- +### Upgrade -## Deployment & Installation - -### Automated Production Install - -Run the install script for a secure, production-ready setup: +Upgrade an existing installation to the latest version: ```bash -curl -sSL https://github.com/DeBrosOfficial/network/raw/main/scripts/install-debros-network.sh | sudo bash +# Upgrade using saved branch preference +sudo dbn prod upgrade --restart + +# Upgrade and switch branches +sudo dbn prod upgrade --restart --branch nightly + +# Upgrade without restarting services +sudo dbn prod upgrade ``` -**What the Script Does:** +The upgrade process: -- Detects OS, installs Go, RQLite, dependencies -- Creates `debros` system user, secure directory structure -- Generates LibP2P identity keys -- Clones source, builds binaries -- Sets up systemd service (`debros-node`) -- Configures firewall (UFW) for required ports -- Generates YAML config in `/opt/debros/configs/node.yaml` +1. ✅ Checks prerequisites +2. ✅ Updates binaries (fetches latest from selected branch) +3. ✅ Preserves existing configurations and data +4. ✅ Updates configurations to latest format +5. ✅ Updates systemd service files +6. ✅ Optionally restarts services (`--restart` flag) -**Directory Structure:** +**Note**: The upgrade automatically detects your node type (bootstrap vs. regular node) and preserves all secrets, data, and configurations. 
- -``` -/opt/debros/ -├── bin/ # Binaries -├── configs/ # YAML configs -├── keys/ # Identity keys -├── data/ # RQLite DB, storage -├── logs/ # Node logs -├── src/ # Source code -``` +**Note**: Currently, the `upgrade` command does not support adding a domain via `--domain` flag. To enable HTTPS after installation, see [Adding Domain After Installation](#adding-domain-after-installation) below. -**Service Management:** +### HTTPS Setup with Domain + +DeBros Gateway supports automatic HTTPS with Let's Encrypt certificates via ACME. This enables secure connections on ports 80 (HTTP redirect) and 443 (HTTPS). + +#### Prerequisites + +- Domain name pointing to your server's public IP address +- Ports 80 and 443 open and accessible from the internet +- Gateway service running + +#### Adding Domain During Installation + +Specify your domain during installation: ```bash -sudo systemctl status debros-node -sudo systemctl start debros-node -sudo systemctl stop debros-node -sudo systemctl restart debros-node -sudo journalctl -u debros-node.service -f +# Bootstrap node with HTTPS +sudo dbn prod install --bootstrap --domain node-kv4la8.debros.network --branch nightly + +# Secondary node with HTTPS +sudo dbn prod install \ + --vps-ip SERVER_IP \ + --peers /ip4/BOOTSTRAP_IP/tcp/4001/p2p/PEER_ID \ + --domain example.com \ + --branch nightly ``` ---- +The gateway will automatically: -## Configuration +- Obtain Let's Encrypt certificates via ACME +- Serve HTTP on port 80 (redirects to HTTPS) +- Serve HTTPS on port 443 +- Renew certificates automatically -### Configuration Files Location +#### Adding Domain After Installation -All configuration files are stored in `~/.debros/` for both local development and production deployments: +Currently, the `upgrade` command doesn't support `--domain` flag.
To enable HTTPS on an existing installation: -- `~/.debros/node.yaml` - Node configuration -- `~/.debros/node.yaml` - Bootstrap node configuration -- `~/.debros/gateway.yaml` - Gateway configuration - -The system will **only** load config from `~/.debros/` and will error if required config files are missing. - -### Generating Configuration Files - -Use the `network-cli config init` command to generate configuration files: - -### Generate Complete Stack (Recommended) +1. **Edit the gateway configuration:** ```bash -# Generate bootstrap, node2, node3, and gateway configs in one command -./bin/network-cli config init - -# Force regenerate (overwrites existing configs) -./bin/network-cli config init --force +sudo nano /home/debros/.debros/data/gateway.yaml ``` -This is the **recommended way** to get started with a local development network. +2. **Update the configuration:** -### Generate Individual Configs (Advanced) +```yaml +listen_addr: ":6001" +client_namespace: "default" +rqlite_dsn: "" +bootstrap_peers: [] +enable_https: true +domain_name: "your-domain.com" +tls_cache_dir: "/home/debros/.debros/tls-cache" +olric_servers: + - "127.0.0.1:3320" +olric_timeout: "10s" +ipfs_cluster_api_url: "http://localhost:9094" +ipfs_api_url: "http://localhost:4501" +ipfs_timeout: "60s" +ipfs_replication_factor: 3 +``` -For custom setups or production deployments, you can generate individual configs: - -#### Generate a Single Node Config +3. 
**Ensure ports 80 and 443 are available:** ```bash -# Generate basic node config with bootstrap peers -./bin/network-cli config init --type node --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/QmXxx" +# Check if ports are in use +sudo lsof -i :80 +sudo lsof -i :443 -# With custom ports -./bin/network-cli config init --type node --name node2.yaml \ - --listen-port 4002 --rqlite-http-port 5002 --rqlite-raft-port 7002 \ - --join localhost:5001 --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/QmXxx" - -# Force overwrite existing config -./bin/network-cli config init --type node --force +# If needed, stop conflicting services ``` -#### Generate a Bootstrap Node Config +4. **Restart the gateway:** ```bash -# Generate bootstrap node (no join address required) -./bin/network-cli config init --type bootstrap - -# With custom ports -./bin/network-cli config init --type bootstrap --listen-port 4001 --rqlite-http-port 5001 --rqlite-raft-port 7001 +sudo systemctl restart debros-gateway.service ``` -#### Generate a Gateway Config +5. **Verify HTTPS is working:** ```bash -# Generate gateway config -./bin/network-cli config init --type gateway +# Check gateway logs +sudo journalctl -u debros-gateway.service -f -# With bootstrap peers -./bin/network-cli config init --type gateway --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/QmXxx" +# Test HTTPS endpoint +curl https://your-domain.com/health ``` -### Running the Network +**Important Notes:** -Once configs are generated, start the complete stack with: +- The gateway will automatically obtain Let's Encrypt certificates on first start +- Certificates are cached in `/home/debros/.debros/tls-cache` +- Certificate renewal happens automatically +- Ensure your domain's DNS A record points to the server's public IP before enabling HTTPS + +### Service Management + +All services run as systemd units under the `debros` user. 
+ +#### Check Status ```bash -make dev +# View status of all services +dbn prod status + +# Or use systemctl directly +systemctl status debros-node-bootstrap +systemctl status debros-ipfs-bootstrap +systemctl status debros-gateway ``` -Or start individual components (in separate terminals): +#### View Logs ```bash -# Terminal 1 - Bootstrap node -go run ./cmd/node --config bootstrap.yaml +# View recent logs (last 50 lines) +dbn prod logs node -# Terminal 2 - Node 2 -go run ./cmd/node --config node2.yaml +# Follow logs in real-time +dbn prod logs node --follow -# Terminal 3 - Node 3 -go run ./cmd/node --config node3.yaml - -# Terminal 4 - Gateway -go run ./cmd/gateway --config gateway.yaml +# View specific service logs +dbn prod logs ipfs --follow +dbn prod logs ipfs-cluster --follow +dbn prod logs rqlite --follow +dbn prod logs olric --follow +dbn prod logs gateway --follow ``` -### Running Multiple Nodes on the Same Machine +**Available log service names:** -The default `make dev` creates a 3-node setup. For additional nodes, generate individual configs: +- `node` - DeBros Network Node (bootstrap or regular) +- `ipfs` - IPFS Daemon +- `ipfs-cluster` - IPFS Cluster Service +- `rqlite` - RQLite Database +- `olric` - Olric Cache Server +- `gateway` - DeBros Gateway + +**Note:** The `logs` command uses journalctl and accepts the full systemd service name. Use the short names above for convenience. 
+ +#### Service Control Commands + +Use `dbn prod` commands for convenient service management: ```bash -# Generate additional node configs with unique ports -./bin/network-cli config init --type node --name node4.yaml \ - --listen-port 4004 --rqlite-http-port 5004 --rqlite-raft-port 7004 \ - --join localhost:5001 \ - --bootstrap-peers "/ip4/127.0.0.1/tcp/4001/p2p/" +# Start all services +sudo dbn prod start -# Start the additional node -go run ./cmd/node --config node4.yaml +# Stop all services +sudo dbn prod stop + +# Restart all services +sudo dbn prod restart ``` -#### Key Points for Multiple Nodes - -- **Each node needs unique ports**: P2P port, RQLite HTTP port, and RQLite Raft port must all be different -- **Join address**: Non-bootstrap nodes need `rqlite_join_address` pointing to the bootstrap or an existing node (use Raft port) -- **Bootstrap peers**: All nodes need the bootstrap node's multiaddr in `discovery.bootstrap_peers` -- **Config files**: Store all configs in `~/.debros/` with different filenames -- **--config flag**: Specify which config file to load - -⚠️ **Common Mistake - Same Ports:** -If all nodes use the same ports (e.g., 5001, 7001), they will try to bind to the same addresses and fail to communicate. 
Verify each node has unique ports: +Or use `systemctl` directly for more control: ```bash -# Bootstrap -grep "rqlite_port\|rqlite_raft_port" ~/.debros/bootstrap.yaml -# Should show: rqlite_port: 5001, rqlite_raft_port: 7001 +# Restart all services +sudo systemctl restart debros-* -# Node 2 -grep "rqlite_port\|rqlite_raft_port" ~/.debros/node2.yaml -# Should show: rqlite_port: 5002, rqlite_raft_port: 7002 +# Restart specific service +sudo systemctl restart debros-node-bootstrap -# Node 3 -grep "rqlite_port\|rqlite_raft_port" ~/.debros/node3.yaml -# Should show: rqlite_port: 5003, rqlite_raft_port: 7003 +# Stop services +sudo systemctl stop debros-* + +# Start services +sudo systemctl start debros-* + +# Enable services (start on boot) +sudo systemctl enable debros-* ``` -If ports are wrong, regenerate the config with `--force`: +### Complete Production Commands Reference + +#### Installation & Upgrade ```bash -./bin/network-cli config init --type node --name node.yaml \ - --listen-port 4002 --rqlite-http-port 5002 --rqlite-raft-port 7002 \ - --join localhost:5001 --bootstrap-peers '' --force +# Install bootstrap node +sudo dbn prod install --bootstrap [--domain DOMAIN] [--branch BRANCH] + +# Example: secondary bootstrap on nightly (skips resource checks) +sudo dbn prod install --bootstrap --branch nightly --domain DOMAIN --vps-ip IP --ignore-resource-checks --bootstrap-join ADDR + +# Install secondary node +sudo dbn prod install --vps-ip IP --peers ADDRS [--domain DOMAIN] [--branch BRANCH] + +# Install secondary bootstrap +sudo dbn prod install --bootstrap --vps-ip IP --bootstrap-join ADDR [--domain DOMAIN] [--branch BRANCH] + +# Upgrade installation +sudo dbn prod upgrade [--restart] [--branch BRANCH]
- -#### Validation Features - -- **Strict YAML Parsing:** Unknown configuration keys are rejected with helpful error messages -- **Format Validation:** Multiaddrs, ports, durations, and other formats are validated for correctness -- **Cross-Field Validation:** Configuration constraints (e.g., bootstrap nodes don't join clusters) are enforced -- **Aggregated Error Reporting:** All validation errors are reported together, not one-by-one - -#### Common Validation Errors - -**Missing or Invalid `node.type`** -``` -node.type: must be one of [bootstrap node]; got "invalid" -``` -Solution: Set `type: "bootstrap"` or `type: "node"` - -**Invalid Bootstrap Peer Format** -``` -discovery.bootstrap_peers[0]: invalid multiaddr; expected /ip{4,6}/.../tcp//p2p/ -discovery.bootstrap_peers[0]: missing /p2p/ component -``` -Solution: Use full multiaddr format: `/ip4/127.0.0.1/tcp/4001/p2p/12D3KooW...` - -**Port Conflicts** -``` -database.rqlite_raft_port: must differ from database.rqlite_port (5001) -``` -Solution: Use different ports for HTTP and Raft (e.g., 5001 and 7001) - -**RQLite Join Address Issues (Nodes)** -``` -database.rqlite_join_address: required for node type (non-bootstrap) -database.rqlite_join_address: invalid format; expected host:port -``` -Solution: Non-bootstrap nodes must specify where to join the cluster. Use Raft port: `127.0.0.1:7001` - -**Bootstrap Nodes Cannot Join** -``` -database.rqlite_join_address: must be empty for bootstrap type -``` -Solution: Bootstrap nodes should have `rqlite_join_address: ""` - -**Invalid Listen Addresses** -``` -node.listen_addresses[0]: invalid TCP port 99999; port must be between 1 and 65535 -``` -Solution: Use valid ports [1-65535], e.g., `/ip4/0.0.0.0/tcp/4001` - -**Unknown Configuration Keys** -``` -invalid config: yaml: unmarshal errors: - line 42: field migrations_path not found in type config.DatabaseConfig -``` -Solution: Remove unsupported keys. Supported keys are documented in the YAML Reference section above. 
- ---- - -## CLI Usage - -### Authentication Commands +#### Service Management ```bash -./bin/network-cli auth login # Authenticate with wallet -./bin/network-cli auth whoami # Show current authentication status -./bin/network-cli auth status # Show detailed authentication info -./bin/network-cli auth logout # Clear stored credentials +# Check service status (no sudo required) +dbn prod status + +# Start all services +sudo dbn prod start + +# Stop all services +sudo dbn prod stop + +# Restart all services +sudo dbn prod restart ``` -### Network Operations +#### Logs ```bash -./bin/network-cli health # Check network health -./bin/network-cli status # Get network status -./bin/network-cli peers # List connected peers +# View recent logs +dbn prod logs + +# Follow logs in real-time +dbn prod logs --follow + +# Available services: node, ipfs, ipfs-cluster, rqlite, olric, gateway ``` -### Database Operations +#### Uninstall ```bash -./bin/network-cli query "SELECT * FROM table" # Execute SQL -./bin/network-cli query "CREATE TABLE users (id INTEGER)" # DDL operations +# Remove all services (preserves data and configs) +sudo dbn prod uninstall ``` -### Pub/Sub Messaging +### Directory Structure + +Production installations use `/home/debros/.debros/`: + +``` +/home/debros/.debros/ +├── configs/ # Configuration files +│ ├── bootstrap.yaml # Bootstrap node config +│ ├── node.yaml # Regular node config +│ ├── gateway.yaml # Gateway config +│ └── olric/ # Olric cache config +├── data/ # Runtime data +│ ├── bootstrap/ # Bootstrap node data +│ │ ├── ipfs/ # IPFS repository +│ │ ├── ipfs-cluster/ # IPFS Cluster data +│ │ └── rqlite/ # RQLite database +│ └── node/ # Regular node data +├── secrets/ # Secrets and keys +│ ├── cluster-secret # IPFS Cluster secret +│ └── swarm.key # IPFS swarm key +├── logs/ # Service logs +│ ├── node-bootstrap.log +│ ├── ipfs-bootstrap.log +│ └── gateway.log +└── .branch # Saved branch preference +``` + +### Uninstall + +Remove all production services 
(preserves data and configs): ```bash -./bin/network-cli pubsub publish # Send message -./bin/network-cli pubsub subscribe [duration] # Listen for messages -./bin/network-cli pubsub topics # List active topics +sudo dbn prod uninstall ``` -### CLI Options +This stops and removes all systemd services but keeps `/home/debros/.debros/` intact. You'll be prompted to confirm before uninstalling. + +**To completely remove everything:** ```bash ---format json # Output in JSON format ---timeout 30s # Set operation timeout ---bootstrap # Override bootstrap peer ---production # Use production bootstrap peers +sudo dbn prod uninstall +sudo rm -rf /home/debros/.debros ``` -### Database Operations (Gateway REST) +### Production Troubleshooting -```http -POST /v1/rqlite/exec # Body: {"sql": "INSERT/UPDATE/DELETE/DDL ...", "args": [...]} -POST /v1/rqlite/find # Body: {"table":"...", "criteria":{"col":val,...}, "options":{...}} -POST /v1/rqlite/find-one # Body: same as /find, returns a single row (404 if not found) -POST /v1/rqlite/select # Body: {"table":"...", "select":[...], "where":[...], "joins":[...], "order_by":[...], "limit":N, "offset":N, "one":false} -POST /v1/rqlite/transaction # Body: {"ops":[{"kind":"exec|query","sql":"...","args":[...]}], "return_results": true} -POST /v1/rqlite/query # Body: {"sql": "SELECT ...", "args": [..]} (legacy-friendly SELECT) -GET /v1/rqlite/schema # Returns tables/views + create SQL -POST /v1/rqlite/create-table # Body: {"schema": "CREATE TABLE ..."} -POST /v1/rqlite/drop-table # Body: {"table": "table_name"} -``` - -Common workflows: +#### Services Not Starting ```bash -# Exec (INSERT/UPDATE/DELETE/DDL) -curl -X POST "$GW/v1/rqlite/exec" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{"sql":"INSERT INTO users(name,email) VALUES(?,?)","args":["Alice","alice@example.com"]}' +# Check service status +systemctl status debros-node-bootstrap -# Find (criteria + options) -curl -X POST "$GW/v1/rqlite/find" \ - 
-H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{ - "table":"users", - "criteria":{"active":true}, - "options":{"select":["id","email"],"order_by":["created_at DESC"],"limit":25} - }' +# View detailed logs +journalctl -u debros-node-bootstrap -n 100 -# Select (fluent builder via JSON) -curl -X POST "$GW/v1/rqlite/select" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{ - "table":"orders o", - "select":["o.id","o.total","u.email AS user_email"], - "joins":[{"kind":"INNER","table":"users u","on":"u.id = o.user_id"}], - "where":[{"conj":"AND","expr":"o.total > ?","args":[100]}], - "order_by":["o.created_at DESC"], - "limit":10 - }' - -# Transaction (atomic batch) -curl -X POST "$GW/v1/rqlite/transaction" \ - -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{ - "return_results": true, - "ops": [ - {"kind":"exec","sql":"INSERT INTO users(email) VALUES(?)","args":["bob@example.com"]}, - {"kind":"query","sql":"SELECT last_insert_rowid() AS id","args":[]} - ] - }' - -# Schema -curl "$GW/v1/rqlite/schema" -H "Authorization: Bearer $API_KEY" - -# DDL helpers -curl -X POST "$GW/v1/rqlite/create-table" -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{"schema":"CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)"}' -curl -X POST "$GW/v1/rqlite/drop-table" -H "Authorization: Bearer $API_KEY" -H 'Content-Type: application/json' \ - -d '{"table":"users"}' +# Check log files +tail -f /home/debros/.debros/logs/node-bootstrap.log ``` -### Authentication - -The CLI features an enhanced authentication system with explicit command support and automatic wallet detection: - -#### Explicit Authentication Commands - -Use the `auth` command to manage your credentials: +#### Configuration Issues ```bash -# Authenticate with your wallet (opens browser for signature) -./bin/network-cli auth login +# Verify configs exist +ls -la 
/home/debros/.debros/configs/ -# Check if you're authenticated -./bin/network-cli auth whoami - -# View detailed authentication info -./bin/network-cli auth status - -# Clear all stored credentials -./bin/network-cli auth logout +# Regenerate configs (preserves secrets) +sudo dbn prod upgrade --restart ``` -Credentials are stored securely in `~/.debros/credentials.json` with restricted file permissions (readable only by owner). +#### IPFS AutoConf Errors -#### Key Features - -- **Explicit Authentication:** Use `auth login` command to authenticate with your wallet -- **Automatic Authentication:** Commands that require auth (query, pubsub, etc.) automatically prompt if needed -- **Multi-Wallet Management:** Seamlessly switch between multiple wallet credentials -- **Persistent Sessions:** Wallet credentials are automatically saved and restored between sessions -- **Enhanced User Experience:** Streamlined authentication flow with better error handling and user feedback - -#### Automatic Authentication Flow - -When using operations that require authentication (query, pubsub publish/subscribe), the CLI will automatically: - -1. Check for existing valid credentials -2. Prompt for wallet authentication if needed -3. Handle signature verification -4. Persist credentials for future use - -**Example with automatic authentication:** +If you see "AutoConf.Enabled=false but 'auto' placeholder is used" errors, the upgrade process should fix this automatically. 
If not: ```bash -# First time - will prompt for wallet authentication when needed -./bin/network-cli pubsub publish notifications "Hello World" +# Re-run upgrade to fix IPFS config +sudo dbn prod upgrade --restart ``` -#### Environment Variables - -You can override the gateway URL used for authentication: +#### Port Conflicts ```bash -export DEBROS_GATEWAY_URL="http://localhost:6001" -./bin/network-cli auth login +# Check what's using ports +sudo lsof -i :4001 # P2P port +sudo lsof -i :5001 # RQLite HTTP +sudo lsof -i :6001 # Gateway ``` ---- +#### Reset Installation + +To start fresh (⚠️ **destroys all data**): + +```bash +sudo dbn prod uninstall +sudo rm -rf /home/debros/.debros +sudo dbn prod install --bootstrap --branch nightly +``` + +## Components & Ports + +- **Bootstrap node**: P2P `4001`, RQLite HTTP `5001`, Raft `7001` +- **Additional nodes** (`node2`, `node3`): Incrementing ports (`400{2,3}`, `500{2,3}`, `700{2,3}`) +- **Gateway**: HTTP `6001` exposes REST/WebSocket APIs +- **Data directory**: `~/.debros/` stores configs, identities, and RQLite data + +Use `make dev` for the complete stack or run binaries individually with `go run ./cmd/node --config ` and `go run ./cmd/gateway --config gateway.yaml`. + +## Configuration Cheatsheet + +All runtime configuration lives in `~/.debros/`. + +- `bootstrap.yaml`: `type: bootstrap`, optionally set `database.rqlite_join_address` to join another bootstrap's cluster +- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. 
`localhost:7001`) and include the bootstrap `discovery.bootstrap_peers` +- `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags + +Validation reminders: + +- HTTP and Raft ports must differ +- Non-bootstrap nodes require a join address and bootstrap peers +- Bootstrap nodes can optionally define a join address to synchronize with another bootstrap +- Multiaddrs must end with `/p2p/` + +Regenerate configs any time with `./bin/dbn config init --force`. + +## CLI Highlights + +All commands accept `--format json`, `--timeout `, and `--bootstrap `. + +- **Auth** + + ```bash + ./bin/dbn auth login + ./bin/dbn auth status + ./bin/dbn auth logout + ``` + +- **Network** + + ```bash + ./bin/dbn health + ./bin/dbn status + ./bin/dbn peers + ``` + +- **Database** + + ```bash + ./bin/dbn query "SELECT * FROM users" + ./bin/dbn query "CREATE TABLE users (id INTEGER PRIMARY KEY)" + ./bin/dbn transaction --file ops.json + ``` + +- **Pub/Sub** + + ```bash + ./bin/dbn pubsub publish + ./bin/dbn pubsub subscribe 30s + ./bin/dbn pubsub topics + ``` + +Credentials live at `~/.debros/credentials.json` with user-only permissions. ## HTTP Gateway -The DeBros Network includes a powerful HTTP/WebSocket gateway that provides a modern REST API and WebSocket interface over the P2P network, featuring an enhanced authentication system with multi-wallet support. +Start locally with `make run-gateway` or `go run ./cmd/gateway --config gateway.yaml`. 
-### Quick Start +Environment overrides: ```bash -make run-gateway -# Or manually: -go run ./cmd/gateway -``` - -### Configuration - -The gateway can be configured via configs/gateway.yaml and environment variables (env override YAML): - -```bash -# Basic Configuration export GATEWAY_ADDR="0.0.0.0:6001" export GATEWAY_NAMESPACE="my-app" -export GATEWAY_BOOTSTRAP_PEERS="/ip4/127.0.0.1/tcp/4001/p2p/YOUR_PEER_ID" - -# Authentication Configuration +export GATEWAY_BOOTSTRAP_PEERS="/ip4/127.0.0.1/tcp/4001/p2p/YOUR_PEER_ID" export GATEWAY_REQUIRE_AUTH=true export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2" ``` -### Enhanced Authentication System +Common endpoints (see `openapi/gateway.yaml` for the full spec): -The gateway features a significantly improved authentication system with the following capabilities: - -#### Key Features - -- **Automatic Authentication:** No manual auth commands required - authentication happens automatically when needed -- **Multi-Wallet Support:** Seamlessly manage multiple wallet credentials with automatic switching -- **Persistent Sessions:** Wallet credentials are automatically saved and restored -- **Enhanced User Experience:** Streamlined authentication flow with better error handling - -#### Authentication Methods - -**Wallet-Based Authentication (Ethereum EIP-191)** - -- Uses `personal_sign` for secure wallet verification -- Supports multiple wallets with automatic detection -- Addresses are case-insensitive with normalized signature handling - -**JWT Tokens** - -- Issued by the gateway with configurable expiration -- JWKS endpoints available at `/v1/auth/jwks` and `/.well-known/jwks.json` -- Automatic refresh capability - -**API Keys** - -- Support for pre-configured API keys via `Authorization: Bearer ` or `X-API-Key` headers -- Optional namespace mapping for multi-tenant applications - -### API Endpoints - -#### Health & Status - -```http -GET /health # Basic health check -GET /v1/health # Detailed health status -GET /v1/status # Network
status -GET /v1/version # Version information -``` - -#### Authentication (Public Endpoints) - -```http -POST /v1/auth/challenge # Generate wallet challenge -POST /v1/auth/verify # Verify wallet signature -POST /v1/auth/register # Register new wallet -POST /v1/auth/refresh # Refresh JWT token -POST /v1/auth/logout # Clear authentication -GET /v1/auth/whoami # Current auth status -POST /v1/auth/api-key # Generate API key (authenticated) -``` - -#### RQLite HTTP ORM Gateway (/v1/db) - -The gateway now exposes a full HTTP interface over the Go ORM-like client (see `pkg/rqlite/gateway.go`) so you can build SDKs in any language. - -- Base path: `/v1/db` -- Endpoints: - - `POST /v1/rqlite/exec` — Execute write/DDL SQL; returns `{ rows_affected, last_insert_id }` - - `POST /v1/rqlite/find` — Map-based criteria; returns `{ items: [...], count: N }` - - `POST /v1/rqlite/find-one` — Single row; 404 if not found - - `POST /v1/rqlite/select` — Fluent SELECT via JSON (joins, where, order, group, limit, offset) - - `POST /v1/rqlite/transaction` — Atomic batch of exec/query ops, optional per-op results - - `POST /v1/rqlite/query` — Arbitrary SELECT (legacy-friendly), returns `items` - - `GET /v1/rqlite/schema` — List user tables/views + create SQL - - `POST /v1/rqlite/create-table` — Convenience for DDL - - `POST /v1/rqlite/drop-table` — Safe drop (identifier validated) - -Payload examples are shown in the [Database Operations (Gateway REST)](#database-operations-gateway-rest) section. 
- -#### Network Operations - -```http -GET /v1/network/status # Network status -GET /v1/network/peers # Connected peers -POST /v1/network/connect # Connect to peer -POST /v1/network/disconnect # Disconnect from peer -``` - -#### Pub/Sub Messaging - -**WebSocket Interface** - -```http -GET /v1/pubsub/ws?topic= # WebSocket connection for real-time messaging -``` - -**REST Interface** - -```http -POST /v1/pubsub/publish # Publish message to topic -GET /v1/pubsub/topics # List active topics -``` - ---- - -## SDK Authoring Guide - -### Base concepts - -- OpenAPI: a machine-readable spec is available at `openapi/gateway.yaml` for SDK code generation. -- **Auth**: send `X-API-Key: ` or `Authorization: Bearer ` with every request. -- **Versioning**: all endpoints are under `/v1/`. -- **Responses**: mutations return `{status:"ok"}`; queries/lists return JSON; errors return `{ "error": "message" }` with proper HTTP status. - -### Key HTTP endpoints for SDKs - -- **Database** - - Exec: `POST /v1/rqlite/exec` `{sql, args?}` → `{rows_affected,last_insert_id}` - - Find: `POST /v1/rqlite/find` `{table, criteria, options?}` → `{items,count}` - - FindOne: `POST /v1/rqlite/find-one` `{table, criteria, options?}` → single object or 404 - - Select: `POST /v1/rqlite/select` `{table, select?, joins?, where?, order_by?, group_by?, limit?, offset?, one?}` - - Transaction: `POST /v1/rqlite/transaction` `{ops:[{kind,sql,args?}], return_results?}` - - Query: `POST /v1/rqlite/query` `{sql, args?}` → `{items,count}` - - Schema: `GET /v1/rqlite/schema` - - Create Table: `POST /v1/rqlite/create-table` `{schema}` - - Drop Table: `POST /v1/rqlite/drop-table` `{table}` -- **PubSub** - - WS Subscribe: `GET /v1/pubsub/ws?topic=` - - Publish: `POST /v1/pubsub/publish` `{topic, data_base64}` → `{status:"ok"}` - - Topics: `GET /v1/pubsub/topics` → `{topics:[...]}` - ---- +- `GET /health`, `GET /v1/status`, `GET /v1/version` +- `POST /v1/auth/challenge`, `POST /v1/auth/verify`, `POST /v1/auth/refresh` +- 
`POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction` +- `GET /v1/rqlite/schema` +- `POST /v1/pubsub/publish`, `GET /v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=` +- `POST /v1/storage/upload`, `POST /v1/storage/pin`, `GET /v1/storage/status/:cid`, `GET /v1/storage/get/:cid`, `DELETE /v1/storage/unpin/:cid` ## Troubleshooting -### Configuration & Permissions +- **Config directory errors**: Ensure `~/.debros/` exists, is writable, and has free disk space (`touch ~/.debros/test && rm ~/.debros/test`). +- **Port conflicts**: Inspect with `lsof -i :4001` (or other ports) and stop conflicting processes or regenerate configs with new ports. +- **Missing configs**: Run `./bin/dbn config init` before starting nodes. +- **Cluster join issues**: Confirm the bootstrap node is running, `peer.info` multiaddr matches `bootstrap_peers`, and firewall rules allow the P2P ports. -**Error: "Failed to create/access config directory"** +## Resources -This happens when DeBros cannot access or create `~/.debros/` directory. - -**Causes:** -1. Home directory is not writable -2. Home directory doesn't exist -3. Filesystem is read-only (sandboxed/containerized environment) -4. Permission denied (running with wrong user/umask) - -**Solutions:** - -```bash -# Check home directory exists and is writable -ls -ld ~ -touch ~/test-write && rm ~/test-write - -# Check umask (should be 0022 or 0002) -umask - -# If umask is too restrictive, change it -umask 0022 - -# Check disk space -df -h ~ - -# For containerized environments, ensure /home/ is mounted with write permissions -docker run -v /home:/home --user $(id -u):$(id -g) debros-network -``` - -**Error: "Config file not found at ~/.debros/node.yaml"** - -The node requires a config file to exist before starting. 
- -**Solution:** - -Generate config files first: - -```bash -# Build CLI -make build - -# Generate configs -./bin/network-cli config init --type bootstrap -./bin/network-cli config init --type node --bootstrap-peers '' -./bin/network-cli config init --type gateway -``` - -### Node Startup Issues - -**Error: "node.data_dir: parent directory not writable"** - -The data directory parent is not accessible. - -**Solution:** - -Ensure `~/.debros` is writable and has at least 10GB free space: - -```bash -# Check permissions -ls -ld ~/.debros - -# Check available space -df -h ~/.debros - -# Recreate if corrupted -rm -rf ~/.debros -./bin/network-cli config init --type bootstrap -``` - -**Error: "failed to create data directory"** - -The node cannot create its data directory in `~/.debros`. - -**Causes:** -1. `~/.debros` is not writable -2. Parent directory path in config uses `~` which isn't expanded properly -3. Disk is full - -**Solutions:** - -```bash -# Check ~/.debros exists and is writable -mkdir -p ~/.debros -ls -ld ~/.debros - -# Verify data_dir in config uses ~ (e.g., ~/.debros/node) -cat ~/.debros/node.yaml | grep data_dir - -# Check disk space -df -h ~ - -# Ensure user owns ~/.debros -chown -R $(whoami) ~/.debros - -# Retry node startup -make run-node -``` - -**Error: "stat ~/.debros: no such file or directory"** - -**Port Already in Use** - -If you get "address already in use" errors: - -```bash -# Find processes using ports -lsof -i :4001 # P2P port -lsof -i :5001 # RQLite HTTP -lsof -i :7001 # RQLite Raft - -# Kill if needed -kill -9 - -# Or use different ports in config -./bin/network-cli config init --type node --listen-port 4002 --rqlite-http-port 5002 --rqlite-raft-port 7002 -``` - -### Common Configuration Errors - -**Error: "discovery.bootstrap_peers: required for node type"** - -Nodes (non-bootstrap) must specify bootstrap peers to discover the network. 
- -**Solution:** - -Generate node config with bootstrap peers: - -```bash -./bin/network-cli config init --type node --bootstrap-peers '/ip4/127.0.0.1/tcp/4001/p2p/12D3KooW...' -``` - -**Error: "database.rqlite_join_address: required for node type"** - -Non-bootstrap nodes must specify which node to join in the Raft cluster. - -**Solution:** - -Generate config with join address: - -```bash -./bin/network-cli config init --type node --join localhost:5001 -``` - -**Error: "database.rqlite_raft_port: must differ from database.rqlite_port"** - -HTTP and Raft ports cannot be the same. - -**Solution:** - -Use different ports (RQLite HTTP and Raft must be on different ports): - -```bash -./bin/network-cli config init --type node \ - --rqlite-http-port 5001 \ - --rqlite-raft-port 7001 -``` - -### Peer Discovery Issues - -If nodes can't find each other: - -1. **Verify bootstrap node is running:** - ```bash - ./bin/network-cli health - ./bin/network-cli peers - ``` - -2. **Check bootstrap peer multiaddr is correct:** - ```bash - cat ~/.debros/bootstrap/peer.info # On bootstrap node - # Should match value in other nodes' discovery.bootstrap_peers - ``` - -3. **Ensure all nodes have same bootstrap peers in config** - -4. 
**Check firewall/network:** - ```bash - # Verify P2P port is open - nc -zv 127.0.0.1 4001 - ``` - ---- - -## License \ No newline at end of file +- Go modules: `go mod tidy`, `go test ./...` +- Automation: `make build`, `make dev`, `make run-gateway`, `make lint` +- API reference: `openapi/gateway.yaml` +- Code of Conduct: [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) diff --git a/cmd/cli/main.go b/cmd/cli/main.go index e396013..8149812 100644 --- a/cmd/cli/main.go +++ b/cmd/cli/main.go @@ -34,7 +34,7 @@ func main() { switch command { case "version": - fmt.Printf("network-cli %s", version) + fmt.Printf("dbn %s", version) if commit != "" { fmt.Printf(" (commit %s)", commit) } @@ -44,74 +44,18 @@ func main() { fmt.Println() return - // Environment commands - case "env": - cli.HandleEnvCommand(args) - case "devnet", "testnet", "local": - // Shorthand for switching environments - if len(args) > 0 && (args[0] == "enable" || args[0] == "switch") { - if err := cli.SwitchEnvironment(command); err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to switch environment: %v\n", err) - os.Exit(1) - } - env, _ := cli.GetActiveEnvironment() - fmt.Printf("✅ Switched to %s environment\n", command) - if env != nil { - fmt.Printf(" Gateway URL: %s\n", env.GatewayURL) - } - } else { - fmt.Fprintf(os.Stderr, "Usage: network-cli %s enable\n", command) - os.Exit(1) - } + // Development environment commands + case "dev": + cli.HandleDevCommand(args) - // Setup and service commands - case "setup": - cli.HandleSetupCommand(args) - case "service": - cli.HandleServiceCommand(args) + // Production environment commands + case "prod": + cli.HandleProdCommand(args) // Authentication commands case "auth": cli.HandleAuthCommand(args) - // Config commands - case "config": - cli.HandleConfigCommand(args) - - // Basic network commands - case "health": - cli.HandleHealthCommand(format, timeout) - case "peers": - cli.HandlePeersCommand(format, timeout) - case "status": - cli.HandleStatusCommand(format, timeout) - 
case "peer-id": - cli.HandlePeerIDCommand(format, timeout) - - // Query command - case "query": - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "Usage: network-cli query \n") - os.Exit(1) - } - cli.HandleQueryCommand(args[0], format, timeout) - - // PubSub commands - case "pubsub": - cli.HandlePubSubCommand(args, format, timeout) - - // Connect command - case "connect": - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "Usage: network-cli connect \n") - os.Exit(1) - } - cli.HandleConnectCommand(args[0], timeout) - - // RQLite commands - case "rqlite": - cli.HandleRQLiteCommand(args) - // Help case "help", "--help", "-h": showHelp() @@ -142,69 +86,49 @@ func parseGlobalFlags(args []string) { func showHelp() { fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n") - fmt.Printf("Usage: network-cli [args...]\n\n") + fmt.Printf("Usage: dbn [args...]\n\n") - fmt.Printf("🌍 Environment Management:\n") - fmt.Printf(" env list - List available environments\n") - fmt.Printf(" env current - Show current environment\n") - fmt.Printf(" env switch - Switch to environment (local, devnet, testnet)\n") - fmt.Printf(" devnet enable - Shorthand for switching to devnet\n") - fmt.Printf(" testnet enable - Shorthand for switching to testnet\n\n") + fmt.Printf("💻 Local Development:\n") + fmt.Printf(" dev up - Start full local dev environment\n") + fmt.Printf(" dev down - Stop all dev services\n") + fmt.Printf(" dev status - Show status of dev services\n") + fmt.Printf(" dev logs - View dev component logs\n") + fmt.Printf(" dev help - Show dev command help\n\n") - fmt.Printf("🚀 Setup & Services:\n") - fmt.Printf(" setup [--force] - Interactive VPS setup (Linux only, requires root)\n") - fmt.Printf(" service start - Start service (node, gateway, all)\n") - fmt.Printf(" service stop - Stop service\n") - fmt.Printf(" service restart - Restart service\n") - fmt.Printf(" service status [target] - Show service status\n") - fmt.Printf(" service logs [opts] - View service logs 
(--follow, --since=1h)\n\n") + fmt.Printf("🚀 Production Deployment:\n") + fmt.Printf(" prod install [--bootstrap] - Full production bootstrap (requires root/sudo)\n") + fmt.Printf(" prod upgrade - Upgrade existing installation\n") + fmt.Printf(" prod status - Show production service status\n") + fmt.Printf(" prod start - Start all production services (requires root/sudo)\n") + fmt.Printf(" prod stop - Stop all production services (requires root/sudo)\n") + fmt.Printf(" prod restart - Restart all production services (requires root/sudo)\n") + fmt.Printf(" prod logs - View production service logs\n") + fmt.Printf(" prod uninstall - Remove production services (requires root/sudo)\n") + fmt.Printf(" prod help - Show prod command help\n\n") fmt.Printf("🔐 Authentication:\n") fmt.Printf(" auth login - Authenticate with wallet\n") fmt.Printf(" auth logout - Clear stored credentials\n") fmt.Printf(" auth whoami - Show current authentication\n") - fmt.Printf(" auth status - Show detailed auth info\n\n") - - fmt.Printf("⚙️ Configuration:\n") - fmt.Printf(" config init [--type ] - Generate configs (full stack or single)\n") - fmt.Printf(" config validate --name - Validate config file\n\n") - - fmt.Printf("🌐 Network Commands:\n") - fmt.Printf(" health - Check network health\n") - fmt.Printf(" peers - List connected peers\n") - fmt.Printf(" status - Show network status\n") - fmt.Printf(" peer-id - Show this node's peer ID\n") - fmt.Printf(" connect - Connect to peer\n\n") - - fmt.Printf("🗄️ Database:\n") - fmt.Printf(" query 🔐 Execute database query\n\n") - - fmt.Printf("🔧 RQLite:\n") - fmt.Printf(" rqlite fix 🔧 Fix misconfigured join address and clean raft state\n\n") - - fmt.Printf("📡 PubSub:\n") - fmt.Printf(" pubsub publish 🔐 Publish message\n") - fmt.Printf(" pubsub subscribe 🔐 Subscribe to topic\n") - fmt.Printf(" pubsub topics 🔐 List topics\n\n") + fmt.Printf(" auth status - Show detailed auth info\n") + fmt.Printf(" auth help - Show auth command help\n\n") 
fmt.Printf("Global Flags:\n") fmt.Printf(" -f, --format - Output format: table, json (default: table)\n") - fmt.Printf(" -t, --timeout - Operation timeout (default: 30s)\n\n") - - fmt.Printf("🔐 = Requires authentication (auto-prompts if needed)\n\n") + fmt.Printf(" -t, --timeout - Operation timeout (default: 30s)\n") + fmt.Printf(" --help, -h - Show this help message\n\n") fmt.Printf("Examples:\n") - fmt.Printf(" # Switch to devnet\n") - fmt.Printf(" network-cli devnet enable\n\n") + fmt.Printf(" # Authenticate\n") + fmt.Printf(" dbn auth login\n\n") - fmt.Printf(" # Authenticate and query\n") - fmt.Printf(" network-cli auth login\n") - fmt.Printf(" network-cli query \"SELECT * FROM users LIMIT 10\"\n\n") + fmt.Printf(" # Start local dev environment\n") + fmt.Printf(" dbn dev up\n") + fmt.Printf(" dbn dev status\n\n") - fmt.Printf(" # Setup VPS (Linux only)\n") - fmt.Printf(" sudo network-cli setup\n\n") - - fmt.Printf(" # Manage services\n") - fmt.Printf(" sudo network-cli service status all\n") - fmt.Printf(" sudo network-cli service logs node --follow\n") + fmt.Printf(" # Production deployment (requires root/sudo)\n") + fmt.Printf(" sudo dbn prod install --bootstrap\n") + fmt.Printf(" sudo dbn prod upgrade\n") + fmt.Printf(" dbn prod status\n") + fmt.Printf(" dbn prod logs node --follow\n") } diff --git a/cmd/gateway/config.go b/cmd/gateway/config.go index e10763c..1f76866 100644 --- a/cmd/gateway/config.go +++ b/cmd/gateway/config.go @@ -1,10 +1,12 @@ package main import ( + "flag" "fmt" "os" "path/filepath" "strings" + "time" "github.com/DeBrosOfficial/network/pkg/config" "github.com/DeBrosOfficial/network/pkg/gateway" @@ -39,24 +41,52 @@ func getEnvBoolDefault(key string, def bool) bool { } // parseGatewayConfig loads gateway.yaml from ~/.debros exclusively. +// It accepts an optional --config flag for absolute paths (used by systemd services). 
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { + // Parse --config flag (optional, for systemd services that pass absolute paths) + configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.debros)") + flag.Parse() + // Determine config path - configPath, err := config.DefaultPath("gateway.yaml") - if err != nil { - logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err)) - fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err) - os.Exit(1) + var configPath string + var err error + if *configFlag != "" { + // If --config flag is provided, use it (handles both absolute and relative paths) + if filepath.IsAbs(*configFlag) { + configPath = *configFlag + } else { + configPath, err = config.DefaultPath(*configFlag) + if err != nil { + logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err)) + fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err) + os.Exit(1) + } + } + } else { + // Default behavior: look for gateway.yaml in ~/.debros/data/, ~/.debros/configs/, or ~/.debros/ + configPath, err = config.DefaultPath("gateway.yaml") + if err != nil { + logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err)) + fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err) + os.Exit(1) + } } // Load YAML type yamlCfg struct { - ListenAddr string `yaml:"listen_addr"` - ClientNamespace string `yaml:"client_namespace"` - RQLiteDSN string `yaml:"rqlite_dsn"` - BootstrapPeers []string `yaml:"bootstrap_peers"` - EnableHTTPS bool `yaml:"enable_https"` - DomainName string `yaml:"domain_name"` - TLSCacheDir string `yaml:"tls_cache_dir"` + ListenAddr string `yaml:"listen_addr"` + ClientNamespace string `yaml:"client_namespace"` + RQLiteDSN string `yaml:"rqlite_dsn"` + BootstrapPeers []string `yaml:"bootstrap_peers"` + EnableHTTPS bool `yaml:"enable_https"` + DomainName string `yaml:"domain_name"` + 
TLSCacheDir string `yaml:"tls_cache_dir"` + OlricServers []string `yaml:"olric_servers"` + OlricTimeout string `yaml:"olric_timeout"` + IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"` + IPFSAPIURL string `yaml:"ipfs_api_url"` + IPFSTimeout string `yaml:"ipfs_timeout"` + IPFSReplicationFactor int `yaml:"ipfs_replication_factor"` } data, err := os.ReadFile(configPath) @@ -65,7 +95,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { zap.String("path", configPath), zap.Error(err)) fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath) - fmt.Fprintf(os.Stderr, "Generate it using: network-cli config init --type gateway\n") + fmt.Fprintf(os.Stderr, "Generate it using: dbn config init --type gateway\n") os.Exit(1) } @@ -79,13 +109,19 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { // Build config from YAML cfg := &gateway.Config{ - ListenAddr: ":6001", - ClientNamespace: "default", - BootstrapPeers: nil, - RQLiteDSN: "", - EnableHTTPS: false, - DomainName: "", - TLSCacheDir: "", + ListenAddr: ":6001", + ClientNamespace: "default", + BootstrapPeers: nil, + RQLiteDSN: "", + EnableHTTPS: false, + DomainName: "", + TLSCacheDir: "", + OlricServers: nil, + OlricTimeout: 0, + IPFSClusterAPIURL: "", + IPFSAPIURL: "", + IPFSTimeout: 0, + IPFSReplicationFactor: 0, } if v := strings.TrimSpace(y.ListenAddr); v != "" { @@ -125,6 +161,36 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config { } } + // Olric configuration + if len(y.OlricServers) > 0 { + cfg.OlricServers = y.OlricServers + } + if v := strings.TrimSpace(y.OlricTimeout); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + cfg.OlricTimeout = parsed + } else { + logger.ComponentWarn(logging.ComponentGeneral, "invalid olric_timeout, using default", zap.String("value", v), zap.Error(err)) + } + } + + // IPFS configuration + if v := strings.TrimSpace(y.IPFSClusterAPIURL); v != "" { + cfg.IPFSClusterAPIURL = v + } + if 
v := strings.TrimSpace(y.IPFSAPIURL); v != "" { + cfg.IPFSAPIURL = v + } + if v := strings.TrimSpace(y.IPFSTimeout); v != "" { + if parsed, err := time.ParseDuration(v); err == nil { + cfg.IPFSTimeout = parsed + } else { + logger.ComponentWarn(logging.ComponentGeneral, "invalid ipfs_timeout, using default", zap.String("value", v), zap.Error(err)) + } + } + if y.IPFSReplicationFactor > 0 { + cfg.IPFSReplicationFactor = y.IPFSReplicationFactor + } + // Validate configuration if errs := cfg.ValidateConfig(); len(errs) > 0 { fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs)) diff --git a/cmd/node/main.go b/cmd/node/main.go index 5d469b1..136af61 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -4,6 +4,7 @@ import ( "context" "flag" "fmt" + "net" "os" "os/signal" "path/filepath" @@ -66,23 +67,32 @@ func check_if_should_open_help(help *bool) { func select_data_dir_check(configName *string) { logger := setup_logger(logging.ComponentNode) - // Ensure config directory exists and is writable - _, err := config.EnsureConfigDir() - if err != nil { - logger.Error("Failed to ensure config directory", zap.Error(err)) - fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n") - fmt.Fprintf(os.Stderr, "Failed to create/access config directory: %v\n", err) - fmt.Fprintf(os.Stderr, "\nPlease ensure:\n") - fmt.Fprintf(os.Stderr, " 1. Home directory is accessible: %s\n", os.ExpandEnv("~")) - fmt.Fprintf(os.Stderr, " 2. You have write permissions to home directory\n") - fmt.Fprintf(os.Stderr, " 3. 
Disk space is available\n") - os.Exit(1) - } + var configPath string + var err error - configPath, err := config.DefaultPath(*configName) - if err != nil { - logger.Error("Failed to determine config path", zap.Error(err)) - os.Exit(1) + // Check if configName is an absolute path + if filepath.IsAbs(*configName) { + // Use absolute path directly + configPath = *configName + } else { + // Ensure config directory exists and is writable + _, err = config.EnsureConfigDir() + if err != nil { + logger.Error("Failed to ensure config directory", zap.Error(err)) + fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n") + fmt.Fprintf(os.Stderr, "Failed to create/access config directory: %v\n", err) + fmt.Fprintf(os.Stderr, "\nPlease ensure:\n") + fmt.Fprintf(os.Stderr, " 1. Home directory is accessible: %s\n", os.ExpandEnv("~")) + fmt.Fprintf(os.Stderr, " 2. You have write permissions to home directory\n") + fmt.Fprintf(os.Stderr, " 3. Disk space is available\n") + os.Exit(1) + } + + configPath, err = config.DefaultPath(*configName) + if err != nil { + logger.Error("Failed to determine config path", zap.Error(err)) + os.Exit(1) + } } if _, err := os.Stat(configPath); err != nil { @@ -92,8 +102,8 @@ func select_data_dir_check(configName *string) { fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n") fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath) fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n") - fmt.Fprintf(os.Stderr, " network-cli config init --type bootstrap\n") - fmt.Fprintf(os.Stderr, " network-cli config init --type node --bootstrap-peers ''\n") + fmt.Fprintf(os.Stderr, " dbn config init --type bootstrap\n") + fmt.Fprintf(os.Stderr, " dbn config init --type node --bootstrap-peers ''\n") os.Exit(1) } } @@ -128,7 +138,26 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error { // Save the peer ID to a file for CLI access (especially useful for bootstrap) peerID := n.GetPeerID() peerInfoFile := filepath.Join(dataDir, "peer.info") - 
peerMultiaddr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/p2p/%s", port, peerID) + + // Extract advertise IP from config (prefer http_adv_address, fallback to raft_adv_address) + advertiseIP := "0.0.0.0" // Default fallback + if cfg.Discovery.HttpAdvAddress != "" { + if host, _, err := net.SplitHostPort(cfg.Discovery.HttpAdvAddress); err == nil && host != "" && host != "localhost" { + advertiseIP = host + } + } else if cfg.Discovery.RaftAdvAddress != "" { + if host, _, err := net.SplitHostPort(cfg.Discovery.RaftAdvAddress); err == nil && host != "" && host != "localhost" { + advertiseIP = host + } + } + + // Determine IP protocol (IPv4 or IPv6) for multiaddr + ipProtocol := "ip4" + if ip := net.ParseIP(advertiseIP); ip != nil && ip.To4() == nil { + ipProtocol = "ip6" + } + + peerMultiaddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, advertiseIP, port, peerID) if err := os.WriteFile(peerInfoFile, []byte(peerMultiaddr), 0644); err != nil { logger.Error("Failed to save peer info: %v", zap.Error(err)) @@ -232,15 +261,24 @@ func main() { check_if_should_open_help(help) - // Check if config file exists + // Check if config file exists and determine path select_data_dir_check(configName) - // Load configuration from ~/.debros/node.yaml - configPath, err := config.DefaultPath(*configName) - if err != nil { - logger.Error("Failed to determine config path", zap.Error(err)) - fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err) - os.Exit(1) + // Determine config path (handle both absolute and relative paths) + // Note: select_data_dir_check already validated the path exists, so we can safely determine it here + var configPath string + var err error + if filepath.IsAbs(*configName) { + // Absolute path passed directly (e.g., from systemd service) + configPath = *configName + } else { + // Relative path - use DefaultPath which checks both ~/.debros/configs/ and ~/.debros/ + configPath, err = config.DefaultPath(*configName) + if err != nil { + logger.Error("Failed to determine 
config path", zap.Error(err)) + fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err) + os.Exit(1) + } } var cfg *config.Config @@ -255,10 +293,10 @@ func main() { // Set default advertised addresses if empty if cfg.Discovery.HttpAdvAddress == "" { - cfg.Discovery.HttpAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLitePort) + cfg.Discovery.HttpAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLitePort) } if cfg.Discovery.RaftAdvAddress == "" { - cfg.Discovery.RaftAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLiteRaftPort) + cfg.Discovery.RaftAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLiteRaftPort) } // Validate configuration diff --git a/e2e/auth_negative_test.go b/e2e/auth_negative_test.go new file mode 100644 index 0000000..130dc63 --- /dev/null +++ b/e2e/auth_negative_test.go @@ -0,0 +1,294 @@ +//go:build e2e + +package e2e + +import ( + "context" + "net/http" + "testing" + "time" + "unicode" +) + +func TestAuth_MissingAPIKey(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request without auth headers + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/network/status", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for missing auth, got %d (auth may not be enforced on this endpoint)", resp.StatusCode) + } +} + +func TestAuth_InvalidAPIKey(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with invalid API key + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + 
if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer invalid-key-xyz") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for invalid key, got %d", resp.StatusCode) + } +} + +func TestAuth_CacheWithoutAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request cache endpoint without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/cache/health", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should fail with 401 or 403 + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403 for cache without auth, got %d", status) + } +} + +func TestAuth_StorageWithoutAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request storage endpoint without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/storage/status/QmTest", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should fail with 401 or 403 + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403 for storage without auth, got %d", status) + } +} + +func TestAuth_RQLiteWithoutAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request rqlite endpoint without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/rqlite/schema", + SkipAuth: true, + } + 
+ _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should fail with 401 or 403 + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403 for rqlite without auth, got %d", status) + } +} + +func TestAuth_MalformedBearerToken(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with malformed bearer token + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + // Missing "Bearer " prefix + req.Header.Set("Authorization", "invalid-token-format") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for malformed token, got %d", resp.StatusCode) + } +} + +func TestAuth_ExpiredJWT(t *testing.T) { + // Skip if JWT is not being used + if GetJWT() == "" && GetAPIKey() == "" { + t.Skip("No JWT or API key configured") + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // This test would require an expired JWT token + // For now, test with a clearly invalid JWT structure + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer expired.jwt.token") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != 
http.StatusForbidden { + t.Logf("warning: expected 401/403 for expired JWT, got %d", resp.StatusCode) + } +} + +func TestAuth_EmptyBearerToken(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with empty bearer token + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer ") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for empty token, got %d", resp.StatusCode) + } +} + +func TestAuth_DuplicateAuthHeaders(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with both API key and invalid JWT + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/cache/health", + Headers: map[string]string{ + "Authorization": "Bearer " + GetAPIKey(), + "X-API-Key": GetAPIKey(), + }, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should succeed if API key is valid + if status != http.StatusOK { + t.Logf("request with both headers returned %d", status) + } +} + +func TestAuth_CaseSensitiveAPIKey(t *testing.T) { + if GetAPIKey() == "" { + t.Skip("No API key configured") + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with incorrectly cased API key + apiKey := GetAPIKey() + incorrectKey := "" + for i, ch := range apiKey { + if i%2 == 0 && unicode.IsLetter(ch) { + incorrectKey += string(unicode.ToUpper(ch)) // Convert to uppercase + } else { + incorrectKey += string(ch) + } + 
} + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer "+incorrectKey) + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // API keys should be case-sensitive + if resp.StatusCode == http.StatusOK { + t.Logf("warning: API key check may not be case-sensitive (got 200)") + } +} + +func TestAuth_HealthEndpointNoAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Health endpoint at /health should not require auth + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should succeed without auth + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected 200 for /health without auth, got %d", resp.StatusCode) + } +} diff --git a/e2e/cache_http_test.go b/e2e/cache_http_test.go new file mode 100644 index 0000000..6f4a3ed --- /dev/null +++ b/e2e/cache_http_test.go @@ -0,0 +1,511 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" +) + +func TestCache_Health(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/cache/health", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("health check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp 
map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status"] != "ok" { + t.Fatalf("expected status 'ok', got %v", resp["status"]) + } + + if resp["service"] != "olric" { + t.Fatalf("expected service 'olric', got %v", resp["service"]) + } +} + +func TestCache_PutGet(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "test-key" + value := "test-value" + + // Put value + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + body, status, err := putReq.Do(ctx) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body)) + } + + // Get value + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + body, status, err = getReq.Do(ctx) + if err != nil { + t.Fatalf("get failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var getResp map[string]interface{} + if err := DecodeJSON(body, &getResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if getResp["value"] != value { + t.Fatalf("expected value %q, got %v", value, getResp["value"]) + } +} + +func TestCache_PutGetJSON(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "json-key" + jsonValue := map[string]interface{}{ + "name": "John", + "age": 30, + "tags": []string{"developer", "golang"}, + } + + // Put JSON value + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: 
GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": jsonValue, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Get JSON value + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + body, status, err := getReq.Do(ctx) + if err != nil { + t.Fatalf("get failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var getResp map[string]interface{} + if err := DecodeJSON(body, &getResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + retrievedValue := getResp["value"].(map[string]interface{}) + if retrievedValue["name"] != jsonValue["name"] { + t.Fatalf("expected name %q, got %v", jsonValue["name"], retrievedValue["name"]) + } + if retrievedValue["age"] != float64(30) { + t.Fatalf("expected age 30, got %v", retrievedValue["age"]) + } +} + +func TestCache_Delete(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "delete-key" + value := "delete-value" + + // Put value + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + + // Delete value + deleteReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/delete", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = deleteReq.Do(ctx) + if err != nil { + t.Fatalf("delete failed: 
%v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Verify deletion + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + // Should get 404 for missing key + if status != http.StatusNotFound { + t.Fatalf("expected status 404 for deleted key, got %d", status) + } +} + +func TestCache_TTL(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "ttl-key" + value := "ttl-value" + + // Put value with TTL + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + "ttl": "2s", + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil { + t.Fatalf("put with TTL failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Verify value exists + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("get immediately after put failed: status %d, err %v", status, err) + } + + // Wait for TTL expiry (2 seconds + buffer) + Delay(2500) + + // Verify value is expired + _, status, err = getReq.Do(ctx) + if status != http.StatusNotFound { + t.Logf("warning: TTL expiry may not be fully implemented; got status %d", status) + } +} + +func TestCache_Scan(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + + // Put multiple keys + keys := []string{"user-1", "user-2", "session-1", 
"session-2"} + for _, key := range keys { + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": "value-" + key, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + } + + // Scan all keys + scanReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/scan", + Body: map[string]interface{}{ + "dmap": dmap, + }, + } + + body, status, err := scanReq.Do(ctx) + if err != nil { + t.Fatalf("scan failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var scanResp map[string]interface{} + if err := DecodeJSON(body, &scanResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + keysResp := scanResp["keys"].([]interface{}) + if len(keysResp) < 4 { + t.Fatalf("expected at least 4 keys, got %d", len(keysResp)) + } +} + +func TestCache_ScanWithRegex(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + + // Put keys with different patterns + keys := []string{"user-1", "user-2", "session-1", "session-2"} + for _, key := range keys { + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": "value-" + key, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + } + + // Scan with regex pattern + scanReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/scan", + Body: map[string]interface{}{ + "dmap": dmap, + "pattern": "^user-", + }, + } + + body, status, err := scanReq.Do(ctx) + if err != nil { + t.Fatalf("scan with regex 
failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var scanResp map[string]interface{} + if err := DecodeJSON(body, &scanResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + keysResp := scanResp["keys"].([]interface{}) + if len(keysResp) < 2 { + t.Fatalf("expected at least 2 keys matching pattern, got %d", len(keysResp)) + } +} + +func TestCache_MultiGet(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + keys := []string{"key-1", "key-2", "key-3"} + + // Put values + for i, key := range keys { + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": fmt.Sprintf("value-%d", i), + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + } + + // Multi-get + multiGetReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/mget", + Body: map[string]interface{}{ + "dmap": dmap, + "keys": keys, + }, + } + + body, status, err := multiGetReq.Do(ctx) + if err != nil { + t.Fatalf("mget failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var mgetResp map[string]interface{} + if err := DecodeJSON(body, &mgetResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + results := mgetResp["results"].([]interface{}) + if len(results) != 3 { + t.Fatalf("expected 3 results, got %d", len(results)) + } +} + +func TestCache_MissingDMap(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: 
map[string]interface{}{ + "dmap": "", + "key": "any-key", + }, + } + + _, status, err := getReq.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + if status != http.StatusBadRequest { + t.Fatalf("expected status 400 for missing dmap, got %d", status) + } +} + +func TestCache_MissingKey(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": "non-existent-key", + }, + } + + _, status, err := getReq.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + if status != http.StatusNotFound { + t.Fatalf("expected status 404 for missing key, got %d", status) + } +} diff --git a/e2e/client_e2e_test.go b/e2e/client_e2e_test.go deleted file mode 100644 index 04962e3..0000000 --- a/e2e/client_e2e_test.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build e2e - -package e2e - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/DeBrosOfficial/network/pkg/client" -) - -func getenv(k, def string) string { - if v := strings.TrimSpace(os.Getenv(k)); v != "" { - return v - } - return def -} - -func requireEnv(t *testing.T, key string) string { - t.Helper() - v := strings.TrimSpace(os.Getenv(key)) - if v == "" { - t.Skipf("%s not set; skipping", key) - } - return v -} - -func TestClient_Database_CreateQueryMigrate(t *testing.T) { - apiKey := requireEnv(t, "GATEWAY_API_KEY") - namespace := getenv("E2E_CLIENT_NAMESPACE", "default") - - cfg := client.DefaultClientConfig(namespace) - cfg.APIKey = apiKey - cfg.QuietMode = true - - if v := strings.TrimSpace(os.Getenv("E2E_BOOTSTRAP_PEERS")); v != "" { - parts := strings.Split(v, ",") - var peers []string - for _, p := range parts { - p = strings.TrimSpace(p) - if p != "" { - peers = append(peers, p) - } - } - 
cfg.BootstrapPeers = peers - } - if v := strings.TrimSpace(os.Getenv("E2E_RQLITE_NODES")); v != "" { - nodes := strings.Fields(strings.ReplaceAll(v, ",", " ")) - cfg.DatabaseEndpoints = nodes - } - - c, err := client.NewClient(cfg) - if err != nil { - t.Fatalf("new client: %v", err) - } - if err := c.Connect(); err != nil { - t.Fatalf("connect: %v", err) - } - t.Cleanup(func() { _ = c.Disconnect() }) - - // Unique table per run - table := fmt.Sprintf("e2e_items_client_%d", time.Now().UnixNano()) - schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)", table) - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - if err := c.Database().CreateTable(ctx, schema); err != nil { - t.Fatalf("create table: %v", err) - } - // Insert via transaction - stmts := []string{ - fmt.Sprintf("INSERT INTO %s(name) VALUES ('alpha')", table), - fmt.Sprintf("INSERT INTO %s(name) VALUES ('beta')", table), - } - ctx2, cancel2 := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel2() - if err := c.Database().Transaction(ctx2, stmts); err != nil { - t.Fatalf("transaction: %v", err) - } - // Query rows - ctx3, cancel3 := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel3() - res, err := c.Database().Query(ctx3, fmt.Sprintf("SELECT name FROM %s ORDER BY id", table)) - if err != nil { - t.Fatalf("query: %v", err) - } - if res.Count < 2 { - t.Fatalf("expected at least 2 rows, got %d", res.Count) - } -} diff --git a/e2e/concurrency_test.go b/e2e/concurrency_test.go new file mode 100644 index 0000000..16342c8 --- /dev/null +++ b/e2e/concurrency_test.go @@ -0,0 +1,503 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" +) + +// TestCache_ConcurrentWrites tests concurrent cache writes +func TestCache_ConcurrentWrites(t *testing.T) { + 
SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + numGoroutines := 10 + var wg sync.WaitGroup + var errorCount int32 + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + key := fmt.Sprintf("key-%d", idx) + value := fmt.Sprintf("value-%d", idx) + + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + if errorCount > 0 { + t.Fatalf("expected no errors, got %d", errorCount) + } + + // Verify all values exist + scanReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/scan", + Body: map[string]interface{}{ + "dmap": dmap, + }, + } + + body, status, err := scanReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("scan failed: status %d, err %v", status, err) + } + + var scanResp map[string]interface{} + if err := DecodeJSON(body, &scanResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + keys := scanResp["keys"].([]interface{}) + if len(keys) < numGoroutines { + t.Fatalf("expected at least %d keys, got %d", numGoroutines, len(keys)) + } +} + +// TestCache_ConcurrentReads tests concurrent cache reads +func TestCache_ConcurrentReads(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "shared-key" + value := "shared-value" + + // Put value first + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err 
!= nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + + // Read concurrently + numGoroutines := 10 + var wg sync.WaitGroup + var errorCount int32 + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + body, status, err := getReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + return + } + + var getResp map[string]interface{} + if err := DecodeJSON(body, &getResp); err != nil { + atomic.AddInt32(&errorCount, 1) + return + } + + if getResp["value"] != value { + atomic.AddInt32(&errorCount, 1) + } + }() + } + + wg.Wait() + + if errorCount > 0 { + t.Fatalf("expected no errors, got %d", errorCount) + } +} + +// TestCache_ConcurrentDeleteAndWrite tests concurrent delete and write +func TestCache_ConcurrentDeleteAndWrite(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + var wg sync.WaitGroup + var errorCount int32 + + numWrites := 5 + numDeletes := 3 + + // Write keys + for i := 0; i < numWrites; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + key := fmt.Sprintf("key-%d", idx) + value := fmt.Sprintf("value-%d", idx) + + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + // Delete some keys + for i := 0; i < numDeletes; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + key := fmt.Sprintf("key-%d", idx) + + deleteReq := &HTTPRequest{ + Method: http.MethodPost, + URL: 
GetGatewayURL() + "/v1/cache/delete", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err := deleteReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + if errorCount > 0 { + t.Fatalf("expected no errors, got %d", errorCount) + } +} + +// TestRQLite_ConcurrentInserts tests concurrent database inserts +func TestRQLite_ConcurrentInserts(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Insert concurrently + numInserts := 10 + var wg sync.WaitGroup + var errorCount int32 + + for i := 0; i < numInserts; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + txReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, idx), + }, + }, + } + + _, status, err := txReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + if errorCount > 0 { + t.Logf("warning: %d concurrent inserts failed", errorCount) + } + + // Verify count + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table), + }, + 
} + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("count query failed: status %d, err %v", status, err) + } + + var countResp map[string]interface{} + if err := DecodeJSON(body, &countResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 { + row := rows[0].([]interface{}) + count := int(row[0].(float64)) + if count < numInserts { + t.Logf("warning: expected %d inserts, got %d", numInserts, count) + } + } +} + +// TestRQLite_LargeBatchTransaction tests a large transaction with many statements +func TestRQLite_LargeBatchTransaction(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Create large batch (100 statements) + var ops []map[string]interface{} + for i := 0; i < 100; i++ { + ops = append(ops, map[string]interface{}{ + "kind": "exec", + "sql": fmt.Sprintf("INSERT INTO %s(value) VALUES ('value-%d')", table, i), + }) + } + + txReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "ops": ops, + }, + } + + _, status, err = txReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("large batch transaction failed: status %d, err %v", status, err) + } + + // Verify count + queryReq := &HTTPRequest{ + Method: 
http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("count query failed: status %d, err %v", status, err) + } + + var countResp map[string]interface{} + if err := DecodeJSON(body, &countResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 { + row := rows[0].([]interface{}) + if int(row[0].(float64)) != 100 { + t.Fatalf("expected 100 rows, got %v", row[0]) + } + } +} + +// TestCache_TTLExpiryWithSleep tests TTL expiry with a controlled sleep +func TestCache_TTLExpiryWithSleep(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "ttl-expiry-key" + value := "ttl-expiry-value" + + // Put value with 2 second TTL + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + "ttl": "2s", + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put with TTL failed: status %d, err %v", status, err) + } + + // Verify exists immediately + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("get immediately after put failed: status %d, err %v", status, err) + } + + // Sleep for TTL duration + buffer + Delay(2500) + + // Try to get after TTL expires + _, status, err = getReq.Do(ctx) + if status == http.StatusOK { + t.Logf("warning: TTL expiry may not be fully implemented; key still exists after 
TTL") + } +} + +// TestCache_ConcurrentWriteAndDelete tests concurrent writes and deletes on same key +func TestCache_ConcurrentWriteAndDelete(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "contested-key" + + // Alternate between writes and deletes + numIterations := 5 + for i := 0; i < numIterations; i++ { + // Write + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": fmt.Sprintf("value-%d", i), + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed at iteration %d: status %d, err %v", i, status, err) + } + + // Read + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("get failed at iteration %d: status %d, err %v", i, status, err) + } + + // Delete + deleteReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/delete", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = deleteReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Logf("warning: delete at iteration %d failed: status %d, err %v", i, status, err) + } + } +} diff --git a/e2e/env.go b/e2e/env.go new file mode 100644 index 0000000..abace5d --- /dev/null +++ b/e2e/env.go @@ -0,0 +1,646 @@ +//go:build e2e + +package e2e + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + "io" + "math/rand" + "net/http" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/DeBrosOfficial/network/pkg/client" + "github.com/DeBrosOfficial/network/pkg/config" + "github.com/DeBrosOfficial/network/pkg/ipfs" + 
_ "github.com/mattn/go-sqlite3" + "go.uber.org/zap" + "gopkg.in/yaml.v2" +) + +var ( + gatewayURLCache string + apiKeyCache string + bootstrapCache []string + rqliteCache []string + ipfsClusterCache string + ipfsAPICache string + cacheMutex sync.RWMutex +) + +// loadGatewayConfig loads gateway configuration from ~/.debros/gateway.yaml +func loadGatewayConfig() (map[string]interface{}, error) { + configPath, err := config.DefaultPath("gateway.yaml") + if err != nil { + return nil, fmt.Errorf("failed to get gateway config path: %w", err) + } + + data, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read gateway config: %w", err) + } + + var cfg map[string]interface{} + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse gateway config: %w", err) + } + + return cfg, nil +} + +// loadNodeConfig loads node configuration from ~/.debros/node.yaml or bootstrap.yaml +func loadNodeConfig(filename string) (map[string]interface{}, error) { + configPath, err := config.DefaultPath(filename) + if err != nil { + return nil, fmt.Errorf("failed to get config path: %w", err) + } + + data, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read config: %w", err) + } + + var cfg map[string]interface{} + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse config: %w", err) + } + + return cfg, nil +} + +// GetGatewayURL returns the gateway base URL from config +func GetGatewayURL() string { + cacheMutex.RLock() + if gatewayURLCache != "" { + defer cacheMutex.RUnlock() + return gatewayURLCache + } + cacheMutex.RUnlock() + + // Try to load from gateway config + gwCfg, err := loadGatewayConfig() + if err == nil { + if server, ok := gwCfg["server"].(map[interface{}]interface{}); ok { + if port, ok := server["port"].(int); ok { + url := fmt.Sprintf("http://localhost:%d", port) + cacheMutex.Lock() + gatewayURLCache = url + cacheMutex.Unlock() 
+ return url + } + } + } + + // Default fallback + return "http://localhost:6001" +} + +// GetRQLiteNodes returns rqlite endpoint addresses from config +func GetRQLiteNodes() []string { + cacheMutex.RLock() + if len(rqliteCache) > 0 { + defer cacheMutex.RUnlock() + return rqliteCache + } + cacheMutex.RUnlock() + + // Try bootstrap.yaml first, then all node variants + for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok { + if rqlitePort, ok := db["rqlite_port"].(int); ok { + nodes := []string{fmt.Sprintf("http://localhost:%d", rqlitePort)} + cacheMutex.Lock() + rqliteCache = nodes + cacheMutex.Unlock() + return nodes + } + } + } + + // Default fallback + return []string{"http://localhost:5001"} +} + +// queryAPIKeyFromRQLite queries the SQLite database directly for an API key +func queryAPIKeyFromRQLite() (string, error) { + // Build database path from bootstrap/node config + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("failed to get home directory: %w", err) + } + + // Try bootstrap first, then all nodes + dbPaths := []string{ + filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"), + filepath.Join(homeDir, ".debros", "bootstrap2", "rqlite", "db.sqlite"), + filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"), + filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"), + filepath.Join(homeDir, ".debros", "node4", "rqlite", "db.sqlite"), + } + + for _, dbPath := range dbPaths { + // Check if database file exists + if _, err := os.Stat(dbPath); err != nil { + continue + } + + // Open SQLite database + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + continue + } + defer db.Close() + + // Set timeout for connection + ctx, cancel := context.WithTimeout(context.Background(), 
5*time.Second) + defer cancel() + + // Query the api_keys table + row := db.QueryRowContext(ctx, "SELECT key FROM api_keys ORDER BY id LIMIT 1") + var apiKey string + if err := row.Scan(&apiKey); err != nil { + if err == sql.ErrNoRows { + continue // Try next database + } + continue // Skip this database on error + } + + if apiKey != "" { + return apiKey, nil + } + } + + return "", fmt.Errorf("failed to retrieve API key from any SQLite database") +} + +// GetAPIKey returns the gateway API key from rqlite or cache +func GetAPIKey() string { + cacheMutex.RLock() + if apiKeyCache != "" { + defer cacheMutex.RUnlock() + return apiKeyCache + } + cacheMutex.RUnlock() + + // Query rqlite for API key + apiKey, err := queryAPIKeyFromRQLite() + if err != nil { + return "" + } + + cacheMutex.Lock() + apiKeyCache = apiKey + cacheMutex.Unlock() + + return apiKey +} + +// GetJWT returns the gateway JWT token (currently not auto-discovered) +func GetJWT() string { + return "" +} + +// GetBootstrapPeers returns bootstrap peer addresses from config +func GetBootstrapPeers() []string { + cacheMutex.RLock() + if len(bootstrapCache) > 0 { + defer cacheMutex.RUnlock() + return bootstrapCache + } + cacheMutex.RUnlock() + + configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} + seen := make(map[string]struct{}) + var peers []string + + for _, cfgFile := range configFiles { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{}) + if !ok { + continue + } + rawPeers, ok := discovery["bootstrap_peers"].([]interface{}) + if !ok { + continue + } + for _, v := range rawPeers { + peerStr, ok := v.(string) + if !ok || peerStr == "" { + continue + } + if _, exists := seen[peerStr]; exists { + continue + } + seen[peerStr] = struct{}{} + peers = append(peers, peerStr) + } + } + + if len(peers) == 0 { + return nil + } + + cacheMutex.Lock() + 
bootstrapCache = peers + cacheMutex.Unlock() + + return peers +} + +// GetIPFSClusterURL returns the IPFS cluster API URL from config +func GetIPFSClusterURL() string { + cacheMutex.RLock() + if ipfsClusterCache != "" { + defer cacheMutex.RUnlock() + return ipfsClusterCache + } + cacheMutex.RUnlock() + + // Try to load from node config + for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok { + if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok { + if url, ok := ipfs["cluster_api_url"].(string); ok && url != "" { + cacheMutex.Lock() + ipfsClusterCache = url + cacheMutex.Unlock() + return url + } + } + } + } + + // Default fallback + return "http://localhost:9094" +} + +// GetIPFSAPIURL returns the IPFS API URL from config +func GetIPFSAPIURL() string { + cacheMutex.RLock() + if ipfsAPICache != "" { + defer cacheMutex.RUnlock() + return ipfsAPICache + } + cacheMutex.RUnlock() + + // Try to load from node config + for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok { + if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok { + if url, ok := ipfs["api_url"].(string); ok && url != "" { + cacheMutex.Lock() + ipfsAPICache = url + cacheMutex.Unlock() + return url + } + } + } + } + + // Default fallback + return "http://localhost:5001" +} + +// GetClientNamespace returns the test client namespace from config +func GetClientNamespace() string { + // Try to load from node config + for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} { + nodeCfg, err := 
loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{}); ok { + if ns, ok := discovery["node_namespace"].(string); ok && ns != "" { + return ns + } + } + } + + return "default" +} + +// SkipIfMissingGateway skips the test if gateway is not accessible or API key not available +func SkipIfMissingGateway(t *testing.T) { + t.Helper() + apiKey := GetAPIKey() + if apiKey == "" { + t.Skip("API key not available from rqlite; gateway tests skipped") + } + + // Verify gateway is accessible + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil) + if err != nil { + t.Skip("Gateway not accessible; tests skipped") + return + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Skip("Gateway not accessible; tests skipped") + return + } + resp.Body.Close() +} + +// IsGatewayReady checks if the gateway is accessible and healthy +func IsGatewayReady(ctx context.Context) bool { + gatewayURL := GetGatewayURL() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, gatewayURL+"/v1/health", nil) + if err != nil { + return false + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +// NewHTTPClient creates an authenticated HTTP client for gateway requests +func NewHTTPClient(timeout time.Duration) *http.Client { + if timeout == 0 { + timeout = 30 * time.Second + } + return &http.Client{Timeout: timeout} +} + +// HTTPRequest is a helper for making authenticated HTTP requests +type HTTPRequest struct { + Method string + URL string + Body interface{} + Headers map[string]string + Timeout time.Duration + SkipAuth bool +} + +// Do executes an HTTP request and returns the response body +func (hr *HTTPRequest) Do(ctx context.Context) ([]byte, int, error) { + if 
hr.Timeout == 0 { + hr.Timeout = 30 * time.Second + } + + var reqBody io.Reader + if hr.Body != nil { + data, err := json.Marshal(hr.Body) + if err != nil { + return nil, 0, fmt.Errorf("failed to marshal request body: %w", err) + } + reqBody = bytes.NewReader(data) + } + + req, err := http.NewRequestWithContext(ctx, hr.Method, hr.URL, reqBody) + if err != nil { + return nil, 0, fmt.Errorf("failed to create request: %w", err) + } + + // Add headers + if hr.Headers != nil { + for k, v := range hr.Headers { + req.Header.Set(k, v) + } + } + + // Add JSON content type if body is present + if hr.Body != nil && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "application/json") + } + + // Add auth headers + if !hr.SkipAuth { + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + req.Header.Set("X-API-Key", apiKey) + } + } + + client := NewHTTPClient(hr.Timeout) + resp, err := client.Do(req) + if err != nil { + return nil, 0, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, resp.StatusCode, fmt.Errorf("failed to read response: %w", err) + } + + return respBody, resp.StatusCode, nil +} + +// DecodeJSON unmarshals response body into v +func DecodeJSON(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewNetworkClient creates a network client configured for e2e tests +func NewNetworkClient(t *testing.T) client.NetworkClient { + t.Helper() + + namespace := GetClientNamespace() + cfg := client.DefaultClientConfig(namespace) + cfg.APIKey = GetAPIKey() + cfg.QuietMode = true // Suppress debug logs in tests + + if jwt := GetJWT(); jwt != "" { + cfg.JWT = jwt + } + + if peers := GetBootstrapPeers(); len(peers) > 0 { + cfg.BootstrapPeers = peers + } + + if nodes := GetRQLiteNodes(); len(nodes) > 0 { + cfg.DatabaseEndpoints = nodes + } + + c, err := client.NewClient(cfg) + if err != nil { + 
t.Fatalf("failed to create network client: %v", err) + } + + return c +} + +// GenerateUniqueID generates a unique identifier for test resources +func GenerateUniqueID(prefix string) string { + return fmt.Sprintf("%s_%d_%d", prefix, time.Now().UnixNano(), rand.Intn(10000)) +} + +// GenerateTableName generates a unique table name for database tests +func GenerateTableName() string { + return GenerateUniqueID("e2e_test") +} + +// GenerateDMapName generates a unique dmap name for cache tests +func GenerateDMapName() string { + return GenerateUniqueID("test_dmap") +} + +// GenerateTopic generates a unique topic name for pubsub tests +func GenerateTopic() string { + return GenerateUniqueID("e2e_topic") +} + +// Delay pauses execution for the specified duration +func Delay(ms int) { + time.Sleep(time.Duration(ms) * time.Millisecond) +} + +// WaitForCondition waits for a condition with exponential backoff +func WaitForCondition(maxWait time.Duration, check func() bool) error { + deadline := time.Now().Add(maxWait) + backoff := 100 * time.Millisecond + + for { + if check() { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("condition not met within %v", maxWait) + } + time.Sleep(backoff) + if backoff < 2*time.Second { + backoff = backoff * 2 + } + } +} + +// NewTestLogger creates a test logger for debugging +func NewTestLogger(t *testing.T) *zap.Logger { + t.Helper() + config := zap.NewDevelopmentConfig() + config.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + logger, err := config.Build() + if err != nil { + t.Fatalf("failed to create logger: %v", err) + } + return logger +} + +// CleanupDatabaseTable drops a table from the database after tests +func CleanupDatabaseTable(t *testing.T, tableName string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Query rqlite to drop the table + homeDir, err := os.UserHomeDir() + if err != nil { + t.Logf("warning: failed to get home directory for 
cleanup: %v", err) + return + } + + dbPath := filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Logf("warning: failed to open database for cleanup: %v", err) + return + } + defer db.Close() + + dropSQL := fmt.Sprintf("DROP TABLE IF EXISTS %s", tableName) + if _, err := db.ExecContext(ctx, dropSQL); err != nil { + t.Logf("warning: failed to drop table %s: %v", tableName, err) + } +} + +// CleanupDMapCache deletes a dmap from the cache after tests +func CleanupDMapCache(t *testing.T, dmapName string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodDelete, + URL: GetGatewayURL() + "/v1/cache/dmap/" + dmapName, + Timeout: 10 * time.Second, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Logf("warning: failed to delete dmap %s: %v", dmapName, err) + return + } + + if status != http.StatusOK && status != http.StatusNoContent && status != http.StatusNotFound { + t.Logf("warning: delete dmap returned status %d", status) + } +} + +// CleanupIPFSFile unpins a file from IPFS after tests +func CleanupIPFSFile(t *testing.T, cid string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := &ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(*cfg, logger) + if err != nil { + t.Logf("warning: failed to create IPFS client for cleanup: %v", err) + return + } + + if err := client.Unpin(ctx, cid); err != nil { + t.Logf("warning: failed to unpin file %s: %v", cid, err) + } +} + +// CleanupCacheEntry deletes a cache entry after tests +func CleanupCacheEntry(t *testing.T, dmapName, key string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: 
http.MethodDelete, + URL: GetGatewayURL() + "/v1/cache/dmap/" + dmapName + "/key/" + key, + Timeout: 10 * time.Second, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Logf("warning: failed to delete cache entry: %v", err) + return + } + + if status != http.StatusOK && status != http.StatusNoContent && status != http.StatusNotFound { + t.Logf("warning: delete cache entry returned status %d", status) + } +} diff --git a/e2e/gateway_e2e_test.go b/e2e/gateway_e2e_test.go deleted file mode 100644 index 82e7f27..0000000 --- a/e2e/gateway_e2e_test.go +++ /dev/null @@ -1,427 +0,0 @@ -//go:build e2e - -package e2e - -import ( - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/gorilla/websocket" -) - -func getEnv(key, def string) string { - if v := strings.TrimSpace(os.Getenv(key)); v != "" { - return v - } - return def -} - -func requireAPIKey(t *testing.T) string { - t.Helper() - key := strings.TrimSpace(os.Getenv("GATEWAY_API_KEY")) - if key == "" { - t.Skip("GATEWAY_API_KEY not set; skipping gateway auth-required tests") - } - return key -} - -func gatewayBaseURL() string { - return getEnv("GATEWAY_BASE_URL", "http://127.0.0.1:6001") -} - -func httpClient() *http.Client { - return &http.Client{Timeout: 10 * time.Second} -} - -func authHeader(key string) http.Header { - h := http.Header{} - h.Set("Authorization", "Bearer "+key) - h.Set("Content-Type", "application/json") - return h -} - -func TestGateway_Health(t *testing.T) { - base := gatewayBaseURL() - resp, err := httpClient().Get(base + "/v1/health") - if err != nil { - t.Fatalf("health request error: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status: %d", resp.StatusCode) - } - var body map[string]any - if err := json.NewDecoder(resp.Body).Decode(&body); err != nil { - t.Fatalf("decode: %v", err) - } - if body["status"] != "ok" { - t.Fatalf("status not 
ok: %+v", body) - } -} - -func TestGateway_PubSub_WS_Echo(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - topic := fmt.Sprintf("e2e-ws-%d", time.Now().UnixNano()) - wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{} - hdr.Set("Authorization", "Bearer "+key) - - c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr) - if err != nil { - t.Fatalf("ws dial: %v", err) - } - defer c.Close() - defer c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - - msg := []byte("hello-ws") - if err := c.WriteMessage(websocket.TextMessage, msg); err != nil { - t.Fatalf("ws write: %v", err) - } - - _, data, err := c.ReadMessage() - if err != nil { - t.Fatalf("ws read: %v", err) - } - if string(data) != string(msg) { - t.Fatalf("ws echo mismatch: %q", string(data)) - } -} - -func TestGateway_PubSub_RestPublishToWS(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - topic := fmt.Sprintf("e2e-rest-%d", time.Now().UnixNano()) - wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{} - hdr.Set("Authorization", "Bearer "+key) - c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr) - if err != nil { - t.Fatalf("ws dial: %v", err) - } - defer c.Close() - - // Publish via REST - payload := randomBytes(24) - b64 := base64.StdEncoding.EncodeToString(payload) - body := fmt.Sprintf(`{"topic":"%s","data_base64":"%s"}`, topic, b64) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/pubsub/publish", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("publish do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("publish status: %d", resp.StatusCode) - } - - // Expect the message via WS - _ = c.SetReadDeadline(time.Now().Add(5 * time.Second)) - _, data, err := c.ReadMessage() - if err != nil { - t.Fatalf("ws read: %v", err) - } - 
if string(data) != string(payload) { - t.Fatalf("payload mismatch: %q != %q", string(data), string(payload)) - } - - // Topics list should include our topic (without namespace prefix) - req2, _ := http.NewRequest(http.MethodGet, base+"/v1/pubsub/topics", nil) - req2.Header = authHeader(key) - resp2, err := httpClient().Do(req2) - if err != nil { - t.Fatalf("topics do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - t.Fatalf("topics status: %d", resp2.StatusCode) - } - var tlist struct { - Topics []string `json:"topics"` - } - if err := json.NewDecoder(resp2.Body).Decode(&tlist); err != nil { - t.Fatalf("topics decode: %v", err) - } - found := false - for _, tt := range tlist.Topics { - if tt == topic { - found = true - break - } - } - if !found { - t.Fatalf("topic %s not found in topics list", topic) - } -} - -func TestGateway_Database_CreateQueryMigrate(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // Create table - schema := `CREATE TABLE IF NOT EXISTS e2e_items (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)` - body := fmt.Sprintf(`{"schema":%q}`, schema) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("create-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - t.Fatalf("create-table status: %d", resp.StatusCode) - } - - // Insert via transaction (simulate migration/data seed) - txBody := `{"statements":["INSERT INTO e2e_items(name) VALUES ('one')","INSERT INTO e2e_items(name) VALUES ('two')"]}` - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txBody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("tx do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK 
{ - t.Fatalf("tx status: %d", resp.StatusCode) - } - - // Query rows - qBody := `{"sql":"SELECT name FROM e2e_items ORDER BY id ASC"}` - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("query do: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("query status: %d", resp.StatusCode) - } - var qr struct { - Columns []string `json:"columns"` - Rows [][]any `json:"rows"` - Count int `json:"count"` - } - if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil { - t.Fatalf("query decode: %v", err) - } - if qr.Count < 2 { - t.Fatalf("expected at least 2 rows, got %d", qr.Count) - } - - // Schema endpoint returns tables - req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil) - req.Header = authHeader(key) - resp2, err := httpClient().Do(req) - if err != nil { - t.Fatalf("schema do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - t.Fatalf("schema status: %d", resp2.StatusCode) - } -} - -func TestGateway_Database_DropTable(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - table := fmt.Sprintf("e2e_tmp_%d", time.Now().UnixNano()) - schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)", table) - // create - body := fmt.Sprintf(`{"schema":%q}`, schema) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("create-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - t.Fatalf("create-table status: %d", resp.StatusCode) - } - // drop - dbody := fmt.Sprintf(`{"table":%q}`, table) - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/drop-table", strings.NewReader(dbody)) - req.Header = authHeader(key) - 
resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("drop-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("drop-table status: %d", resp.StatusCode) - } - // verify not in schema - req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil) - req.Header = authHeader(key) - resp2, err := httpClient().Do(req) - if err != nil { - t.Fatalf("schema do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - t.Fatalf("schema status: %d", resp2.StatusCode) - } - var schemaResp struct { - Tables []struct { - Name string `json:"name"` - } `json:"tables"` - } - if err := json.NewDecoder(resp2.Body).Decode(&schemaResp); err != nil { - t.Fatalf("schema decode: %v", err) - } - for _, tbl := range schemaResp.Tables { - if tbl.Name == table { - t.Fatalf("table %s still present after drop", table) - } - } -} - -func TestGateway_Database_RecreateWithFK(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // base tables - orgs := fmt.Sprintf("e2e_orgs_%d", time.Now().UnixNano()) - users := fmt.Sprintf("e2e_users_%d", time.Now().UnixNano()) - createOrgs := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)", orgs)) - createUsers := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)", users)) - - for _, body := range []string{createOrgs, createUsers} { - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("create-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - t.Fatalf("create-table status: %d", resp.StatusCode) - } - } - // seed data - txSeed := fmt.Sprintf(`{"statements":["INSERT INTO %s(id,name) VALUES (1,'org')","INSERT INTO 
%s(id,name,org_id,age) VALUES (1,'alice',1,'30')"]}`, orgs, users) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txSeed)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("seed tx do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("seed tx status: %d", resp.StatusCode) - } - - // migrate: change users.age TEXT -> INTEGER and add FK to orgs(id) - // Note: Some backends may not support connection-scoped BEGIN/COMMIT or PRAGMA via HTTP. - // We apply the standard recreate pattern without explicit PRAGMAs/transaction. - txMig := fmt.Sprintf(`{"statements":[ - "CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)", - "INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s", - "DROP TABLE %s", - "ALTER TABLE %s_new RENAME TO %s" - ]}`, users, orgs, users, users, users, users, users) - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txMig)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("mig tx do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("mig tx status: %d", resp.StatusCode) - } - - // verify schema type change - qBody := fmt.Sprintf(`{"sql":"PRAGMA table_info(%s)"}`, users) - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("pragma do: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("pragma status: %d", resp.StatusCode) - } - var qr struct { - Columns []string `json:"columns"` - Rows [][]any `json:"rows"` - } - if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil { - t.Fatalf("pragma decode: %v", 
err) - } - // column order: cid,name,type,notnull,dflt_value,pk - ageIsInt := false - for _, row := range qr.Rows { - if len(row) >= 3 && fmt.Sprintf("%v", row[1]) == "age" { - tstr := strings.ToUpper(fmt.Sprintf("%v", row[2])) - if strings.Contains(tstr, "INT") { - ageIsInt = true - break - } - } - } - if !ageIsInt { - // Fallback: inspect CREATE TABLE SQL from sqlite_master - qBody2 := fmt.Sprintf(`{"sql":"SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'"}`, users) - req2, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody2)) - req2.Header = authHeader(key) - resp3, err := httpClient().Do(req2) - if err != nil { - t.Fatalf("sqlite_master do: %v", err) - } - defer resp3.Body.Close() - if resp3.StatusCode != http.StatusOK { - t.Fatalf("sqlite_master status: %d", resp3.StatusCode) - } - var qr2 struct { - Rows [][]any `json:"rows"` - } - if err := json.NewDecoder(resp3.Body).Decode(&qr2); err != nil { - t.Fatalf("sqlite_master decode: %v", err) - } - found := false - for _, row := range qr2.Rows { - if len(row) > 0 { - sql := strings.ToUpper(fmt.Sprintf("%v", row[0])) - if strings.Contains(sql, "AGE INT") || strings.Contains(sql, "AGE INTEGER") { - found = true - break - } - } - } - if !found { - t.Fatalf("age column type not INTEGER after migration") - } - } -} - -func toWSURL(httpURL string) string { - u, err := url.Parse(httpURL) - if err != nil { - return httpURL - } - if u.Scheme == "https" { - u.Scheme = "wss" - } else { - u.Scheme = "ws" - } - return u.String() -} - -func randomBytes(n int) []byte { - b := make([]byte, n) - _, _ = rand.Read(b) - return b -} diff --git a/e2e/ipfs_cluster_test.go b/e2e/ipfs_cluster_test.go new file mode 100644 index 0000000..5d8dff1 --- /dev/null +++ b/e2e/ipfs_cluster_test.go @@ -0,0 +1,400 @@ +//go:build e2e + +package e2e + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/DeBrosOfficial/network/pkg/ipfs" +) + +func TestIPFSCluster_Health(t 
*testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 10 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + err = client.Health(ctx) + if err != nil { + t.Fatalf("health check failed: %v", err) + } +} + +func TestIPFSCluster_GetPeerCount(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 10 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + peerCount, err := client.GetPeerCount(ctx) + if err != nil { + t.Fatalf("get peer count failed: %v", err) + } + + if peerCount < 0 { + t.Fatalf("expected non-negative peer count, got %d", peerCount) + } + + t.Logf("IPFS cluster peers: %d", peerCount) +} + +func TestIPFSCluster_AddFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + content := []byte("IPFS cluster test content") + result, err := client.Add(ctx, bytes.NewReader(content), "test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + if result.Cid == "" { + t.Fatalf("expected non-empty CID") + } + + if result.Size != int64(len(content)) { + t.Fatalf("expected size %d, got %d", len(content), result.Size) + } + + t.Logf("Added file with CID: %s", result.Cid) +} + +func TestIPFSCluster_PinFile(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add file first + content := []byte("IPFS pin test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "pin-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + // Pin the file + pinResult, err := client.Pin(ctx, cid, "pinned-file", 1) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + if pinResult.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid) + } + + t.Logf("Pinned file: %s", cid) +} + +func TestIPFSCluster_PinStatus(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add and pin file + content := []byte("IPFS status test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "status-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + pinResult, err := client.Pin(ctx, cid, "status-test", 1) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + if pinResult.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid) + } + + // Give pin time to propagate + Delay(1000) + + // Get status + status, err := client.PinStatus(ctx, cid) + if err != nil { + t.Fatalf("get pin status failed: %v", err) + } + + if status.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, status.Cid) + } + + if status.Name != "status-test" { + t.Fatalf("expected name 
'status-test', got %s", status.Name) + } + + if status.ReplicationFactor < 1 { + t.Logf("warning: replication factor is %d, expected >= 1", status.ReplicationFactor) + } + + t.Logf("Pin status: %s (replication: %d, peers: %d)", status.Status, status.ReplicationFactor, len(status.Peers)) +} + +func TestIPFSCluster_UnpinFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add and pin file + content := []byte("IPFS unpin test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "unpin-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + _, err = client.Pin(ctx, cid, "unpin-test", 1) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + // Unpin file + err = client.Unpin(ctx, cid) + if err != nil { + t.Fatalf("unpin file failed: %v", err) + } + + t.Logf("Unpinned file: %s", cid) +} + +func TestIPFSCluster_GetFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add file + content := []byte("IPFS get test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "get-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + // Give time for propagation + Delay(1000) + + // Get file + rc, err := client.Get(ctx, cid, GetIPFSAPIURL()) + if err != nil { + t.Fatalf("get file failed: %v", err) + } + defer rc.Close() + + 
retrievedContent, err := io.ReadAll(rc) + if err != nil { + t.Fatalf("failed to read content: %v", err) + } + + if !bytes.Equal(retrievedContent, content) { + t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent)) + } + + t.Logf("Retrieved file: %s (%d bytes)", cid, len(retrievedContent)) +} + +func TestIPFSCluster_LargeFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 60 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Create 5MB file + content := bytes.Repeat([]byte("x"), 5*1024*1024) + result, err := client.Add(ctx, bytes.NewReader(content), "large.bin") + if err != nil { + t.Fatalf("add large file failed: %v", err) + } + + if result.Cid == "" { + t.Fatalf("expected non-empty CID") + } + + if result.Size != int64(len(content)) { + t.Fatalf("expected size %d, got %d", len(content), result.Size) + } + + t.Logf("Added large file with CID: %s (%d bytes)", result.Cid, result.Size) +} + +func TestIPFSCluster_ReplicationFactor(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add file + content := []byte("IPFS replication test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "replication-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + // Pin with specific replication factor + replicationFactor := 2 + pinResult, err := client.Pin(ctx, cid, "replication-test", replicationFactor) + if err != nil { 
+ t.Fatalf("pin file failed: %v", err) + } + + if pinResult.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid) + } + + // Give time for replication + Delay(2000) + + // Check status + status, err := client.PinStatus(ctx, cid) + if err != nil { + t.Fatalf("get pin status failed: %v", err) + } + + t.Logf("Replication factor: requested=%d, actual=%d, peers=%d", replicationFactor, status.ReplicationFactor, len(status.Peers)) +} + +func TestIPFSCluster_MultipleFiles(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add multiple files + numFiles := 5 + var cids []string + + for i := 0; i < numFiles; i++ { + content := []byte(fmt.Sprintf("File %d", i)) + result, err := client.Add(ctx, bytes.NewReader(content), fmt.Sprintf("file%d.txt", i)) + if err != nil { + t.Fatalf("add file %d failed: %v", i, err) + } + cids = append(cids, result.Cid) + } + + if len(cids) != numFiles { + t.Fatalf("expected %d files added, got %d", numFiles, len(cids)) + } + + // Verify all files exist + for i, cid := range cids { + status, err := client.PinStatus(ctx, cid) + if err != nil { + t.Logf("warning: failed to get status for file %d: %v", i, err) + continue + } + + if status.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, status.Cid) + } + } + + t.Logf("Successfully added and verified %d files", numFiles) +} diff --git a/e2e/libp2p_connectivity_test.go b/e2e/libp2p_connectivity_test.go new file mode 100644 index 0000000..0a6408a --- /dev/null +++ b/e2e/libp2p_connectivity_test.go @@ -0,0 +1,294 @@ +//go:build e2e + +package e2e + +import ( + "context" + "net/http" + "strings" + "testing" + "time" +) + +func TestLibP2P_PeerConnectivity(t *testing.T) { + 
SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create and connect client + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Verify peer connectivity through the gateway + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + if len(peers) == 0 { + t.Logf("warning: no peers connected (cluster may still be initializing)") + } +} + +func TestLibP2P_BootstrapPeers(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + bootstrapPeers := GetBootstrapPeers() + if len(bootstrapPeers) == 0 { + t.Skipf("E2E_BOOTSTRAP_PEERS not set; skipping") + } + + // Create client with bootstrap peers explicitly set + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Give peer discovery time + Delay(2000) + + // Verify we're connected (check via gateway status) + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("status request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["connected"] != true { + t.Logf("warning: client not 
connected to network (cluster may still be initializing)") + } +} + +func TestLibP2P_MultipleClientConnections(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create multiple clients + c1 := NewNetworkClient(t) + c2 := NewNetworkClient(t) + c3 := NewNetworkClient(t) + + if err := c1.Connect(); err != nil { + t.Fatalf("c1 connect failed: %v", err) + } + defer c1.Disconnect() + + if err := c2.Connect(); err != nil { + t.Fatalf("c2 connect failed: %v", err) + } + defer c2.Disconnect() + + if err := c3.Connect(); err != nil { + t.Fatalf("c3 connect failed: %v", err) + } + defer c3.Disconnect() + + // Give peer discovery time + Delay(2000) + + // Verify gateway sees multiple peers + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + if len(peers) < 1 { + t.Logf("warning: expected at least 1 peer, got %d", len(peers)) + } +} + +func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + c := NewNetworkClient(t) + + // Connect + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + + // Verify connected via gateway + req1 := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + _, status1, err := req1.Do(ctx) + if err != nil || status1 != http.StatusOK { + t.Logf("warning: gateway check failed before disconnect: status %d, err %v", status1, err) + } + + // Disconnect + if err := 
c.Disconnect(); err != nil { + t.Logf("warning: disconnect failed: %v", err) + } + + // Give time for disconnect to propagate + Delay(500) + + // Reconnect + if err := c.Connect(); err != nil { + t.Fatalf("reconnect failed: %v", err) + } + defer c.Disconnect() + + // Verify connected via gateway again + req2 := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + _, status2, err := req2.Do(ctx) + if err != nil || status2 != http.StatusOK { + t.Logf("warning: gateway check failed after reconnect: status %d, err %v", status2, err) + } +} + +func TestLibP2P_PeerDiscovery(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create client + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Give peer discovery time + Delay(3000) + + // Get peer list + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + if len(peers) == 0 { + t.Logf("warning: no peers discovered (cluster may not have multiple nodes)") + } else { + // Verify peer format (should be multiaddr strings) + for _, p := range peers { + peerStr := p.(string) + if !strings.Contains(peerStr, "/p2p/") && !strings.Contains(peerStr, "/ipfs/") { + t.Logf("warning: unexpected peer format: %s", peerStr) + } + } + } +} + +func TestLibP2P_PeerAddressFormat(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // 
Create client + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Get peer list + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + for _, p := range peers { + peerStr := p.(string) + // Multiaddrs should start with / + if !strings.HasPrefix(peerStr, "/") { + t.Fatalf("expected multiaddr format, got %s", peerStr) + } + } +} diff --git a/e2e/network_http_test.go b/e2e/network_http_test.go new file mode 100644 index 0000000..0f91f4e --- /dev/null +++ b/e2e/network_http_test.go @@ -0,0 +1,223 @@ +//go:build e2e + +package e2e + +import ( + "context" + "net/http" + "testing" + "time" +) + +func TestNetwork_Health(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/health", + SkipAuth: true, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("health check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status"] != "ok" { + t.Fatalf("expected status 'ok', got %v", resp["status"]) + } +} + +func TestNetwork_Status(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: 
http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("status check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if _, ok := resp["connected"]; !ok { + t.Fatalf("expected 'connected' field in response") + } + + if _, ok := resp["peer_count"]; !ok { + t.Fatalf("expected 'peer_count' field in response") + } +} + +func TestNetwork_Peers(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if _, ok := resp["peers"]; !ok { + t.Fatalf("expected 'peers' field in response") + } +} + +func TestNetwork_ProxyAnonSuccess(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/proxy/anon", + Body: map[string]interface{}{ + "url": "https://httpbin.org/get", + "method": "GET", + "headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"}, + }, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("proxy anon request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body)) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); 
err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status_code"] != float64(200) { + t.Fatalf("expected proxy status 200, got %v", resp["status_code"]) + } + + if _, ok := resp["body"]; !ok { + t.Fatalf("expected 'body' field in response") + } +} + +func TestNetwork_ProxyAnonBadURL(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/proxy/anon", + Body: map[string]interface{}{ + "url": "http://localhost:1/nonexistent", + "method": "GET", + }, + } + + _, status, err := req.Do(ctx) + if err == nil && status == http.StatusOK { + t.Fatalf("expected error for bad URL, got status 200") + } +} + +func TestNetwork_ProxyAnonPostRequest(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/proxy/anon", + Body: map[string]interface{}{ + "url": "https://httpbin.org/post", + "method": "POST", + "headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"}, + "body": "test_data", + }, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("proxy anon POST failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body)) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status_code"] != float64(200) { + t.Fatalf("expected proxy status 200, got %v", resp["status_code"]) + } +} + +func TestNetwork_Unauthorized(t *testing.T) { + // Test without API key + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Create request without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + 
"/v1/network/status", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403, got %d (auth may not be enforced on this endpoint)", status) + } +} diff --git a/e2e/pubsub_client_test.go b/e2e/pubsub_client_test.go new file mode 100644 index 0000000..5063c47 --- /dev/null +++ b/e2e/pubsub_client_test.go @@ -0,0 +1,421 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "sync" + "testing" + "time" +) + +func newMessageCollector(ctx context.Context, buffer int) (chan []byte, func(string, []byte) error) { + if buffer <= 0 { + buffer = 1 + } + + ch := make(chan []byte, buffer) + handler := func(_ string, data []byte) error { + copied := append([]byte(nil), data...) + select { + case ch <- copied: + case <-ctx.Done(): + } + return nil + } + return ch, handler +} + +func waitForMessage(ctx context.Context, ch <-chan []byte) ([]byte, error) { + select { + case msg := <-ch: + return msg, nil + case <-ctx.Done(): + return nil, fmt.Errorf("context finished while waiting for pubsub message: %w", ctx.Err()) + } +} + +func TestPubSub_SubscribePublish(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create two clients + client1 := NewNetworkClient(t) + client2 := NewNetworkClient(t) + + if err := client1.Connect(); err != nil { + t.Fatalf("client1 connect failed: %v", err) + } + defer client1.Disconnect() + + if err := client2.Connect(); err != nil { + t.Fatalf("client2 connect failed: %v", err) + } + defer client2.Disconnect() + + topic := GenerateTopic() + message := "test-message-from-client1" + + // Subscribe on client2 + messageCh, handler := newMessageCollector(ctx, 1) + if err := client2.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer 
client2.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish from client1 + if err := client1.PubSub().Publish(ctx, topic, []byte(message)); err != nil { + t.Fatalf("publish failed: %v", err) + } + + // Receive message on client2 + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg, err := waitForMessage(recvCtx, messageCh) + if err != nil { + t.Fatalf("receive failed: %v", err) + } + + if string(msg) != message { + t.Fatalf("expected message %q, got %q", message, string(msg)) + } +} + +func TestPubSub_MultipleSubscribers(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create three clients + clientPub := NewNetworkClient(t) + clientSub1 := NewNetworkClient(t) + clientSub2 := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub1.Connect(); err != nil { + t.Fatalf("subscriber1 connect failed: %v", err) + } + defer clientSub1.Disconnect() + + if err := clientSub2.Connect(); err != nil { + t.Fatalf("subscriber2 connect failed: %v", err) + } + defer clientSub2.Disconnect() + + topic := GenerateTopic() + message1 := "message-for-sub1" + message2 := "message-for-sub2" + + // Subscribe on both clients + sub1Ch, sub1Handler := newMessageCollector(ctx, 4) + if err := clientSub1.PubSub().Subscribe(ctx, topic, sub1Handler); err != nil { + t.Fatalf("subscribe1 failed: %v", err) + } + defer clientSub1.PubSub().Unsubscribe(ctx, topic) + + sub2Ch, sub2Handler := newMessageCollector(ctx, 4) + if err := clientSub2.PubSub().Subscribe(ctx, topic, sub2Handler); err != nil { + t.Fatalf("subscribe2 failed: %v", err) + } + defer clientSub2.PubSub().Unsubscribe(ctx, topic) + + // Give subscriptions time to propagate + Delay(500) + + // Publish first message + if err 
:= clientPub.PubSub().Publish(ctx, topic, []byte(message1)); err != nil { + t.Fatalf("publish1 failed: %v", err) + } + + // Both subscribers should receive first message + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg1a, err := waitForMessage(recvCtx, sub1Ch) + if err != nil { + t.Fatalf("sub1 receive1 failed: %v", err) + } + + if string(msg1a) != message1 { + t.Fatalf("sub1: expected %q, got %q", message1, string(msg1a)) + } + + msg1b, err := waitForMessage(recvCtx, sub2Ch) + if err != nil { + t.Fatalf("sub2 receive1 failed: %v", err) + } + + if string(msg1b) != message1 { + t.Fatalf("sub2: expected %q, got %q", message1, string(msg1b)) + } + + // Publish second message + if err := clientPub.PubSub().Publish(ctx, topic, []byte(message2)); err != nil { + t.Fatalf("publish2 failed: %v", err) + } + + // Both subscribers should receive second message + recvCtx2, recvCancel2 := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel2() + + msg2a, err := waitForMessage(recvCtx2, sub1Ch) + if err != nil { + t.Fatalf("sub1 receive2 failed: %v", err) + } + + if string(msg2a) != message2 { + t.Fatalf("sub1: expected %q, got %q", message2, string(msg2a)) + } + + msg2b, err := waitForMessage(recvCtx2, sub2Ch) + if err != nil { + t.Fatalf("sub2 receive2 failed: %v", err) + } + + if string(msg2b) != message2 { + t.Fatalf("sub2: expected %q, got %q", message2, string(msg2b)) + } +} + +func TestPubSub_Deduplication(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create two clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic := GenerateTopic() 
+ message := "duplicate-test-message" + + // Subscribe on client + messageCh, handler := newMessageCollector(ctx, 3) + if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish the same message multiple times + for i := 0; i < 3; i++ { + if err := clientPub.PubSub().Publish(ctx, topic, []byte(message)); err != nil { + t.Fatalf("publish %d failed: %v", i, err) + } + } + + // Receive messages - should get all (no dedup filter on subscribe) + recvCtx, recvCancel := context.WithTimeout(ctx, 5*time.Second) + defer recvCancel() + + receivedCount := 0 + for receivedCount < 3 { + if _, err := waitForMessage(recvCtx, messageCh); err != nil { + break + } + receivedCount++ + } + + if receivedCount < 1 { + t.Fatalf("expected to receive at least 1 message, got %d", receivedCount) + } +} + +func TestPubSub_ConcurrentPublish(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic := GenerateTopic() + numMessages := 10 + + // Subscribe + messageCh, handler := newMessageCollector(ctx, numMessages) + if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish multiple messages concurrently + var wg sync.WaitGroup + for i := 0; i < numMessages; i++ { + 
wg.Add(1) + go func(idx int) { + defer wg.Done() + msg := fmt.Sprintf("concurrent-msg-%d", idx) + if err := clientPub.PubSub().Publish(ctx, topic, []byte(msg)); err != nil { + t.Logf("publish %d failed: %v", idx, err) + } + }(i) + } + wg.Wait() + + // Receive messages + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + receivedCount := 0 + for receivedCount < numMessages { + if _, err := waitForMessage(recvCtx, messageCh); err != nil { + break + } + receivedCount++ + } + + if receivedCount < numMessages { + t.Logf("expected %d messages, got %d (some may have been dropped)", numMessages, receivedCount) + } +} + +func TestPubSub_TopicIsolation(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic1 := GenerateTopic() + topic2 := GenerateTopic() + + // Subscribe to topic1 + messageCh, handler := newMessageCollector(ctx, 2) + if err := clientSub.PubSub().Subscribe(ctx, topic1, handler); err != nil { + t.Fatalf("subscribe1 failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic1) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish to topic2 + msg2 := "message-on-topic2" + if err := clientPub.PubSub().Publish(ctx, topic2, []byte(msg2)); err != nil { + t.Fatalf("publish2 failed: %v", err) + } + + // Publish to topic1 + msg1 := "message-on-topic1" + if err := clientPub.PubSub().Publish(ctx, topic1, []byte(msg1)); err != nil { + t.Fatalf("publish1 failed: %v", err) + } + + // Receive on sub1 - should get msg1 only + recvCtx, recvCancel := 
context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg, err := waitForMessage(recvCtx, messageCh) + if err != nil { + t.Fatalf("receive failed: %v", err) + } + + if string(msg) != msg1 { + t.Fatalf("expected %q, got %q", msg1, string(msg)) + } +} + +func TestPubSub_EmptyMessage(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic := GenerateTopic() + + // Subscribe + messageCh, handler := newMessageCollector(ctx, 1) + if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish empty message + if err := clientPub.PubSub().Publish(ctx, topic, []byte("")); err != nil { + t.Fatalf("publish empty failed: %v", err) + } + + // Receive on sub - should get empty message + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg, err := waitForMessage(recvCtx, messageCh) + if err != nil { + t.Fatalf("receive failed: %v", err) + } + + if len(msg) != 0 { + t.Fatalf("expected empty message, got %q", string(msg)) + } +} diff --git a/e2e/rqlite_http_test.go b/e2e/rqlite_http_test.go new file mode 100644 index 0000000..0d7df2b --- /dev/null +++ b/e2e/rqlite_http_test.go @@ -0,0 +1,446 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" +) + +func TestRQLite_CreateTable(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)", + table, + ) + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("create table request failed: %v", err) + } + + if status != http.StatusCreated && status != http.StatusOK { + t.Fatalf("expected status 201 or 200, got %d: %s", status, string(body)) + } +} + +func TestRQLite_InsertQuery(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Insert rows + insertReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf("INSERT INTO %s(name) VALUES ('alice')", table), + fmt.Sprintf("INSERT INTO %s(name) VALUES ('bob')", table), + }, + }, + } + + _, status, err = insertReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("insert failed: status %d, err %v", status, err) + } + + // Query rows + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: 
map[string]interface{}{ + "sql": fmt.Sprintf("SELECT name FROM %s ORDER BY id", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil { + t.Fatalf("query failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var queryResp map[string]interface{} + if err := DecodeJSON(body, &queryResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if queryResp["count"].(float64) < 2 { + t.Fatalf("expected at least 2 rows, got %v", queryResp["count"]) + } +} + +func TestRQLite_DropTable(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Drop table + dropReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/drop-table", + Body: map[string]interface{}{ + "table": table, + }, + } + + _, status, err = dropReq.Do(ctx) + if err != nil { + t.Fatalf("drop table request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Verify table doesn't exist via schema + schemaReq := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/rqlite/schema", + } + + body, status, err := schemaReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Logf("warning: failed to verify schema after drop: status %d, err %v", status, err) + return + } + + var schemaResp map[string]interface{} + if err 
:= DecodeJSON(body, &schemaResp); err != nil { + t.Logf("warning: failed to decode schema response: %v", err) + return + } + + if tables, ok := schemaResp["tables"].([]interface{}); ok { + for _, tbl := range tables { + tblMap := tbl.(map[string]interface{}) + if tblMap["name"] == table { + t.Fatalf("table %s still present after drop", table) + } + } + } +} + +func TestRQLite_Schema(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/rqlite/schema", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("schema request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if _, ok := resp["tables"]; !ok { + t.Fatalf("expected 'tables' field in response") + } +} + +func TestRQLite_MalformedSQL(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": "SELECT * FROM nonexistent_table WHERE invalid syntax", + }, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should get an error response + if status == http.StatusOK { + t.Fatalf("expected error for malformed SQL, got status 200") + } +} + +func TestRQLite_LargeTransaction(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)", + table, + ) + + // Create 
table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Generate large transaction (50 inserts) + var statements []string + for i := 0; i < 50; i++ { + statements = append(statements, fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, i)) + } + + txReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": statements, + }, + } + + _, status, err = txReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("large transaction failed: status %d, err %v", status, err) + } + + // Verify all rows were inserted + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("count query failed: status %d, err %v", status, err) + } + + var countResp map[string]interface{} + if err := DecodeJSON(body, &countResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + // Extract count from result + if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 { + row := rows[0].([]interface{}) + if row[0].(float64) != 50 { + t.Fatalf("expected 50 rows, got %v", row[0]) + } + } +} + +func TestRQLite_ForeignKeyMigration(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + orgsTable := GenerateTableName() + usersTable := GenerateTableName() + + // Create base tables + createOrgsReq := &HTTPRequest{ + Method: 
http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)", + orgsTable, + ), + }, + } + + _, status, err := createOrgsReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create orgs table failed: status %d, err %v", status, err) + } + + createUsersReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)", + usersTable, + ), + }, + } + + _, status, err = createUsersReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create users table failed: status %d, err %v", status, err) + } + + // Seed data + seedReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf("INSERT INTO %s(id,name) VALUES (1,'org')", orgsTable), + fmt.Sprintf("INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')", usersTable), + }, + }, + } + + _, status, err = seedReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("seed transaction failed: status %d, err %v", status, err) + } + + // Migrate: change age type and add FK + migrationReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf( + "CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)", + usersTable, orgsTable, + ), + fmt.Sprintf( + "INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s", + usersTable, usersTable, + ), + fmt.Sprintf("DROP 
TABLE %s", usersTable), + fmt.Sprintf("ALTER TABLE %s_new RENAME TO %s", usersTable, usersTable), + }, + }, + } + + _, status, err = migrationReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("migration transaction failed: status %d, err %v", status, err) + } + + // Verify data is intact + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT name, org_id, age FROM %s", usersTable), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("query after migration failed: status %d, err %v", status, err) + } + + var queryResp map[string]interface{} + if err := DecodeJSON(body, &queryResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if queryResp["count"].(float64) != 1 { + t.Fatalf("expected 1 row after migration, got %v", queryResp["count"]) + } +} + +func TestRQLite_DropNonexistentTable(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dropReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/drop-table", + Body: map[string]interface{}{ + "table": "nonexistent_table_xyz_" + fmt.Sprintf("%d", time.Now().UnixNano()), + }, + } + + _, status, err := dropReq.Do(ctx) + if err != nil { + t.Logf("warning: drop nonexistent table request failed: %v", err) + return + } + + // Should get an error (400 or 404) + if status == http.StatusOK { + t.Logf("warning: expected error for dropping nonexistent table, got status 200") + } +} diff --git a/e2e/storage_http_test.go b/e2e/storage_http_test.go new file mode 100644 index 0000000..ee8fb0c --- /dev/null +++ b/e2e/storage_http_test.go @@ -0,0 +1,550 @@ +//go:build e2e + +package e2e + +import ( + "bytes" + "context" + "io" + "mime/multipart" + "net/http" + "testing" + "time" +) + +// uploadFile is a helper to upload a 
file to storage +func uploadFile(t *testing.T, ctx context.Context, content []byte, filename string) string { + t.Helper() + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // Add auth headers + if jwt := GetJWT(); jwt != "" { + req.Header.Set("Authorization", "Bearer "+jwt) + } else if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + result, err := DecodeJSONFromReader(resp.Body) + if err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + return result["cid"].(string) +} + +// DecodeJSONFromReader is a helper to decode JSON from an io.ReadCloser (closes it) +func DecodeJSONFromReader(rc io.ReadCloser) (map[string]interface{}, error) { + defer rc.Close() + body, err := io.ReadAll(rc) + if err != nil { + return nil, err + } + var result map[string]interface{} + err = DecodeJSON(body, &result) + return result, err +} + +func TestStorage_UploadText(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + 
defer cancel() + + content := []byte("Hello, IPFS!") + filename := "test.txt" + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &result); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if result["cid"] == nil { + t.Fatalf("expected cid in response") + } + + if result["name"] != filename { + t.Fatalf("expected name %q, got %v", filename, result["name"]) + } + + if result["size"] == nil || result["size"].(float64) <= 0 { + t.Fatalf("expected positive size") + } +} + +func TestStorage_UploadBinary(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // PNG header + content := []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a} + filename := "test.png" + + // Create multipart form + var buf bytes.Buffer + 
writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &result); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if result["cid"] == nil { + t.Fatalf("expected cid in response") + } +} + +func TestStorage_UploadLarge(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Create 1MB file + content := bytes.Repeat([]byte("x"), 1024*1024) + filename := "large.bin" + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close 
writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &result); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if result["size"] != float64(1024*1024) { + t.Fatalf("expected size %d, got %v", 1024*1024, result["size"]) + } +} + +func TestStorage_PinUnpin(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("test content for pinning") + + // Upload file first + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", "pin-test.txt") + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create upload request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + 
req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload failed: %v", err) + } + defer resp.Body.Close() + + var uploadResult map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &uploadResult); err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + cid := uploadResult["cid"].(string) + + // Pin the file + pinReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/storage/pin", + Body: map[string]interface{}{ + "cid": cid, + "name": "pinned-file", + }, + } + + body2, status, err := pinReq.Do(ctx) + if err != nil { + t.Fatalf("pin failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body2)) + } + + var pinResult map[string]interface{} + if err := DecodeJSON(body2, &pinResult); err != nil { + t.Fatalf("failed to decode pin response: %v", err) + } + + if pinResult["cid"] != cid { + t.Fatalf("expected cid %s, got %v", cid, pinResult["cid"]) + } + + // Unpin the file + unpinReq := &HTTPRequest{ + Method: http.MethodDelete, + URL: GetGatewayURL() + "/v1/storage/unpin/" + cid, + } + + body3, status, err := unpinReq.Do(ctx) + if err != nil { + t.Fatalf("unpin failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body3)) + } +} + +func TestStorage_Status(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("test content for status") + + // Upload file first + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", "status-test.txt") + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + 
} + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create upload request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload failed: %v", err) + } + defer resp.Body.Close() + + var uploadResult map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &uploadResult); err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + cid := uploadResult["cid"].(string) + + // Get status + statusReq := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/storage/status/" + cid, + } + + statusBody, status, err := statusReq.Do(ctx) + if err != nil { + t.Fatalf("status request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var statusResult map[string]interface{} + if err := DecodeJSON(statusBody, &statusResult); err != nil { + t.Fatalf("failed to decode status response: %v", err) + } + + if statusResult["cid"] != cid { + t.Fatalf("expected cid %s, got %v", cid, statusResult["cid"]) + } +} + +func TestStorage_InvalidCID(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + statusReq := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789", + } + + _, status, err := statusReq.Do(ctx) + if err != nil { + t.Fatalf("status request failed: %v", err) + } + + if status != http.StatusNotFound { + t.Logf("warning: expected status 404 for invalid CID, got %d", status) + } +} + 
+func TestStorage_GetByteRange(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("0123456789abcdefghijklmnopqrstuvwxyz") + + // Upload file first + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", "range-test.txt") + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create upload request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload failed: %v", err) + } + defer resp.Body.Close() + + var uploadResult map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &uploadResult); err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + cid := uploadResult["cid"].(string) + + // Get full content + getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/storage/get/"+cid, nil) + if err != nil { + t.Fatalf("failed to create get request: %v", err) + } + + if apiKey := GetAPIKey(); apiKey != "" { + getReq.Header.Set("Authorization", "Bearer "+apiKey) + } + + resp, err = client.Do(getReq) + if err != nil { + t.Fatalf("get request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected status 200, got %d", resp.StatusCode) + } + + retrievedContent, err 
:= io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if !bytes.Equal(retrievedContent, content) { + t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent)) + } +} diff --git a/go.mod b/go.mod index 0b1a4b6..fc2d5c2 100644 --- a/go.mod +++ b/go.mod @@ -11,21 +11,28 @@ require ( github.com/libp2p/go-libp2p-pubsub v0.14.2 github.com/mackerelio/go-osstat v0.2.6 github.com/multiformats/go-multiaddr v0.15.0 + github.com/olric-data/olric v0.7.0 github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.40.0 golang.org/x/net v0.42.0 gopkg.in/yaml.v3 v3.0.1 ) require ( + github.com/RoaringBitmap/roaring v1.9.4 // indirect + github.com/armon/go-metrics v0.4.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.22.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/buraksezer/consistent v0.10.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.5.0 // indirect github.com/elastic/gosigar v0.14.3 // indirect github.com/flynn/noise v1.1.0 // indirect @@ -33,10 +40,20 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + 
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.4 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-sockaddr v1.0.7 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/memberlist v0.5.3 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/go-cid v0.5.0 // indirect @@ -55,11 +72,13 @@ require ( github.com/libp2p/go-yamux/v5 v5.0.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/dns v1.1.66 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect + github.com/mschoch/smat v0.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect @@ -101,14 +120,20 @@ require ( github.com/quic-go/quic-go v0.50.1 // indirect github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/redis/go-redis/v9 v9.8.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/redcon v1.6.2 // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + 
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/wlynxg/anet v0.0.5 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.40.0 // indirect golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect golang.org/x/mod v0.26.0 // indirect golang.org/x/sync v0.16.0 // indirect @@ -116,5 +141,6 @@ require ( golang.org/x/text v0.27.0 // indirect golang.org/x/tools v0.35.0 // indirect google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/go.sum b/go.sum index 33dd50c..d97cebb 100644 --- a/go.sum +++ b/go.sum @@ -8,22 +8,45 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ= +github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units 
v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 
h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buraksezer/consistent v0.10.0 h1:hqBgz1PvNLC5rkWcEBVAL9dFMBWz6I0VgUCW25rrZlU= +github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= @@ -43,6 +66,8 @@ github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/docker/go-units 
v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -61,8 +86,15 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -79,13 +111,29 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod 
h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -101,8 +149,33 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY= +github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI= +github.com/hashicorp/go-msgpack/v2 v2.1.3 h1:cB1w4Zrk0O3jQBTcFMKqYQWRFfsSQ/TYKNyUUVyCP2c= +github.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+MCUn39TxymNR5ZU= +github.com/hashicorp/go-multierror 
v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk= +github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= @@ -116,8 +189,14 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= 
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -125,8 +204,11 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk= github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w= +github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -164,6 +246,8 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= @@ -178,11 +262,15 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -207,8 +295,12 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/olric-data/olric v0.7.0 h1:EKN2T6ZTtdu8Un0jV0KOWVxWm9odptJpefmDivfZdjE= +github.com/olric-data/olric v0.7.0/go.mod h1:+ZnPpgc8JkNkza8rETCKGn0P/QPF6HhZY0EbCKAOslo= github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= github.com/onsi/ginkgo/v2 v2.22.2/go.mod 
h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= @@ -217,6 +309,8 @@ github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= @@ -261,21 +355,38 @@ github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM= github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA= github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q= github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang 
v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= @@ -286,12 +397,16 @@ github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6 github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE= github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 
h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -316,16 +431,22 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx 
v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -333,9 +454,21 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/redcon v1.6.2 h1:5qfvrrybgtO85jnhSravmkZyC0D+7WstbfCs3MmPhow= +github.com/tidwall/redcon v1.6.2/go.mod h1:p5Wbsgeyi2VSTBWOcA5vRXrOb9arFTcU2+ZzFjqV75Y= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox 
v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= @@ -357,6 +490,7 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -390,12 +524,15 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -419,6 +556,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -426,16 +564,26 @@ golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -456,6 +604,7 @@ golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -502,15 +651,29 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/anyoneproxy/socks.go b/pkg/anyoneproxy/socks.go index a4c4ce2..df4a2eb 100644 --- a/pkg/anyoneproxy/socks.go +++ b/pkg/anyoneproxy/socks.go @@ -19,7 +19,7 @@ var disabled bool func SetDisabled(v bool) { disabled = v } // Enabled reports whether Anyone proxy routing is active. -// Defaults to true, using SOCKS5 at 127.0.0.1:9050, unless explicitly disabled +// Defaults to true, using SOCKS5 at localhost:9050, unless explicitly disabled // via SetDisabled(true) or environment variable ANYONE_DISABLE=1. // ANYONE_SOCKS5 may override the proxy address. func Enabled() bool { @@ -31,7 +31,7 @@ func Enabled() bool { // socksAddr returns the SOCKS5 address to use for proxying (host:port). func socksAddr() string { - return "127.0.0.1:9050" + return "localhost:9050" } // socksContextDialer implements tcp.ContextDialer over a SOCKS5 proxy. @@ -57,7 +57,7 @@ func (d *socksContextDialer) DialContext(ctx context.Context, network, address s // DialerForAddr returns a tcp.DialerForAddr that routes through the Anyone SOCKS5 proxy. // It automatically BYPASSES the proxy for loopback, private, and link-local addresses -// to allow local/dev networking (e.g. 127.0.0.1, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10). +// to allow local/dev networking (e.g. localhost, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10). 
func DialerForAddr() tcp.DialerForAddr { return func(raddr ma.Multiaddr) (tcp.ContextDialer, error) { // Prefer direct dialing for local/private targets diff --git a/pkg/auth/simple_auth.go b/pkg/auth/simple_auth.go new file mode 100644 index 0000000..246ed80 --- /dev/null +++ b/pkg/auth/simple_auth.go @@ -0,0 +1,116 @@ +package auth + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" +) + +// PerformSimpleAuthentication performs a simple authentication flow where the user +// provides a wallet address and receives an API key without signature verification +func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) { + reader := bufio.NewReader(os.Stdin) + + fmt.Println("\n🔐 Simple Wallet Authentication") + fmt.Println("================================") + + // Read wallet address + fmt.Print("Enter your wallet address (0x...): ") + walletInput, err := reader.ReadString('\n') + if err != nil { + return nil, fmt.Errorf("failed to read wallet address: %w", err) + } + + wallet := strings.TrimSpace(walletInput) + if wallet == "" { + return nil, fmt.Errorf("wallet address cannot be empty") + } + + // Validate wallet format (basic check) + if !strings.HasPrefix(wallet, "0x") && !strings.HasPrefix(wallet, "0X") { + wallet = "0x" + wallet + } + + if !ValidateWalletAddress(wallet) { + return nil, fmt.Errorf("invalid wallet address format") + } + + // Read namespace (optional) + fmt.Print("Enter namespace (press Enter for 'default'): ") + nsInput, err := reader.ReadString('\n') + if err != nil { + return nil, fmt.Errorf("failed to read namespace: %w", err) + } + + namespace := strings.TrimSpace(nsInput) + if namespace == "" { + namespace = "default" + } + + fmt.Printf("\n✅ Wallet: %s\n", wallet) + fmt.Printf("✅ Namespace: %s\n", namespace) + fmt.Println("⏳ Requesting API key from gateway...") + + // Request API key from gateway + apiKey, err := requestAPIKeyFromGateway(gatewayURL, wallet, namespace) + if 
err != nil { + return nil, fmt.Errorf("failed to request API key: %w", err) + } + + // Create credentials + creds := &Credentials{ + APIKey: apiKey, + Namespace: namespace, + UserID: wallet, + Wallet: wallet, + IssuedAt: time.Now(), + } + + fmt.Printf("\n🎉 Authentication successful!\n") + fmt.Printf("📝 API Key: %s\n", creds.APIKey) + + return creds, nil +} + +// requestAPIKeyFromGateway calls the gateway's simple-key endpoint to generate an API key +func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, error) { + reqBody := map[string]string{ + "wallet": wallet, + "namespace": namespace, + } + + payload, err := json.Marshal(reqBody) + if err != nil { + return "", fmt.Errorf("failed to marshal request: %w", err) + } + + endpoint := gatewayURL + "/v1/auth/simple-key" + resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload)) + if err != nil { + return "", fmt.Errorf("failed to call gateway: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body)) + } + + var respBody map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&respBody); err != nil { + return "", fmt.Errorf("failed to decode response: %w", err) + } + + apiKey, ok := respBody["api_key"].(string) + if !ok || apiKey == "" { + return "", fmt.Errorf("no api_key in response") + } + + return apiKey, nil +} diff --git a/pkg/cli/auth_commands.go b/pkg/cli/auth_commands.go index 5e795c6..3474288 100644 --- a/pkg/cli/auth_commands.go +++ b/pkg/cli/auth_commands.go @@ -33,29 +33,34 @@ func HandleAuthCommand(args []string) { func showAuthHelp() { fmt.Printf("🔐 Authentication Commands\n\n") - fmt.Printf("Usage: network-cli auth \n\n") + fmt.Printf("Usage: dbn auth \n\n") fmt.Printf("Subcommands:\n") - fmt.Printf(" login - Authenticate with wallet\n") + fmt.Printf(" login - Authenticate by providing your wallet 
address\n") fmt.Printf(" logout - Clear stored credentials\n") fmt.Printf(" whoami - Show current authentication status\n") fmt.Printf(" status - Show detailed authentication info\n\n") fmt.Printf("Examples:\n") - fmt.Printf(" network-cli auth login\n") - fmt.Printf(" network-cli auth whoami\n") - fmt.Printf(" network-cli auth status\n") - fmt.Printf(" network-cli auth logout\n\n") + fmt.Printf(" dbn auth login # Enter wallet address interactively\n") + fmt.Printf(" dbn auth whoami # Check who you're logged in as\n") + fmt.Printf(" dbn auth status # View detailed authentication info\n") + fmt.Printf(" dbn auth logout # Clear all stored credentials\n\n") fmt.Printf("Environment Variables:\n") fmt.Printf(" DEBROS_GATEWAY_URL - Gateway URL (overrides environment config)\n\n") + fmt.Printf("Authentication Flow:\n") + fmt.Printf(" 1. Run 'dbn auth login'\n") + fmt.Printf(" 2. Enter your wallet address when prompted\n") + fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n") + fmt.Printf(" 4. 
An API key will be generated and saved to ~/.debros/credentials.json\n\n") fmt.Printf("Note: Authentication uses the currently active environment.\n") - fmt.Printf(" Use 'network-cli env current' to see your active environment.\n") + fmt.Printf(" Use 'dbn env current' to see your active environment.\n") } func handleAuthLogin() { gatewayURL := getGatewayURL() fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL) - // Use the wallet authentication flow - creds, err := auth.PerformWalletAuthentication(gatewayURL) + // Use the simple authentication flow + creds, err := auth.PerformSimpleAuthentication(gatewayURL) if err != nil { fmt.Fprintf(os.Stderr, "❌ Authentication failed: %v\n", err) os.Exit(1) @@ -72,6 +77,7 @@ func handleAuthLogin() { fmt.Printf("📁 Credentials saved to: %s\n", credsPath) fmt.Printf("🎯 Wallet: %s\n", creds.Wallet) fmt.Printf("🏢 Namespace: %s\n", creds.Namespace) + fmt.Printf("🔑 API Key: %s\n", creds.APIKey) } func handleAuthLogout() { @@ -93,7 +99,7 @@ func handleAuthWhoami() { creds, exists := store.GetCredentialsForGateway(gatewayURL) if !exists || !creds.IsValid() { - fmt.Println("❌ Not authenticated - run 'network-cli auth login' to authenticate") + fmt.Println("❌ Not authenticated - run 'dbn auth login' to authenticate") os.Exit(1) } diff --git a/pkg/cli/basic_commands.go b/pkg/cli/basic_commands.go index 368160b..ade1ecf 100644 --- a/pkg/cli/basic_commands.go +++ b/pkg/cli/basic_commands.go @@ -158,7 +158,7 @@ func HandlePeerIDCommand(format string, timeout time.Duration) { // HandlePubSubCommand handles pubsub commands func HandlePubSubCommand(args []string, format string, timeout time.Duration) { if len(args) == 0 { - fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub [args...]\n") + fmt.Fprintf(os.Stderr, "Usage: dbn pubsub [args...]\n") os.Exit(1) } @@ -179,7 +179,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) { switch subcommand { case "publish": if len(args) < 3 { - fmt.Fprintf(os.Stderr, 
"Usage: network-cli pubsub publish \n") + fmt.Fprintf(os.Stderr, "Usage: dbn pubsub publish \n") os.Exit(1) } err := cli.PubSub().Publish(ctx, args[1], []byte(args[2])) @@ -191,7 +191,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) { case "subscribe": if len(args) < 2 { - fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub subscribe [duration]\n") + fmt.Fprintf(os.Stderr, "Usage: dbn pubsub subscribe [duration]\n") os.Exit(1) } duration := 30 * time.Second @@ -243,14 +243,26 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) { // Helper functions func createClient() (client.NetworkClient, error) { - config := client.DefaultClientConfig("network-cli") + config := client.DefaultClientConfig("dbn") + + // Use active environment's gateway URL + gatewayURL := getGatewayURL() + config.GatewayURL = gatewayURL + + // Try to get bootstrap peers from active environment + // For now, we'll use the default bootstrap peers from config + // In the future, environments could specify their own bootstrap peers + env, err := GetActiveEnvironment() + if err == nil && env != nil { + // Environment loaded successfully - gateway URL already set above + // Bootstrap peers could be added to Environment struct in the future + _ = env // Use env if we add bootstrap peers to it + } // Check for existing credentials using enhanced authentication creds, err := auth.GetValidEnhancedCredentials() if err != nil { // No valid credentials found, use the enhanced authentication flow - gatewayURL := getGatewayURL() - newCreds, authErr := auth.GetOrPromptForCredentials(gatewayURL) if authErr != nil { return nil, fmt.Errorf("authentication failed: %w", authErr) diff --git a/pkg/cli/config_commands.go b/pkg/cli/config_commands.go deleted file mode 100644 index 84f267e..0000000 --- a/pkg/cli/config_commands.go +++ /dev/null @@ -1,519 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - 
"github.com/DeBrosOfficial/network/pkg/config" - "github.com/DeBrosOfficial/network/pkg/encryption" -) - -// HandleConfigCommand handles config management commands -func HandleConfigCommand(args []string) { - if len(args) == 0 { - showConfigHelp() - return - } - - subcommand := args[0] - subargs := args[1:] - - switch subcommand { - case "init": - handleConfigInit(subargs) - case "validate": - handleConfigValidate(subargs) - case "help": - showConfigHelp() - default: - fmt.Fprintf(os.Stderr, "Unknown config subcommand: %s\n", subcommand) - showConfigHelp() - os.Exit(1) - } -} - -func showConfigHelp() { - fmt.Printf("Config Management Commands\n\n") - fmt.Printf("Usage: network-cli config [options]\n\n") - fmt.Printf("Subcommands:\n") - fmt.Printf(" init - Generate full network stack in ~/.debros (bootstrap + 2 nodes + gateway)\n") - fmt.Printf(" validate --name - Validate a config file\n\n") - fmt.Printf("Init Default Behavior (no --type):\n") - fmt.Printf(" Generates bootstrap.yaml, node2.yaml, node3.yaml, gateway.yaml with:\n") - fmt.Printf(" - Auto-generated identities for bootstrap, node2, node3\n") - fmt.Printf(" - Correct bootstrap_peers and join addresses\n") - fmt.Printf(" - Default ports: P2P 4001-4003, HTTP 5001-5003, Raft 7001-7003\n\n") - fmt.Printf("Init Options:\n") - fmt.Printf(" --type - Single config type: node, bootstrap, gateway (skips stack generation)\n") - fmt.Printf(" --name - Output filename (default: depends on --type or 'stack' for full stack)\n") - fmt.Printf(" --force - Overwrite existing config/stack files\n\n") - fmt.Printf("Single Config Options (with --type):\n") - fmt.Printf(" --id - Node ID for bootstrap peers\n") - fmt.Printf(" --listen-port - LibP2P listen port (default: 4001)\n") - fmt.Printf(" --rqlite-http-port - RQLite HTTP port (default: 5001)\n") - fmt.Printf(" --rqlite-raft-port - RQLite Raft port (default: 7001)\n") - fmt.Printf(" --join - RQLite address to join (required for non-bootstrap)\n") - fmt.Printf(" 
--bootstrap-peers - Comma-separated bootstrap peer multiaddrs\n\n") - fmt.Printf("Examples:\n") - fmt.Printf(" network-cli config init # Generate full stack\n") - fmt.Printf(" network-cli config init --force # Overwrite existing stack\n") - fmt.Printf(" network-cli config init --type bootstrap # Single bootstrap config (legacy)\n") - fmt.Printf(" network-cli config validate --name node.yaml\n") -} - -func handleConfigInit(args []string) { - // Parse flags - var ( - cfgType = "" - name = "" // Will be set based on type if not provided - id string - listenPort = 4001 - rqliteHTTPPort = 5001 - rqliteRaftPort = 7001 - joinAddr string - bootstrapPeers string - force bool - ) - - for i := 0; i < len(args); i++ { - switch args[i] { - case "--type": - if i+1 < len(args) { - cfgType = args[i+1] - i++ - } - case "--name": - if i+1 < len(args) { - name = args[i+1] - i++ - } - case "--id": - if i+1 < len(args) { - id = args[i+1] - i++ - } - case "--listen-port": - if i+1 < len(args) { - if p, err := strconv.Atoi(args[i+1]); err == nil { - listenPort = p - } - i++ - } - case "--rqlite-http-port": - if i+1 < len(args) { - if p, err := strconv.Atoi(args[i+1]); err == nil { - rqliteHTTPPort = p - } - i++ - } - case "--rqlite-raft-port": - if i+1 < len(args) { - if p, err := strconv.Atoi(args[i+1]); err == nil { - rqliteRaftPort = p - } - i++ - } - case "--join": - if i+1 < len(args) { - joinAddr = args[i+1] - i++ - } - case "--bootstrap-peers": - if i+1 < len(args) { - bootstrapPeers = args[i+1] - i++ - } - case "--force": - force = true - } - } - - // If --type is not specified, generate full stack - if cfgType == "" { - initFullStack(force) - return - } - - // Otherwise, continue with single-file generation - // Validate type - if cfgType != "node" && cfgType != "bootstrap" && cfgType != "gateway" { - fmt.Fprintf(os.Stderr, "Invalid --type: %s (expected: node, bootstrap, or gateway)\n", cfgType) - os.Exit(1) - } - - // Set default name based on type if not provided - if name == 
"" { - switch cfgType { - case "bootstrap": - name = "bootstrap.yaml" - case "gateway": - name = "gateway.yaml" - default: - name = "node.yaml" - } - } - - // Ensure config directory exists - configDir, err := config.EnsureConfigDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to ensure config directory: %v\n", err) - os.Exit(1) - } - - configPath := filepath.Join(configDir, name) - - // Check if file exists - if !force { - if _, err := os.Stat(configPath); err == nil { - fmt.Fprintf(os.Stderr, "Config file already exists at %s (use --force to overwrite)\n", configPath) - os.Exit(1) - } - } - - // Generate config based on type - var configContent string - switch cfgType { - case "node": - configContent = GenerateNodeConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort, joinAddr, bootstrapPeers) - case "bootstrap": - configContent = GenerateBootstrapConfig(name, id, listenPort, rqliteHTTPPort, rqliteRaftPort) - case "gateway": - configContent = GenerateGatewayConfig(bootstrapPeers) - } - - // Write config file - if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write config file: %v\n", err) - os.Exit(1) - } - - fmt.Printf("✅ Configuration file created: %s\n", configPath) - fmt.Printf(" Type: %s\n", cfgType) - fmt.Printf("\nYou can now start the %s using the generated config.\n", cfgType) -} - -func handleConfigValidate(args []string) { - var name string - for i := 0; i < len(args); i++ { - if args[i] == "--name" && i+1 < len(args) { - name = args[i+1] - i++ - } - } - - if name == "" { - fmt.Fprintf(os.Stderr, "Missing --name flag\n") - showConfigHelp() - os.Exit(1) - } - - configDir, err := config.ConfigDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get config directory: %v\n", err) - os.Exit(1) - } - - configPath := filepath.Join(configDir, name) - file, err := os.Open(configPath) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to open config file: %v\n", err) - 
os.Exit(1) - } - defer file.Close() - - var cfg config.Config - if err := config.DecodeStrict(file, &cfg); err != nil { - fmt.Fprintf(os.Stderr, "Failed to parse config: %v\n", err) - os.Exit(1) - } - - // Run validation - errs := cfg.Validate() - if len(errs) > 0 { - fmt.Fprintf(os.Stderr, "\n❌ Configuration errors (%d):\n", len(errs)) - for _, err := range errs { - fmt.Fprintf(os.Stderr, " - %s\n", err) - } - os.Exit(1) - } - - fmt.Printf("✅ Config is valid: %s\n", configPath) -} - -func initFullStack(force bool) { - fmt.Printf("🚀 Initializing full network stack...\n") - - // Ensure ~/.debros directory exists - homeDir, err := os.UserHomeDir() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get home directory: %v\n", err) - os.Exit(1) - } - debrosDir := filepath.Join(homeDir, ".debros") - if err := os.MkdirAll(debrosDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create ~/.debros directory: %v\n", err) - os.Exit(1) - } - - // Step 1: Generate bootstrap identity - bootstrapIdentityDir := filepath.Join(debrosDir, "bootstrap") - bootstrapIdentityPath := filepath.Join(bootstrapIdentityDir, "identity.key") - - if !force { - if _, err := os.Stat(bootstrapIdentityPath); err == nil { - fmt.Fprintf(os.Stderr, "Bootstrap identity already exists at %s (use --force to overwrite)\n", bootstrapIdentityPath) - os.Exit(1) - } - } - - bootstrapInfo, err := encryption.GenerateIdentity() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate bootstrap identity: %v\n", err) - os.Exit(1) - } - if err := os.MkdirAll(bootstrapIdentityDir, 0755); err != nil { - fmt.Fprintf(os.Stderr, "Failed to create bootstrap data directory: %v\n", err) - os.Exit(1) - } - if err := encryption.SaveIdentity(bootstrapInfo, bootstrapIdentityPath); err != nil { - fmt.Fprintf(os.Stderr, "Failed to save bootstrap identity: %v\n", err) - os.Exit(1) - } - fmt.Printf("✅ Generated bootstrap identity: %s (Peer ID: %s)\n", bootstrapIdentityPath, bootstrapInfo.PeerID.String()) - - // 
Construct bootstrap multiaddr - bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String()) - fmt.Printf(" Bootstrap multiaddr: %s\n", bootstrapMultiaddr) - - // Generate configs for all nodes... - // (rest of the implementation - similar to what was in main.go) - // I'll keep it similar to the original for consistency - - // Step 2: Generate bootstrap.yaml - bootstrapName := "bootstrap.yaml" - bootstrapPath := filepath.Join(debrosDir, bootstrapName) - if !force { - if _, err := os.Stat(bootstrapPath); err == nil { - fmt.Fprintf(os.Stderr, "Bootstrap config already exists at %s (use --force to overwrite)\n", bootstrapPath) - os.Exit(1) - } - } - bootstrapContent := GenerateBootstrapConfig(bootstrapName, "", 4001, 5001, 7001) - if err := os.WriteFile(bootstrapPath, []byte(bootstrapContent), 0644); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write bootstrap config: %v\n", err) - os.Exit(1) - } - fmt.Printf("✅ Generated bootstrap config: %s\n", bootstrapPath) - - // Step 3: Generate node2.yaml - node2Name := "node2.yaml" - node2Path := filepath.Join(debrosDir, node2Name) - if !force { - if _, err := os.Stat(node2Path); err == nil { - fmt.Fprintf(os.Stderr, "Node2 config already exists at %s (use --force to overwrite)\n", node2Path) - os.Exit(1) - } - } - node2Content := GenerateNodeConfig(node2Name, "", 4002, 5002, 7002, "localhost:5001", bootstrapMultiaddr) - if err := os.WriteFile(node2Path, []byte(node2Content), 0644); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write node2 config: %v\n", err) - os.Exit(1) - } - fmt.Printf("✅ Generated node2 config: %s\n", node2Path) - - // Step 4: Generate node3.yaml - node3Name := "node3.yaml" - node3Path := filepath.Join(debrosDir, node3Name) - if !force { - if _, err := os.Stat(node3Path); err == nil { - fmt.Fprintf(os.Stderr, "Node3 config already exists at %s (use --force to overwrite)\n", node3Path) - os.Exit(1) - } - } - node3Content := GenerateNodeConfig(node3Name, "", 4003, 
5003, 7003, "localhost:5001", bootstrapMultiaddr) - if err := os.WriteFile(node3Path, []byte(node3Content), 0644); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write node3 config: %v\n", err) - os.Exit(1) - } - fmt.Printf("✅ Generated node3 config: %s\n", node3Path) - - // Step 5: Generate gateway.yaml - gatewayName := "gateway.yaml" - gatewayPath := filepath.Join(debrosDir, gatewayName) - if !force { - if _, err := os.Stat(gatewayPath); err == nil { - fmt.Fprintf(os.Stderr, "Gateway config already exists at %s (use --force to overwrite)\n", gatewayPath) - os.Exit(1) - } - } - gatewayContent := GenerateGatewayConfig(bootstrapMultiaddr) - if err := os.WriteFile(gatewayPath, []byte(gatewayContent), 0644); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write gateway config: %v\n", err) - os.Exit(1) - } - fmt.Printf("✅ Generated gateway config: %s\n", gatewayPath) - - fmt.Printf("\n" + strings.Repeat("=", 60) + "\n") - fmt.Printf("✅ Full network stack initialized successfully!\n") - fmt.Printf(strings.Repeat("=", 60) + "\n") - fmt.Printf("\nBootstrap Peer ID: %s\n", bootstrapInfo.PeerID.String()) - fmt.Printf("Bootstrap Multiaddr: %s\n", bootstrapMultiaddr) - fmt.Printf("\nGenerated configs:\n") - fmt.Printf(" - %s\n", bootstrapPath) - fmt.Printf(" - %s\n", node2Path) - fmt.Printf(" - %s\n", node3Path) - fmt.Printf(" - %s\n", gatewayPath) - fmt.Printf("\nStart the network with: make dev\n") -} - -// GenerateNodeConfig generates a node configuration -func GenerateNodeConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int, joinAddr, bootstrapPeers string) string { - nodeID := id - if nodeID == "" { - nodeID = fmt.Sprintf("node-%d", time.Now().Unix()) - } - - // Parse bootstrap peers - var peers []string - if bootstrapPeers != "" { - for _, p := range strings.Split(bootstrapPeers, ",") { - if p = strings.TrimSpace(p); p != "" { - peers = append(peers, p) - } - } - } - - // Construct data_dir from name stem (remove .yaml) - dataDir := 
strings.TrimSuffix(name, ".yaml") - dataDir = filepath.Join(os.ExpandEnv("~"), ".debros", dataDir) - - var peersYAML strings.Builder - if len(peers) == 0 { - peersYAML.WriteString(" bootstrap_peers: []") - } else { - peersYAML.WriteString(" bootstrap_peers:\n") - for _, p := range peers { - fmt.Fprintf(&peersYAML, " - \"%s\"\n", p) - } - } - - if joinAddr == "" { - joinAddr = "localhost:5001" - } - - return fmt.Sprintf(`node: - id: "%s" - type: "node" - listen_addresses: - - "/ip4/0.0.0.0/tcp/%d" - data_dir: "%s" - max_connections: 50 - -database: - data_dir: "%s/rqlite" - replication_factor: 3 - shard_count: 16 - max_database_size: 1073741824 - backup_interval: "24h" - rqlite_port: %d - rqlite_raft_port: %d - rqlite_join_address: "%s" - cluster_sync_interval: "30s" - peer_inactivity_limit: "24h" - min_cluster_size: 1 - -discovery: -%s - discovery_interval: "15s" - bootstrap_port: %d - http_adv_address: "127.0.0.1:%d" - raft_adv_address: "127.0.0.1:%d" - node_namespace: "default" - -security: - enable_tls: false - -logging: - level: "info" - format: "console" -`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, joinAddr, peersYAML.String(), 4001, rqliteHTTPPort, rqliteRaftPort) -} - -// GenerateBootstrapConfig generates a bootstrap configuration -func GenerateBootstrapConfig(name, id string, listenPort, rqliteHTTPPort, rqliteRaftPort int) string { - nodeID := id - if nodeID == "" { - nodeID = "bootstrap" - } - - dataDir := filepath.Join(os.ExpandEnv("~"), ".debros", "bootstrap") - - return fmt.Sprintf(`node: - id: "%s" - type: "bootstrap" - listen_addresses: - - "/ip4/0.0.0.0/tcp/%d" - data_dir: "%s" - max_connections: 50 - -database: - data_dir: "%s/rqlite" - replication_factor: 3 - shard_count: 16 - max_database_size: 1073741824 - backup_interval: "24h" - rqlite_port: %d - rqlite_raft_port: %d - rqlite_join_address: "" - cluster_sync_interval: "30s" - peer_inactivity_limit: "24h" - min_cluster_size: 1 - -discovery: - bootstrap_peers: [] - 
discovery_interval: "15s" - bootstrap_port: %d - http_adv_address: "127.0.0.1:%d" - raft_adv_address: "127.0.0.1:%d" - node_namespace: "default" - -security: - enable_tls: false - -logging: - level: "info" - format: "console" -`, nodeID, listenPort, dataDir, dataDir, rqliteHTTPPort, rqliteRaftPort, 4001, rqliteHTTPPort, rqliteRaftPort) -} - -// GenerateGatewayConfig generates a gateway configuration -func GenerateGatewayConfig(bootstrapPeers string) string { - var peers []string - if bootstrapPeers != "" { - for _, p := range strings.Split(bootstrapPeers, ",") { - if p = strings.TrimSpace(p); p != "" { - peers = append(peers, p) - } - } - } - - var peersYAML strings.Builder - if len(peers) == 0 { - peersYAML.WriteString("bootstrap_peers: []") - } else { - peersYAML.WriteString("bootstrap_peers:\n") - for _, p := range peers { - fmt.Fprintf(&peersYAML, " - \"%s\"\n", p) - } - } - - return fmt.Sprintf(`listen_addr: ":6001" -client_namespace: "default" -rqlite_dsn: "" -%s -`, peersYAML.String()) -} diff --git a/pkg/cli/dev_commands.go b/pkg/cli/dev_commands.go new file mode 100644 index 0000000..de25a3e --- /dev/null +++ b/pkg/cli/dev_commands.go @@ -0,0 +1,194 @@ +package cli + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/DeBrosOfficial/network/pkg/environments/development" +) + +// HandleDevCommand handles the dev command group +func HandleDevCommand(args []string) { + if len(args) == 0 { + showDevHelp() + return + } + + subcommand := args[0] + subargs := args[1:] + + switch subcommand { + case "up": + handleDevUp(subargs) + case "down": + handleDevDown(subargs) + case "status": + handleDevStatus(subargs) + case "logs": + handleDevLogs(subargs) + case "help": + showDevHelp() + default: + fmt.Fprintf(os.Stderr, "Unknown dev subcommand: %s\n", subcommand) + showDevHelp() + os.Exit(1) + } +} + +func showDevHelp() { + fmt.Printf("🚀 Development Environment Commands\n\n") + fmt.Printf("Usage: dbn dev [options]\n\n") + 
fmt.Printf("Subcommands:\n") + fmt.Printf(" up - Start development environment (2 bootstraps + 3 nodes + gateway)\n") + fmt.Printf(" down - Stop all development services\n") + fmt.Printf(" status - Show status of running services\n") + fmt.Printf(" logs - Tail logs for a component\n") + fmt.Printf(" help - Show this help\n\n") + fmt.Printf("Examples:\n") + fmt.Printf(" dbn dev up\n") + fmt.Printf(" dbn dev down\n") + fmt.Printf(" dbn dev status\n") + fmt.Printf(" dbn dev logs bootstrap --follow\n") +} + +func handleDevUp(args []string) { + ctx := context.Background() + + // Get home directory and .debros path + homeDir, err := os.UserHomeDir() + if err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err) + os.Exit(1) + } + debrosDir := filepath.Join(homeDir, ".debros") + + // Step 1: Check dependencies + fmt.Printf("📋 Checking dependencies...\n\n") + checker := development.NewDependencyChecker() + if _, err := checker.CheckAll(); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + fmt.Printf("✓ All required dependencies available\n\n") + + // Step 2: Check ports + fmt.Printf("🔌 Checking port availability...\n\n") + portChecker := development.NewPortChecker() + if _, err := portChecker.CheckAll(); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n\n", err) + fmt.Fprintf(os.Stderr, "Port mapping:\n") + for port, service := range development.PortMap() { + fmt.Fprintf(os.Stderr, " %d - %s\n", port, service) + } + fmt.Fprintf(os.Stderr, "\n") + os.Exit(1) + } + fmt.Printf("✓ All required ports available\n\n") + + // Step 3: Ensure configs + fmt.Printf("⚙️ Preparing configuration files...\n\n") + ensurer := development.NewConfigEnsurer(debrosDir) + if err := ensurer.EnsureAll(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err) + os.Exit(1) + } + fmt.Printf("\n") + + // Step 4: Start services + pm := development.NewProcessManager(debrosDir, os.Stdout) + if err := pm.StartAll(ctx); err != nil { + 
fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err) + os.Exit(1) + } + + // Step 5: Show summary + fmt.Printf("🎉 Development environment is running!\n\n") + fmt.Printf("Key endpoints:\n") + fmt.Printf(" Gateway: http://localhost:6001\n") + fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n") + fmt.Printf(" Bootstrap2 IPFS: http://localhost:4511\n") + fmt.Printf(" Node2 IPFS: http://localhost:4502\n") + fmt.Printf(" Node3 IPFS: http://localhost:4503\n") + fmt.Printf(" Node4 IPFS: http://localhost:4504\n") + fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n") + fmt.Printf(" Olric Cache: http://localhost:3320\n\n") + fmt.Printf("Useful commands:\n") + fmt.Printf(" dbn dev status - Show status\n") + fmt.Printf(" dbn dev logs bootstrap - Bootstrap logs\n") + fmt.Printf(" dbn dev logs bootstrap2 - Bootstrap2 logs\n") + fmt.Printf(" dbn dev down - Stop all services\n\n") + fmt.Printf("Logs directory: %s/logs\n\n", debrosDir) +} + +func handleDevDown(args []string) { + homeDir, err := os.UserHomeDir() + if err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err) + os.Exit(1) + } + debrosDir := filepath.Join(homeDir, ".debros") + + pm := development.NewProcessManager(debrosDir, os.Stdout) + ctx := context.Background() + + if err := pm.StopAll(ctx); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", err) + } +} + +func handleDevStatus(args []string) { + homeDir, err := os.UserHomeDir() + if err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err) + os.Exit(1) + } + debrosDir := filepath.Join(homeDir, ".debros") + + pm := development.NewProcessManager(debrosDir, os.Stdout) + ctx := context.Background() + + pm.Status(ctx) +} + +func handleDevLogs(args []string) { + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "Usage: dbn dev logs [--follow]\n") + fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, bootstrap2, node2, node3, node4, gateway, ipfs-bootstrap, ipfs-bootstrap2, ipfs-node2, ipfs-node3, 
ipfs-node4, olric, anon\n") + os.Exit(1) + } + + component := args[0] + follow := len(args) > 1 && args[1] == "--follow" + + homeDir, err := os.UserHomeDir() + if err != nil { + fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err) + os.Exit(1) + } + debrosDir := filepath.Join(homeDir, ".debros") + + logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component)) + if _, err := os.Stat(logPath); os.IsNotExist(err) { + fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath) + os.Exit(1) + } + + if follow { + // Run tail -f + tailCmd := fmt.Sprintf("tail -f %s", logPath) + fmt.Printf("Following %s (press Ctrl+C to stop)...\n\n", logPath) + // syscall.Exec doesn't work in all environments, use exec.Command instead + cmd := exec.Command("sh", "-c", tailCmd) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + cmd.Run() + } else { + // Cat the file + data, _ := os.ReadFile(logPath) + fmt.Print(string(data)) + } +} diff --git a/pkg/cli/env_commands.go b/pkg/cli/env_commands.go index 064f871..4094a06 100644 --- a/pkg/cli/env_commands.go +++ b/pkg/cli/env_commands.go @@ -35,7 +35,7 @@ func HandleEnvCommand(args []string) { func showEnvHelp() { fmt.Printf("🌍 Environment Management Commands\n\n") - fmt.Printf("Usage: network-cli env \n\n") + fmt.Printf("Usage: dbn env \n\n") fmt.Printf("Subcommands:\n") fmt.Printf(" list - List all available environments\n") fmt.Printf(" current - Show current active environment\n") @@ -46,12 +46,12 @@ func showEnvHelp() { fmt.Printf(" devnet - Development network (https://devnet.debros.network)\n") fmt.Printf(" testnet - Test network (https://testnet.debros.network)\n\n") fmt.Printf("Examples:\n") - fmt.Printf(" network-cli env list\n") - fmt.Printf(" network-cli env current\n") - fmt.Printf(" network-cli env switch devnet\n") - fmt.Printf(" network-cli env enable testnet\n") - fmt.Printf(" network-cli devnet enable # Shorthand for switch to devnet\n") - fmt.Printf(" network-cli 
testnet enable # Shorthand for switch to testnet\n") + fmt.Printf(" dbn env list\n") + fmt.Printf(" dbn env current\n") + fmt.Printf(" dbn env switch devnet\n") + fmt.Printf(" dbn env enable testnet\n") + fmt.Printf(" dbn devnet enable # Shorthand for switch to devnet\n") + fmt.Printf(" dbn testnet enable # Shorthand for switch to testnet\n") } func handleEnvList() { @@ -99,7 +99,7 @@ func handleEnvCurrent() { func handleEnvSwitch(args []string) { if len(args) == 0 { - fmt.Fprintf(os.Stderr, "Usage: network-cli env switch \n") + fmt.Fprintf(os.Stderr, "Usage: dbn env switch \n") fmt.Fprintf(os.Stderr, "Available: local, devnet, testnet\n") os.Exit(1) } diff --git a/pkg/cli/prod_commands.go b/pkg/cli/prod_commands.go new file mode 100644 index 0000000..1ebe089 --- /dev/null +++ b/pkg/cli/prod_commands.go @@ -0,0 +1,1378 @@ +package cli + +import ( + "bufio" + "errors" + "flag" + "fmt" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/DeBrosOfficial/network/pkg/environments/production" + "github.com/multiformats/go-multiaddr" +) + +// normalizeBootstrapPeers normalizes and validates bootstrap peer multiaddrs +func normalizeBootstrapPeers(peersStr string) ([]string, error) { + if peersStr == "" { + return nil, nil + } + + // Split by comma and trim whitespace + rawPeers := strings.Split(peersStr, ",") + peers := make([]string, 0, len(rawPeers)) + seen := make(map[string]bool) + + for _, peer := range rawPeers { + peer = strings.TrimSpace(peer) + if peer == "" { + continue + } + + // Validate multiaddr format + if _, err := multiaddr.NewMultiaddr(peer); err != nil { + return nil, fmt.Errorf("invalid multiaddr %q: %w", peer, err) + } + + // Deduplicate + if !seen[peer] { + peers = append(peers, peer) + seen[peer] = true + } + } + + return peers, nil +} + +// HandleProdCommand handles production environment commands +func HandleProdCommand(args []string) { + if len(args) == 0 { + showProdHelp() + return + } + + 
subcommand := args[0] + subargs := args[1:] + + switch subcommand { + case "install": + handleProdInstall(subargs) + case "upgrade": + handleProdUpgrade(subargs) + case "status": + handleProdStatus() + case "start": + handleProdStart() + case "stop": + handleProdStop() + case "restart": + handleProdRestart() + case "logs": + handleProdLogs(subargs) + case "uninstall": + handleProdUninstall() + case "help": + showProdHelp() + default: + fmt.Fprintf(os.Stderr, "Unknown prod subcommand: %s\n", subcommand) + showProdHelp() + os.Exit(1) + } +} + +func showProdHelp() { + fmt.Printf("Production Environment Commands\n\n") + fmt.Printf("Usage: dbn prod [options]\n\n") + fmt.Printf("Subcommands:\n") + fmt.Printf(" install - Full production bootstrap (requires root/sudo)\n") + fmt.Printf(" Options:\n") + fmt.Printf(" --force - Reconfigure all settings\n") + fmt.Printf(" --bootstrap - Install as bootstrap node\n") + fmt.Printf(" --vps-ip IP - VPS public IP address (required for non-bootstrap)\n") + fmt.Printf(" --peers ADDRS - Comma-separated bootstrap peer multiaddrs (required for non-bootstrap)\n") + fmt.Printf(" --cluster-secret HEX - 64-hex cluster secret (required for non-bootstrap)\n") + fmt.Printf(" --bootstrap-join ADDR - Bootstrap raft join address (for secondary bootstrap)\n") + fmt.Printf(" --domain DOMAIN - Domain for HTTPS (optional)\n") + fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly, default: main)\n") + fmt.Printf(" --ignore-resource-checks - Skip disk/RAM/CPU prerequisite validation\n") + fmt.Printf(" upgrade - Upgrade existing installation (requires root/sudo)\n") + fmt.Printf(" Options:\n") + fmt.Printf(" --restart - Automatically restart services after upgrade\n") + fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly, uses saved preference if not specified)\n") + fmt.Printf(" --no-pull - Skip git clone/pull, use existing /home/debros/src\n") + fmt.Printf(" status - Show status of production services\n") + fmt.Printf(" start 
- Start all production services (requires root/sudo)\n") + fmt.Printf(" stop - Stop all production services (requires root/sudo)\n") + fmt.Printf(" restart - Restart all production services (requires root/sudo)\n") + fmt.Printf(" logs - View production service logs\n") + fmt.Printf(" Service aliases: node, ipfs, cluster, gateway, olric\n") + fmt.Printf(" Options:\n") + fmt.Printf(" --follow - Follow logs in real-time\n") + fmt.Printf(" uninstall - Remove production services (requires root/sudo)\n\n") + fmt.Printf("Examples:\n") + fmt.Printf(" # Bootstrap node (main branch)\n") + fmt.Printf(" sudo dbn prod install --bootstrap\n\n") + fmt.Printf(" # Bootstrap node (nightly branch)\n") + fmt.Printf(" sudo dbn prod install --bootstrap --branch nightly\n\n") + fmt.Printf(" # Join existing cluster\n") + fmt.Printf(" sudo dbn prod install --vps-ip 10.0.0.2 --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...\n\n") + fmt.Printf(" # Secondary bootstrap joining existing cluster\n") + fmt.Printf(" sudo dbn prod install --bootstrap --vps-ip 10.0.0.2 --bootstrap-join 10.0.0.1:7001 --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...\n\n") + fmt.Printf(" # Upgrade using saved branch preference\n") + fmt.Printf(" sudo dbn prod upgrade --restart\n\n") + fmt.Printf(" # Upgrade and switch to nightly branch\n") + fmt.Printf(" sudo dbn prod upgrade --restart --branch nightly\n\n") + fmt.Printf(" # Upgrade without pulling latest code (use existing /home/debros/src)\n") + fmt.Printf(" sudo dbn prod upgrade --restart --no-pull\n\n") + fmt.Printf(" # Service management\n") + fmt.Printf(" sudo dbn prod start\n") + fmt.Printf(" sudo dbn prod stop\n") + fmt.Printf(" sudo dbn prod restart\n\n") + fmt.Printf(" dbn prod status\n") + fmt.Printf(" dbn prod logs node --follow\n") + fmt.Printf(" dbn prod logs gateway --follow\n") +} + +func handleProdInstall(args []string) { + // Parse arguments using flag.FlagSet + fs := flag.NewFlagSet("install", flag.ContinueOnError) + fs.SetOutput(os.Stderr) + + force := 
fs.Bool("force", false, "Reconfigure all settings") + isBootstrap := fs.Bool("bootstrap", false, "Install as bootstrap node") + skipResourceChecks := fs.Bool("ignore-resource-checks", false, "Skip disk/RAM/CPU prerequisite validation") + vpsIP := fs.String("vps-ip", "", "VPS public IP address (required for non-bootstrap)") + domain := fs.String("domain", "", "Domain for HTTPS (optional)") + peersStr := fs.String("peers", "", "Comma-separated bootstrap peer multiaddrs (required for non-bootstrap)") + bootstrapJoin := fs.String("bootstrap-join", "", "Bootstrap raft join address (for secondary bootstrap)") + branch := fs.String("branch", "main", "Git branch to use (main or nightly)") + clusterSecret := fs.String("cluster-secret", "", "Hex-encoded 32-byte cluster secret (required for non-bootstrap nodes)") + + if err := fs.Parse(args); err != nil { + if err == flag.ErrHelp { + return + } + fmt.Fprintf(os.Stderr, "❌ Failed to parse flags: %v\n", err) + os.Exit(1) + } + + // Validate branch + if *branch != "main" && *branch != "nightly" { + fmt.Fprintf(os.Stderr, "❌ Invalid branch: %s (must be 'main' or 'nightly')\n", *branch) + os.Exit(1) + } + + // Normalize and validate bootstrap peers + bootstrapPeers, err := normalizeBootstrapPeers(*peersStr) + if err != nil { + fmt.Fprintf(os.Stderr, "❌ Invalid bootstrap peers: %v\n", err) + fmt.Fprintf(os.Stderr, " Example: --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...,/ip4/10.0.0.2/tcp/4001/p2p/Qm...\n") + os.Exit(1) + } + + // Validate setup requirements + if os.Geteuid() != 0 { + fmt.Fprintf(os.Stderr, "❌ Production install must be run as root (use sudo)\n") + os.Exit(1) + } + + // Validate bootstrap node requirements + if *isBootstrap { + if *vpsIP == "" { + fmt.Fprintf(os.Stderr, "❌ --vps-ip is required for bootstrap nodes\n") + fmt.Fprintf(os.Stderr, " Bootstrap nodes must advertise a public IP address for other nodes to connect\n") + fmt.Fprintf(os.Stderr, " Usage: sudo dbn prod install --bootstrap --vps-ip \n") + 
fmt.Fprintf(os.Stderr, " Example: sudo dbn prod install --bootstrap --vps-ip 203.0.113.1\n") + os.Exit(1) + } + // Validate secondary bootstrap requirements + if *bootstrapJoin == "" { + fmt.Fprintf(os.Stderr, "⚠️ Warning: Primary bootstrap node detected (--bootstrap without --bootstrap-join)\n") + fmt.Fprintf(os.Stderr, " This node will form a new cluster. To join existing cluster as secondary bootstrap:\n") + fmt.Fprintf(os.Stderr, " sudo dbn prod install --bootstrap --vps-ip %s --bootstrap-join :7001 --peers \n", *vpsIP) + } + } + + // Validate non-bootstrap node requirements + if !*isBootstrap { + if *vpsIP == "" { + fmt.Fprintf(os.Stderr, "❌ --vps-ip is required for non-bootstrap nodes\n") + fmt.Fprintf(os.Stderr, " Usage: sudo dbn prod install --vps-ip --peers \n") + os.Exit(1) + } + if len(bootstrapPeers) == 0 { + fmt.Fprintf(os.Stderr, "❌ --peers is required for non-bootstrap nodes\n") + fmt.Fprintf(os.Stderr, " Usage: sudo dbn prod install --vps-ip --peers \n") + fmt.Fprintf(os.Stderr, " Example: --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...\n") + os.Exit(1) + } + if *clusterSecret == "" { + fmt.Fprintf(os.Stderr, "❌ --cluster-secret is required for non-bootstrap nodes\n") + fmt.Fprintf(os.Stderr, " Provide the 64-hex secret from the bootstrap node (cat ~/.debros/secrets/cluster-secret)\n") + os.Exit(1) + } + } + + if *clusterSecret != "" { + if err := production.ValidateClusterSecret(*clusterSecret); err != nil { + fmt.Fprintf(os.Stderr, "❌ Invalid --cluster-secret: %v\n", err) + os.Exit(1) + } + } + + debrosHome := "/home/debros" + debrosDir := debrosHome + "/.debros" + setup := production.NewProductionSetup(debrosHome, os.Stdout, *force, *branch, false, *skipResourceChecks, *clusterSecret) + + // Check port availability before proceeding + if err := ensurePortsAvailable("prod install", defaultPorts()); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + + // Save branch preference for future upgrades + if err := 
production.SaveBranchPreference(debrosDir, *branch); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Warning: Failed to save branch preference: %v\n", err) + } + + // Phase 1: Check prerequisites + fmt.Printf("\n📋 Phase 1: Checking prerequisites...\n") + if err := setup.Phase1CheckPrerequisites(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Prerequisites check failed: %v\n", err) + os.Exit(1) + } + + // Phase 2: Provision environment + fmt.Printf("\n🛠️ Phase 2: Provisioning environment...\n") + if err := setup.Phase2ProvisionEnvironment(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Environment provisioning failed: %v\n", err) + os.Exit(1) + } + + // Phase 2b: Install binaries + fmt.Printf("\nPhase 2b: Installing binaries...\n") + if err := setup.Phase2bInstallBinaries(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Binary installation failed: %v\n", err) + os.Exit(1) + } + + // Determine node type early + nodeType := "node" + if *isBootstrap { + nodeType = "bootstrap" + } + + // Phase 3: Generate secrets FIRST (before service initialization) + // This ensures cluster secret and swarm key exist before repos are seeded + fmt.Printf("\n🔐 Phase 3: Generating secrets...\n") + if err := setup.Phase3GenerateSecrets(*isBootstrap); err != nil { + fmt.Fprintf(os.Stderr, "❌ Secret generation failed: %v\n", err) + os.Exit(1) + } + + // Phase 2c: Initialize services (after secrets are in place) + fmt.Printf("\nPhase 2c: Initializing services...\n") + if err := setup.Phase2cInitializeServices(nodeType, bootstrapPeers, *vpsIP); err != nil { + fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err) + os.Exit(1) + } + + // Phase 4: Generate configs + fmt.Printf("\n⚙️ Phase 4: Generating configurations...\n") + enableHTTPS := *domain != "" + if err := setup.Phase4GenerateConfigs(*isBootstrap, bootstrapPeers, *vpsIP, enableHTTPS, *domain, *bootstrapJoin); err != nil { + fmt.Fprintf(os.Stderr, "❌ Configuration generation failed: %v\n", err) + os.Exit(1) + } + + // Phase 5: Create 
systemd services + fmt.Printf("\n🔧 Phase 5: Creating systemd services...\n") + if err := setup.Phase5CreateSystemdServices(nodeType, *vpsIP); err != nil { + fmt.Fprintf(os.Stderr, "❌ Service creation failed: %v\n", err) + os.Exit(1) + } + + // Give services a moment to fully initialize before verification + fmt.Printf("\n⏳ Waiting for services to initialize...\n") + time.Sleep(5 * time.Second) + + // Verify all services are running correctly + if err := verifyProductionRuntime("prod install"); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + fmt.Fprintf(os.Stderr, " Installation completed but services are not healthy. Check logs with: dbn prod logs \n") + os.Exit(1) + } + + // Log completion with actual peer ID + setup.LogSetupComplete(setup.NodePeerID) + fmt.Printf("✅ Production installation complete and healthy!\n\n") +} + +func handleProdUpgrade(args []string) { + // Parse arguments using flag.FlagSet + fs := flag.NewFlagSet("upgrade", flag.ContinueOnError) + fs.SetOutput(os.Stderr) + + force := fs.Bool("force", false, "Reconfigure all settings") + restartServices := fs.Bool("restart", false, "Automatically restart services after upgrade") + noPull := fs.Bool("no-pull", false, "Skip git clone/pull, use existing /home/debros/src") + branch := fs.String("branch", "", "Git branch to use (main or nightly, uses saved preference if not specified)") + + // Support legacy flags for backwards compatibility + fs.Bool("nightly", false, "Use nightly branch (deprecated, use --branch nightly)") + fs.Bool("main", false, "Use main branch (deprecated, use --branch main)") + + if err := fs.Parse(args); err != nil { + if err == flag.ErrHelp { + return + } + fmt.Fprintf(os.Stderr, "❌ Failed to parse flags: %v\n", err) + os.Exit(1) + } + + // Handle legacy flags + nightlyFlag := fs.Lookup("nightly") + mainFlag := fs.Lookup("main") + if nightlyFlag != nil && nightlyFlag.Value.String() == "true" { + *branch = "nightly" + } + if mainFlag != nil && mainFlag.Value.String() == 
"true" { + *branch = "main" + } + + // Validate branch if provided + if *branch != "" && *branch != "main" && *branch != "nightly" { + fmt.Fprintf(os.Stderr, "❌ Invalid branch: %s (must be 'main' or 'nightly')\n", *branch) + os.Exit(1) + } + + if os.Geteuid() != 0 { + fmt.Fprintf(os.Stderr, "❌ Production upgrade must be run as root (use sudo)\n") + os.Exit(1) + } + + debrosHome := "/home/debros" + debrosDir := debrosHome + "/.debros" + fmt.Printf("🔄 Upgrading production installation...\n") + fmt.Printf(" This will preserve existing configurations and data\n") + fmt.Printf(" Configurations will be updated to latest format\n\n") + + setup := production.NewProductionSetup(debrosHome, os.Stdout, *force, *branch, *noPull, false, "") + + // Log if --no-pull is enabled + if *noPull { + fmt.Printf(" ⚠️ --no-pull flag enabled: Skipping git clone/pull\n") + fmt.Printf(" Using existing repository at %s/src\n", debrosHome) + } + + // If branch was explicitly provided, save it for future upgrades + if *branch != "" { + if err := production.SaveBranchPreference(debrosDir, *branch); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Warning: Failed to save branch preference: %v\n", err) + } else { + fmt.Printf(" Using branch: %s (saved for future upgrades)\n", *branch) + } + } else { + // Show which branch is being used (read from saved preference) + currentBranch := production.ReadBranchPreference(debrosDir) + fmt.Printf(" Using branch: %s (from saved preference)\n", currentBranch) + } + + // Phase 1: Check prerequisites + fmt.Printf("\n📋 Phase 1: Checking prerequisites...\n") + if err := setup.Phase1CheckPrerequisites(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Prerequisites check failed: %v\n", err) + os.Exit(1) + } + + // Phase 2: Provision environment (ensures directories exist) + fmt.Printf("\n🛠️ Phase 2: Provisioning environment...\n") + if err := setup.Phase2ProvisionEnvironment(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Environment provisioning failed: %v\n", err) + os.Exit(1) + 
} + + // Stop services before upgrading binaries (if this is an upgrade) + if setup.IsUpdate() { + fmt.Printf("\n⏹️ Stopping services before upgrade...\n") + serviceController := production.NewSystemdController() + services := []string{ + "debros-gateway.service", + "debros-node-bootstrap.service", + "debros-node-node.service", + "debros-ipfs-cluster-bootstrap.service", + "debros-ipfs-cluster-node.service", + "debros-ipfs-bootstrap.service", + "debros-ipfs-node.service", + // Note: RQLite is managed by node process, not as separate service + "debros-olric.service", + } + for _, svc := range services { + unitPath := filepath.Join("/etc/systemd/system", svc) + if _, err := os.Stat(unitPath); err == nil { + if err := serviceController.StopService(svc); err != nil { + fmt.Printf(" ⚠️ Warning: Failed to stop %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Stopped %s\n", svc) + } + } + } + // Give services time to shut down gracefully + time.Sleep(2 * time.Second) + } + + // Check port availability after stopping services + if err := ensurePortsAvailable("prod upgrade", defaultPorts()); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + + // Phase 2b: Install/update binaries + fmt.Printf("\nPhase 2b: Installing/updating binaries...\n") + if err := setup.Phase2bInstallBinaries(); err != nil { + fmt.Fprintf(os.Stderr, "❌ Binary installation failed: %v\n", err) + os.Exit(1) + } + + // Detect node type from existing installation + nodeType := "node" + if setup.IsUpdate() { + // Check if bootstrap config exists + bootstrapConfig := filepath.Join("/home/debros/.debros", "configs", "bootstrap.yaml") + if _, err := os.Stat(bootstrapConfig); err == nil { + nodeType = "bootstrap" + } else { + // Check data directory structure + bootstrapDataPath := filepath.Join("/home/debros/.debros", "data", "bootstrap") + if _, err := os.Stat(bootstrapDataPath); err == nil { + nodeType = "bootstrap" + } + } + fmt.Printf(" Detected node type: %s\n", nodeType) + } else { + 
fmt.Printf(" ⚠️ No existing installation detected, treating as fresh install\n") + fmt.Printf(" Use 'dbn prod install --bootstrap' for fresh bootstrap installation\n") + nodeType = "bootstrap" // Default for upgrade if nothing exists + } + + // Phase 3: Ensure secrets exist (preserves existing secrets) + fmt.Printf("\n🔐 Phase 3: Ensuring secrets...\n") + if err := setup.Phase3GenerateSecrets(nodeType == "bootstrap"); err != nil { + fmt.Fprintf(os.Stderr, "❌ Secret generation failed: %v\n", err) + os.Exit(1) + } + + // Phase 4: Regenerate configs (updates to latest format) + // Preserve existing config settings (bootstrap_peers, domain, join_address, etc.) + enableHTTPS := false + domain := "" + bootstrapJoin := "" + + // Helper function to extract multiaddr list from config + extractBootstrapPeers := func(configPath string) []string { + var peers []string + if data, err := os.ReadFile(configPath); err == nil { + configStr := string(data) + inBootstrapPeers := false + for _, line := range strings.Split(configStr, "\n") { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "bootstrap_peers:") || strings.HasPrefix(trimmed, "bootstrap peers:") { + inBootstrapPeers = true + continue + } + if inBootstrapPeers { + if strings.HasPrefix(trimmed, "-") { + // Extract multiaddr after the dash + parts := strings.SplitN(trimmed, "-", 2) + if len(parts) > 1 { + peer := strings.TrimSpace(parts[1]) + peer = strings.Trim(peer, "\"'") + if peer != "" && strings.HasPrefix(peer, "/") { + peers = append(peers, peer) + } + } + } else if trimmed == "" || !strings.HasPrefix(trimmed, "-") { + // End of bootstrap_peers list + break + } + } + } + } + return peers + } + + // Read existing node config to preserve bootstrap_peers and join_address + nodeConfigFile := "bootstrap.yaml" + if nodeType == "node" { + nodeConfigFile = "node.yaml" + } + nodeConfigPath := filepath.Join(debrosDir, "configs", nodeConfigFile) + + // Extract bootstrap peers from existing node config + 
bootstrapPeers := extractBootstrapPeers(nodeConfigPath) + + // Extract VPS IP from advertise addresses and bootstrap join address + vpsIP := "" + if data, err := os.ReadFile(nodeConfigPath); err == nil { + configStr := string(data) + for _, line := range strings.Split(configStr, "\n") { + trimmed := strings.TrimSpace(line) + // Try to extract VPS IP from http_adv_address or raft_adv_address + // Only set if not already found (first valid IP wins) + if vpsIP == "" && (strings.HasPrefix(trimmed, "http_adv_address:") || strings.HasPrefix(trimmed, "raft_adv_address:")) { + parts := strings.SplitN(trimmed, ":", 2) + if len(parts) > 1 { + addr := strings.TrimSpace(parts[1]) + addr = strings.Trim(addr, "\"'") + if addr != "" && addr != "null" && addr != "localhost:5001" && addr != "localhost:7001" { + // Extract IP from address (format: "IP:PORT" or "[IPv6]:PORT") + if host, _, err := net.SplitHostPort(addr); err == nil && host != "" && host != "localhost" { + vpsIP = host + // Continue loop to also check for bootstrap join address + } + } + } + } + // Extract bootstrap join address if it's a bootstrap node + if nodeType == "bootstrap" && strings.HasPrefix(trimmed, "rqlite_join_address:") { + parts := strings.SplitN(trimmed, ":", 2) + if len(parts) > 1 { + bootstrapJoin = strings.TrimSpace(parts[1]) + bootstrapJoin = strings.Trim(bootstrapJoin, "\"'") + if bootstrapJoin == "null" || bootstrapJoin == "" { + bootstrapJoin = "" + } + } + } + } + } + + // Read existing gateway config to preserve domain and HTTPS settings + gatewayConfigPath := filepath.Join(debrosDir, "configs", "gateway.yaml") + if data, err := os.ReadFile(gatewayConfigPath); err == nil { + configStr := string(data) + if strings.Contains(configStr, "domain:") { + for _, line := range strings.Split(configStr, "\n") { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(trimmed, "domain:") { + parts := strings.SplitN(trimmed, ":", 2) + if len(parts) > 1 { + domain = strings.TrimSpace(parts[1]) + if 
domain != "" && domain != "\"\"" && domain != "''" && domain != "null" { + domain = strings.Trim(domain, "\"'") + enableHTTPS = true + } else { + domain = "" + } + } + break + } + } + } + } + + fmt.Printf(" Preserving existing configuration:\n") + if len(bootstrapPeers) > 0 { + fmt.Printf(" - Bootstrap peers: %d peer(s) preserved\n", len(bootstrapPeers)) + } + if vpsIP != "" { + fmt.Printf(" - VPS IP: %s\n", vpsIP) + } + if domain != "" { + fmt.Printf(" - Domain: %s\n", domain) + } + if bootstrapJoin != "" { + fmt.Printf(" - Bootstrap join address: %s\n", bootstrapJoin) + } + + // Phase 2c: Ensure services are properly initialized (fixes existing repos) + // Now that we have bootstrap peers and VPS IP, we can properly configure IPFS Cluster + fmt.Printf("\nPhase 2c: Ensuring services are properly initialized...\n") + if err := setup.Phase2cInitializeServices(nodeType, bootstrapPeers, vpsIP); err != nil { + fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err) + os.Exit(1) + } + + if err := setup.Phase4GenerateConfigs(nodeType == "bootstrap", bootstrapPeers, vpsIP, enableHTTPS, domain, bootstrapJoin); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Config generation warning: %v\n", err) + fmt.Fprintf(os.Stderr, " Existing configs preserved\n") + } + + // Phase 5: Update systemd services + fmt.Printf("\n🔧 Phase 5: Updating systemd services...\n") + if err := setup.Phase5CreateSystemdServices(nodeType, ""); err != nil { + fmt.Fprintf(os.Stderr, "⚠️ Service update warning: %v\n", err) + } + + fmt.Printf("\n✅ Upgrade complete!\n") + if *restartServices { + fmt.Printf(" Restarting services...\n") + // Reload systemd daemon + if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { + fmt.Fprintf(os.Stderr, " ⚠️ Warning: Failed to reload systemd daemon: %v\n", err) + } + // Restart services to apply changes - use getProductionServices to only restart existing services + services := getProductionServices() + if len(services) == 0 { + fmt.Printf(" ⚠️ 
No services found to restart\n") + } else { + for _, svc := range services { + if err := exec.Command("systemctl", "restart", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to restart %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Restarted %s\n", svc) + } + } + fmt.Printf(" ✓ All services restarted\n") + // Give services a moment to fully initialize before verification + fmt.Printf(" ⏳ Waiting for services to initialize...\n") + time.Sleep(5 * time.Second) + // Verify services are healthy after restart + if err := verifyProductionRuntime("prod upgrade --restart"); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + fmt.Fprintf(os.Stderr, " Upgrade completed but services are not healthy. Check logs with: dbn prod logs \n") + os.Exit(1) + } + fmt.Printf(" ✅ All services verified healthy\n") + } + } else { + fmt.Printf(" To apply changes, restart services:\n") + fmt.Printf(" sudo systemctl daemon-reload\n") + fmt.Printf(" sudo systemctl restart debros-*\n") + } + fmt.Printf("\n") +} + +func handleProdStatus() { + fmt.Printf("Production Environment Status\n\n") + + // Check for all possible service names (bootstrap and node variants) + serviceNames := []string{ + "debros-ipfs-bootstrap", + "debros-ipfs-node", + "debros-ipfs-cluster-bootstrap", + "debros-ipfs-cluster-node", + // Note: RQLite is managed by node process, not as separate service + "debros-olric", + "debros-node-bootstrap", + "debros-node-node", + "debros-gateway", + } + + // Friendly descriptions + descriptions := map[string]string{ + "debros-ipfs-bootstrap": "IPFS Daemon (Bootstrap)", + "debros-ipfs-node": "IPFS Daemon (Node)", + "debros-ipfs-cluster-bootstrap": "IPFS Cluster (Bootstrap)", + "debros-ipfs-cluster-node": "IPFS Cluster (Node)", + "debros-olric": "Olric Cache Server", + "debros-node-bootstrap": "DeBros Node (Bootstrap) - includes RQLite", + "debros-node-node": "DeBros Node (Node) - includes RQLite", + "debros-gateway": "DeBros Gateway", + } + + fmt.Printf("Services:\n") + found := 
false + for _, svc := range serviceNames { + cmd := exec.Command("systemctl", "is-active", "--quiet", svc) + err := cmd.Run() + status := "❌ Inactive" + if err == nil { + status = "✅ Active" + found = true + } + fmt.Printf(" %s: %s\n", status, descriptions[svc]) + } + + if !found { + fmt.Printf(" (No services found - installation may be incomplete)\n") + } + + fmt.Printf("\nDirectories:\n") + debrosDir := "/home/debros/.debros" + if _, err := os.Stat(debrosDir); err == nil { + fmt.Printf(" ✅ %s exists\n", debrosDir) + } else { + fmt.Printf(" ❌ %s not found\n", debrosDir) + } + + fmt.Printf("\nView logs with: dbn prod logs <service>\n") +} + +// resolveServiceName resolves service aliases to actual systemd service names +func resolveServiceName(alias string) ([]string, error) { + // Service alias mapping + aliases := map[string][]string{ + "node": {"debros-node-bootstrap", "debros-node-node"}, + "ipfs": {"debros-ipfs-bootstrap", "debros-ipfs-node"}, + "cluster": {"debros-ipfs-cluster-bootstrap", "debros-ipfs-cluster-node"}, + "ipfs-cluster": {"debros-ipfs-cluster-bootstrap", "debros-ipfs-cluster-node"}, + "gateway": {"debros-gateway"}, + "olric": {"debros-olric"}, + "rqlite": {"debros-node-bootstrap", "debros-node-node"}, // RQLite logs are in node logs + } + + // Check if it's an alias + if serviceNames, ok := aliases[strings.ToLower(alias)]; ok { + // Filter to only existing services + var existing []string + for _, svc := range serviceNames { + unitPath := filepath.Join("/etc/systemd/system", svc+".service") + if _, err := os.Stat(unitPath); err == nil { + existing = append(existing, svc) + } + } + if len(existing) == 0 { + return nil, fmt.Errorf("no services found for alias %q", alias) + } + return existing, nil + } + + // Check if it's already a full service name + unitPath := filepath.Join("/etc/systemd/system", alias+".service") + if _, err := os.Stat(unitPath); err == nil { + return []string{alias}, nil + } + + // Try without .service suffix + if 
!strings.HasSuffix(alias, ".service") { + unitPath = filepath.Join("/etc/systemd/system", alias+".service") + if _, err := os.Stat(unitPath); err == nil { + return []string{alias}, nil + } + } + + return nil, fmt.Errorf("service %q not found. Use: node, ipfs, cluster, gateway, olric, or full service name", alias) +} + +func handleProdLogs(args []string) { + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "Usage: dbn prod logs <service> [--follow]\n") + fmt.Fprintf(os.Stderr, "\nService aliases:\n") + fmt.Fprintf(os.Stderr, " node, ipfs, cluster, gateway, olric\n") + fmt.Fprintf(os.Stderr, "\nOr use full service name:\n") + fmt.Fprintf(os.Stderr, " debros-node-bootstrap, debros-gateway, etc.\n") + os.Exit(1) + } + + serviceAlias := args[0] + follow := false + if len(args) > 1 && (args[1] == "--follow" || args[1] == "-f") { + follow = true + } + + // Resolve service alias to actual service names + serviceNames, err := resolveServiceName(serviceAlias) + if err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + fmt.Fprintf(os.Stderr, "\nAvailable service aliases: node, ipfs, cluster, gateway, olric\n") + fmt.Fprintf(os.Stderr, "Or use full service name like: debros-node-bootstrap\n") + os.Exit(1) + } + + // If multiple services match, show all of them + if len(serviceNames) > 1 { + if follow { + fmt.Fprintf(os.Stderr, "⚠️ Multiple services match alias %q:\n", serviceAlias) + for _, svc := range serviceNames { + fmt.Fprintf(os.Stderr, " - %s\n", svc) + } + fmt.Fprintf(os.Stderr, "\nShowing logs for all matching services...\n\n") + // Use journalctl with multiple units (build args correctly) + args := []string{} + for _, svc := range serviceNames { + args = append(args, "-u", svc) + } + args = append(args, "-f") + cmd := exec.Command("journalctl", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + cmd.Run() + } else { + for i, svc := range serviceNames { + if i > 0 { + fmt.Printf("\n" + strings.Repeat("=", 70) + "\n\n") + } + fmt.Printf("📋 Logs for %s:\n\n", svc) + cmd := exec.Command("journalctl", "-u", svc, "-n", "50") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() + } + } + return + } + + // Single service + service := serviceNames[0] + if follow { + fmt.Printf("Following logs for %s (press Ctrl+C to stop)...\n\n", service) + cmd := exec.Command("journalctl", "-u", service, "-f") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + cmd.Run() + } else { + cmd := exec.Command("journalctl", "-u", service, "-n", "50") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() + } +} + +// errServiceNotFound marks units that systemd does not know about. +var errServiceNotFound = errors.New("service not found") + +type portSpec struct { + Name string + Port int +} + +var servicePorts = map[string][]portSpec{ + "debros-gateway": {{"Gateway API", 6001}}, + "debros-olric": {{"Olric HTTP", 3320}, {"Olric Memberlist", 3322}}, + "debros-node-bootstrap": {{"RQLite HTTP", 5001}, {"RQLite Raft", 7001}, {"IPFS Cluster API", 9094}}, + "debros-node-node": {{"RQLite HTTP", 5001}, {"RQLite Raft", 7001}, {"IPFS Cluster API", 9094}}, + "debros-ipfs-bootstrap": {{"IPFS API", 4501}, {"IPFS Gateway", 8080}, {"IPFS Swarm", 4001}}, + "debros-ipfs-node": {{"IPFS API", 4501}, {"IPFS Gateway", 8080}, {"IPFS Swarm", 4001}}, + "debros-ipfs-cluster-bootstrap": {{"IPFS Cluster API", 9094}}, + "debros-ipfs-cluster-node": {{"IPFS Cluster API", 9094}}, +} + +// defaultPorts is used for fresh installs/upgrades before unit files exist. 
+func defaultPorts() []portSpec { + return []portSpec{ + {"IPFS Swarm", 4001}, + {"IPFS API", 4501}, + {"IPFS Gateway", 8080}, + {"Gateway API", 6001}, + {"RQLite HTTP", 5001}, + {"RQLite Raft", 7001}, + {"IPFS Cluster API", 9094}, + {"Olric HTTP", 3320}, + {"Olric Memberlist", 3322}, + } +} + +func isServiceActive(service string) (bool, error) { + cmd := exec.Command("systemctl", "is-active", "--quiet", service) + if err := cmd.Run(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + switch exitErr.ExitCode() { + case 3: + return false, nil + case 4: + return false, errServiceNotFound + } + } + return false, err + } + return true, nil +} + +func isServiceEnabled(service string) (bool, error) { + cmd := exec.Command("systemctl", "is-enabled", "--quiet", service) + if err := cmd.Run(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + switch exitErr.ExitCode() { + case 1: + return false, nil // Service is disabled + case 4: + return false, errServiceNotFound + } + } + return false, err + } + return true, nil +} + +func collectPortsForServices(services []string, skipActive bool) ([]portSpec, error) { + seen := make(map[int]portSpec) + for _, svc := range services { + if skipActive { + active, err := isServiceActive(svc) + if err != nil { + return nil, fmt.Errorf("unable to check %s: %w", svc, err) + } + if active { + continue + } + } + for _, spec := range servicePorts[svc] { + if _, ok := seen[spec.Port]; !ok { + seen[spec.Port] = spec + } + } + } + ports := make([]portSpec, 0, len(seen)) + for _, spec := range seen { + ports = append(ports, spec) + } + return ports, nil +} + +func ensurePortsAvailable(action string, ports []portSpec) error { + for _, spec := range ports { + ln, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", spec.Port)) + if err != nil { + if errors.Is(err, syscall.EADDRINUSE) || strings.Contains(err.Error(), "address already in use") { + return fmt.Errorf("%s cannot continue: %s (port %d) is already in use", action, 
spec.Name, spec.Port) + } + return fmt.Errorf("%s cannot continue: failed to inspect %s (port %d): %w", action, spec.Name, spec.Port, err) + } + _ = ln.Close() + } + return nil +} + +func checkHTTP(client *http.Client, method, url, label string) error { + req, err := http.NewRequest(method, url, nil) + if err != nil { + return fmt.Errorf("%s check failed: %w", label, err) + } + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("%s check failed: %w", label, err) + } + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("%s returned HTTP %d", label, resp.StatusCode) + } + return nil +} + +func serviceExists(name string) bool { + unitPath := filepath.Join("/etc/systemd/system", name+".service") + _, err := os.Stat(unitPath) + return err == nil +} + +func verifyProductionRuntime(action string) error { + services := getProductionServices() + issues := make([]string, 0) + + for _, svc := range services { + active, err := isServiceActive(svc) + if err != nil { + issues = append(issues, fmt.Sprintf("%s status unknown (%v)", svc, err)) + continue + } + if !active { + issues = append(issues, fmt.Sprintf("%s is inactive", svc)) + } + } + + client := &http.Client{Timeout: 3 * time.Second} + + if err := checkHTTP(client, "GET", "http://127.0.0.1:5001/status", "RQLite status"); err == nil { + } else if serviceExists("debros-node-bootstrap") || serviceExists("debros-node-node") { + issues = append(issues, err.Error()) + } + + if err := checkHTTP(client, "POST", "http://127.0.0.1:4501/api/v0/version", "IPFS API"); err == nil { + } else if serviceExists("debros-ipfs-bootstrap") || serviceExists("debros-ipfs-node") { + issues = append(issues, err.Error()) + } + + if err := checkHTTP(client, "GET", "http://127.0.0.1:9094/health", "IPFS Cluster"); err == nil { + } else if serviceExists("debros-ipfs-cluster-bootstrap") || serviceExists("debros-ipfs-cluster-node") { + issues = append(issues, err.Error()) + } + + if err := 
checkHTTP(client, "GET", "http://127.0.0.1:6001/health", "Gateway health"); err == nil { + } else if serviceExists("debros-gateway") { + issues = append(issues, err.Error()) + } + + if err := checkHTTP(client, "GET", "http://127.0.0.1:3320/ping", "Olric ping"); err == nil { + } else if serviceExists("debros-olric") { + issues = append(issues, err.Error()) + } + + if len(issues) > 0 { + return fmt.Errorf("%s verification failed:\n - %s", action, strings.Join(issues, "\n - ")) + } + return nil +} + +// getProductionServices returns a list of all DeBros production service names that exist +func getProductionServices() []string { + // All possible service names (both bootstrap and node variants) + allServices := []string{ + "debros-gateway", + "debros-node-node", + "debros-node-bootstrap", + "debros-olric", + // Note: RQLite is managed by node process, not as separate service + "debros-ipfs-cluster-bootstrap", + "debros-ipfs-cluster-node", + "debros-ipfs-bootstrap", + "debros-ipfs-node", + } + + // Filter to only existing services by checking if unit file exists + var existing []string + for _, svc := range allServices { + unitPath := filepath.Join("/etc/systemd/system", svc+".service") + if _, err := os.Stat(unitPath); err == nil { + existing = append(existing, svc) + } + } + + return existing +} + +func isServiceMasked(service string) (bool, error) { + cmd := exec.Command("systemctl", "is-enabled", service) + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "masked") { + return true, nil + } + return false, err + } + return false, nil +} + +func handleProdStart() { + if os.Geteuid() != 0 { + fmt.Fprintf(os.Stderr, "❌ Production commands must be run as root (use sudo)\n") + os.Exit(1) + } + + fmt.Printf("Starting all DeBros production services...\n") + + services := getProductionServices() + if len(services) == 0 { + fmt.Printf(" ⚠️ No DeBros services found\n") + return + } + + // Reset failed state 
for all services before starting + // This helps with services that were previously in failed state + resetArgs := []string{"reset-failed"} + resetArgs = append(resetArgs, services...) + exec.Command("systemctl", resetArgs...).Run() + + // Check which services are inactive and need to be started + inactive := make([]string, 0, len(services)) + for _, svc := range services { + // Check if service is masked and unmask it + masked, err := isServiceMasked(svc) + if err == nil && masked { + fmt.Printf(" ⚠️ %s is masked, unmasking...\n", svc) + if err := exec.Command("systemctl", "unmask", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to unmask %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Unmasked %s\n", svc) + } + } + + active, err := isServiceActive(svc) + if err != nil { + fmt.Printf(" ⚠️ Unable to check %s: %v\n", svc, err) + continue + } + if active { + fmt.Printf(" ℹ️ %s already running\n", svc) + // Re-enable if disabled (in case it was stopped with 'dbn prod stop') + enabled, err := isServiceEnabled(svc) + if err == nil && !enabled { + if err := exec.Command("systemctl", "enable", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to re-enable %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Re-enabled %s (will auto-start on boot)\n", svc) + } + } + continue + } + inactive = append(inactive, svc) + } + + if len(inactive) == 0 { + fmt.Printf("\n✅ All services already running\n") + return + } + + // Check port availability for services we're about to start + ports, err := collectPortsForServices(inactive, false) + if err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + if err := ensurePortsAvailable("prod start", ports); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + + // Enable and start inactive services + for _, svc := range inactive { + // Re-enable the service first (in case it was disabled by 'dbn prod stop') + enabled, err := isServiceEnabled(svc) + if err == nil && !enabled { + if err := 
exec.Command("systemctl", "enable", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to enable %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Enabled %s (will auto-start on boot)\n", svc) + } + } + + // Start the service + if err := exec.Command("systemctl", "start", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to start %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Started %s\n", svc) + } + } + + // Give services more time to fully initialize before verification + // Some services may need more time to start up, especially if they're + // waiting for dependencies or initializing databases + fmt.Printf(" ⏳ Waiting for services to initialize...\n") + time.Sleep(5 * time.Second) + + // Wait for services to actually become active (with retries) + maxRetries := 6 + for i := 0; i < maxRetries; i++ { + allActive := true + for _, svc := range inactive { + active, err := isServiceActive(svc) + if err != nil || !active { + allActive = false + break + } + } + if allActive { + break + } + if i < maxRetries-1 { + time.Sleep(2 * time.Second) + } + } + + // Verify all services are healthy + if err := verifyProductionRuntime("prod start"); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + fmt.Fprintf(os.Stderr, "\n Services may still be starting. 
Check status with:\n") + fmt.Fprintf(os.Stderr, " systemctl status debros-*\n") + fmt.Fprintf(os.Stderr, " dbn prod logs <service>\n") + os.Exit(1) + } + + fmt.Printf("\n✅ All services started and healthy\n") +} + +func handleProdStop() { + if os.Geteuid() != 0 { + fmt.Fprintf(os.Stderr, "❌ Production commands must be run as root (use sudo)\n") + os.Exit(1) + } + + fmt.Printf("Stopping all DeBros production services...\n") + + services := getProductionServices() + if len(services) == 0 { + fmt.Printf(" ⚠️ No DeBros services found\n") + return + } + + // Stop all services at once using a single systemctl command + // This is more efficient and ensures they all stop together + stopArgs := []string{"stop"} + stopArgs = append(stopArgs, services...) + if err := exec.Command("systemctl", stopArgs...).Run(); err != nil { + fmt.Printf(" ⚠️ Warning: Some services may have failed to stop: %v\n", err) + // Continue anyway - we'll verify and handle individually below + } + + // Wait a moment for services to fully stop + time.Sleep(2 * time.Second) + + // Reset failed state for any services that might be in failed state + // This helps with services stuck in "activating auto-restart" + resetArgs := []string{"reset-failed"} + resetArgs = append(resetArgs, services...) 
+ exec.Command("systemctl", resetArgs...).Run() + + // Wait again after reset-failed + time.Sleep(1 * time.Second) + + // Stop again to ensure they're stopped (in case reset-failed caused a restart) + exec.Command("systemctl", stopArgs...).Run() + time.Sleep(1 * time.Second) + + hadError := false + for _, svc := range services { + active, err := isServiceActive(svc) + if err != nil { + fmt.Printf(" ⚠️ Unable to check %s: %v\n", svc, err) + hadError = true + continue + } + if !active { + fmt.Printf(" ✓ Stopped %s\n", svc) + } else { + // Service is still active, try stopping it individually + fmt.Printf(" ⚠️ %s still active, attempting individual stop...\n", svc) + if err := exec.Command("systemctl", "stop", svc).Run(); err != nil { + fmt.Printf(" ❌ Failed to stop %s: %v\n", svc, err) + hadError = true + } else { + // Wait and verify again + time.Sleep(1 * time.Second) + if stillActive, _ := isServiceActive(svc); stillActive { + fmt.Printf(" ❌ %s restarted itself (Restart=always)\n", svc) + hadError = true + } else { + fmt.Printf(" ✓ Stopped %s\n", svc) + } + } + } + + // Disable the service to prevent it from auto-starting on boot + enabled, err := isServiceEnabled(svc) + if err != nil { + fmt.Printf(" ⚠️ Unable to check if %s is enabled: %v\n", svc, err) + // Continue anyway - try to disable + } + if enabled { + if err := exec.Command("systemctl", "disable", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to disable %s: %v\n", svc, err) + hadError = true + } else { + fmt.Printf(" ✓ Disabled %s (will not auto-start on boot)\n", svc) + } + } else { + fmt.Printf(" ℹ️ %s already disabled\n", svc) + } + } + + if hadError { + fmt.Fprintf(os.Stderr, "\n⚠️ Some services may still be restarting due to Restart=always\n") + fmt.Fprintf(os.Stderr, " Check status with: systemctl list-units 'debros-*'\n") + fmt.Fprintf(os.Stderr, " If services are still restarting, they may need manual intervention\n") + } else { + fmt.Printf("\n✅ All services stopped and disabled (will not 
auto-start on boot)\n") + fmt.Printf(" Use 'dbn prod start' to start and re-enable services\n") + } +} + +func handleProdRestart() { + if os.Geteuid() != 0 { + fmt.Fprintf(os.Stderr, "❌ Production commands must be run as root (use sudo)\n") + os.Exit(1) + } + + fmt.Printf("Restarting all DeBros production services...\n") + + services := getProductionServices() + if len(services) == 0 { + fmt.Printf(" ⚠️ No DeBros services found\n") + return + } + + // Stop all active services first + fmt.Printf(" Stopping services...\n") + for _, svc := range services { + active, err := isServiceActive(svc) + if err != nil { + fmt.Printf(" ⚠️ Unable to check %s: %v\n", svc, err) + continue + } + if !active { + fmt.Printf(" ℹ️ %s was already stopped\n", svc) + continue + } + if err := exec.Command("systemctl", "stop", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to stop %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Stopped %s\n", svc) + } + } + + // Check port availability before restarting + ports, err := collectPortsForServices(services, false) + if err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + if err := ensurePortsAvailable("prod restart", ports); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + + // Start all services + fmt.Printf(" Starting services...\n") + for _, svc := range services { + if err := exec.Command("systemctl", "start", svc).Run(); err != nil { + fmt.Printf(" ⚠️ Failed to start %s: %v\n", svc, err) + } else { + fmt.Printf(" ✓ Started %s\n", svc) + } + } + + // Give services a moment to fully initialize before verification + fmt.Printf(" ⏳ Waiting for services to initialize...\n") + time.Sleep(3 * time.Second) + + // Verify all services are healthy + if err := verifyProductionRuntime("prod restart"); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } + + fmt.Printf("\n✅ All services restarted and healthy\n") +} + +func handleProdUninstall() { + if os.Geteuid() != 0 { + 
fmt.Fprintf(os.Stderr, "❌ Production uninstall must be run as root (use sudo)\n") + os.Exit(1) + } + + fmt.Printf("⚠️ This will stop and remove all DeBros production services\n") + fmt.Printf("⚠️ Configuration and data will be preserved in /home/debros/.debros\n\n") + fmt.Printf("Continue? (yes/no): ") + + reader := bufio.NewReader(os.Stdin) + response, _ := reader.ReadString('\n') + response = strings.ToLower(strings.TrimSpace(response)) + + if response != "yes" && response != "y" { + fmt.Printf("Uninstall cancelled\n") + return + } + + services := []string{ + "debros-gateway", + "debros-node-node", + "debros-node-bootstrap", + "debros-olric", + // Note: RQLite is managed by node process, not as separate service + "debros-ipfs-cluster-bootstrap", + "debros-ipfs-cluster-node", + "debros-ipfs-bootstrap", + "debros-ipfs-node", + } + + fmt.Printf("Stopping services...\n") + for _, svc := range services { + exec.Command("systemctl", "stop", svc).Run() + exec.Command("systemctl", "disable", svc).Run() + unitPath := filepath.Join("/etc/systemd/system", svc+".service") + os.Remove(unitPath) + } + + exec.Command("systemctl", "daemon-reload").Run() + fmt.Printf("✅ Services uninstalled\n") + fmt.Printf(" Configuration and data preserved in /home/debros/.debros\n") + fmt.Printf(" To remove all data: rm -rf /home/debros/.debros\n\n") +} diff --git a/pkg/cli/prod_commands_test.go b/pkg/cli/prod_commands_test.go new file mode 100644 index 0000000..874a9ff --- /dev/null +++ b/pkg/cli/prod_commands_test.go @@ -0,0 +1,81 @@ +package cli + +import ( + "testing" +) + +// TestProdCommandFlagParsing verifies that prod command flags are parsed correctly +func TestProdCommandFlagParsing(t *testing.T) { + tests := []struct { + name string + args []string + expectBootstrap bool + expectVPSIP string + expectBootstrapJoin string + expectPeers string + }{ + { + name: "bootstrap node", + args: []string{"install", "--bootstrap"}, + expectBootstrap: true, + }, + { + name: "non-bootstrap with 
vps-ip", + args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "multiaddr1,multiaddr2"}, + expectVPSIP: "10.0.0.2", + expectPeers: "multiaddr1,multiaddr2", + }, + { + name: "secondary bootstrap", + args: []string{"install", "--bootstrap", "--vps-ip", "10.0.0.3", "--bootstrap-join", "10.0.0.1:7001"}, + expectBootstrap: true, + expectVPSIP: "10.0.0.3", + expectBootstrapJoin: "10.0.0.1:7001", + }, + { + name: "with domain", + args: []string{"install", "--bootstrap", "--domain", "example.com"}, + expectBootstrap: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Extract flags manually to verify parsing logic + isBootstrap := false + var vpsIP, peersStr, bootstrapJoin string + + for i, arg := range tt.args { + switch arg { + case "--bootstrap": + isBootstrap = true + case "--peers": + if i+1 < len(tt.args) { + peersStr = tt.args[i+1] + } + case "--vps-ip": + if i+1 < len(tt.args) { + vpsIP = tt.args[i+1] + } + case "--bootstrap-join": + if i+1 < len(tt.args) { + bootstrapJoin = tt.args[i+1] + } + } + } + + if isBootstrap != tt.expectBootstrap { + t.Errorf("expected bootstrap=%v, got %v", tt.expectBootstrap, isBootstrap) + } + if vpsIP != tt.expectVPSIP { + t.Errorf("expected vpsIP=%q, got %q", tt.expectVPSIP, vpsIP) + } + if peersStr != tt.expectPeers { + t.Errorf("expected peers=%q, got %q", tt.expectPeers, peersStr) + } + if bootstrapJoin != tt.expectBootstrapJoin { + t.Errorf("expected bootstrapJoin=%q, got %q", tt.expectBootstrapJoin, bootstrapJoin) + } + }) + } +} diff --git a/pkg/cli/rqlite_commands.go b/pkg/cli/rqlite_commands.go deleted file mode 100644 index b9961cb..0000000 --- a/pkg/cli/rqlite_commands.go +++ /dev/null @@ -1,327 +0,0 @@ -package cli - -import ( - "fmt" - "net" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/DeBrosOfficial/network/pkg/config" - "gopkg.in/yaml.v3" -) - -// HandleRQLiteCommand handles rqlite-related commands -func HandleRQLiteCommand(args []string) { - 
if len(args) == 0 { - showRQLiteHelp() - return - } - - if runtime.GOOS != "linux" { - fmt.Fprintf(os.Stderr, "❌ RQLite commands are only supported on Linux\n") - os.Exit(1) - } - - subcommand := args[0] - subargs := args[1:] - - switch subcommand { - case "fix": - handleRQLiteFix(subargs) - case "help": - showRQLiteHelp() - default: - fmt.Fprintf(os.Stderr, "Unknown rqlite subcommand: %s\n", subcommand) - showRQLiteHelp() - os.Exit(1) - } -} - -func showRQLiteHelp() { - fmt.Printf("🗄️ RQLite Commands\n\n") - fmt.Printf("Usage: network-cli rqlite [options]\n\n") - fmt.Printf("Subcommands:\n") - fmt.Printf(" fix - Fix misconfigured join address and clean stale raft state\n\n") - fmt.Printf("Description:\n") - fmt.Printf(" The 'fix' command automatically repairs common rqlite cluster issues:\n") - fmt.Printf(" - Corrects join address from HTTP port (5001) to Raft port (7001) if misconfigured\n") - fmt.Printf(" - Cleans stale raft state that prevents proper cluster formation\n") - fmt.Printf(" - Restarts the node service with corrected configuration\n\n") - fmt.Printf("Requirements:\n") - fmt.Printf(" - Must be run as root (use sudo)\n") - fmt.Printf(" - Only works on non-bootstrap nodes (nodes with join_address configured)\n") - fmt.Printf(" - Stops and restarts the debros-node service\n\n") - fmt.Printf("Examples:\n") - fmt.Printf(" sudo network-cli rqlite fix\n") -} - -func handleRQLiteFix(args []string) { - requireRoot() - - // Parse optional flags - dryRun := false - for _, arg := range args { - if arg == "--dry-run" || arg == "-n" { - dryRun = true - } - } - - if dryRun { - fmt.Printf("🔍 Dry-run mode - no changes will be made\n\n") - } - - fmt.Printf("🔧 RQLite Cluster Repair\n\n") - - // Load config - configPath, err := config.DefaultPath("node.yaml") - if err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to determine config path: %v\n", err) - os.Exit(1) - } - - cfg, err := loadConfigForRepair(configPath) - if err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to 
load config: %v\n", err) - os.Exit(1) - } - - // Check if this is a bootstrap node - if cfg.Node.Type == "bootstrap" || cfg.Database.RQLiteJoinAddress == "" { - fmt.Printf("ℹ️ This is a bootstrap node (no join address configured)\n") - fmt.Printf(" Bootstrap nodes don't need repair - they are the cluster leader\n") - fmt.Printf(" Run this command on follower nodes instead\n") - return - } - - joinAddr := cfg.Database.RQLiteJoinAddress - - // Check if join address needs fixing - needsConfigFix := needsFix(joinAddr, cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort) - var fixedAddr string - - if needsConfigFix { - fmt.Printf("⚠️ Detected misconfigured join address: %s\n", joinAddr) - fmt.Printf(" Expected Raft port (%d) but found HTTP port (%d)\n", cfg.Database.RQLiteRaftPort, cfg.Database.RQLitePort) - - // Extract host from join address - host, _, err := parseJoinAddress(joinAddr) - if err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to parse join address: %v\n", err) - os.Exit(1) - } - - // Fix the join address - rqlite expects Raft port for -join - fixedAddr = fmt.Sprintf("%s:%d", host, cfg.Database.RQLiteRaftPort) - fmt.Printf(" Corrected address: %s\n\n", fixedAddr) - } else { - fmt.Printf("✅ Join address looks correct: %s\n", joinAddr) - fmt.Printf(" Will clean stale raft state to ensure proper cluster formation\n\n") - fixedAddr = joinAddr // No change needed - } - - if dryRun { - fmt.Printf("🔍 Dry-run: Would clean raft state") - if needsConfigFix { - fmt.Printf(" and fix config") - } - fmt.Printf("\n") - return - } - - // Stop the service - fmt.Printf("⏹️ Stopping debros-node service...\n") - if err := stopService("debros-node"); err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to stop service: %v\n", err) - os.Exit(1) - } - fmt.Printf(" ✓ Service stopped\n\n") - - // Update config file if needed - if needsConfigFix { - fmt.Printf("📝 Updating configuration file...\n") - if err := updateConfigJoinAddress(configPath, fixedAddr); err != nil { - 
fmt.Fprintf(os.Stderr, "❌ Failed to update config: %v\n", err) - fmt.Fprintf(os.Stderr, " Service is stopped - please fix manually and restart\n") - os.Exit(1) - } - fmt.Printf(" ✓ Config updated: %s\n\n", configPath) - } - - // Clean raft state - fmt.Printf("🧹 Cleaning stale raft state...\n") - dataDir := expandDataDir(cfg.Node.DataDir) - raftDir := filepath.Join(dataDir, "rqlite", "raft") - if err := cleanRaftState(raftDir); err != nil { - fmt.Fprintf(os.Stderr, "⚠️ Failed to clean raft state: %v\n", err) - fmt.Fprintf(os.Stderr, " Continuing anyway - raft state may still exist\n") - } else { - fmt.Printf(" ✓ Raft state cleaned\n\n") - } - - // Restart the service - fmt.Printf("🚀 Restarting debros-node service...\n") - if err := startService("debros-node"); err != nil { - fmt.Fprintf(os.Stderr, "❌ Failed to start service: %v\n", err) - fmt.Fprintf(os.Stderr, " Config has been fixed - please restart manually:\n") - fmt.Fprintf(os.Stderr, " sudo systemctl start debros-node\n") - os.Exit(1) - } - fmt.Printf(" ✓ Service started\n\n") - - fmt.Printf("✅ Repair complete!\n\n") - fmt.Printf("The node should now join the cluster correctly.\n") - fmt.Printf("Monitor logs with: sudo network-cli service logs node --follow\n") -} - -func loadConfigForRepair(path string) (*config.Config, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open config file: %w", err) - } - defer file.Close() - - var cfg config.Config - if err := config.DecodeStrict(file, &cfg); err != nil { - return nil, fmt.Errorf("failed to parse config: %w", err) - } - - return &cfg, nil -} - -func needsFix(joinAddr string, raftPort int, httpPort int) bool { - if joinAddr == "" { - return false - } - - // Remove http:// or https:// prefix if present - addr := joinAddr - if strings.HasPrefix(addr, "http://") { - addr = strings.TrimPrefix(addr, "http://") - } else if strings.HasPrefix(addr, "https://") { - addr = strings.TrimPrefix(addr, "https://") - } - - // Parse 
host:port - _, port, err := net.SplitHostPort(addr) - if err != nil { - return false // Can't parse, assume it's fine - } - - // Check if port matches HTTP port (incorrect - should be Raft port) - if port == fmt.Sprintf("%d", httpPort) { - return true - } - - // If it matches Raft port, it's correct - if port == fmt.Sprintf("%d", raftPort) { - return false - } - - // Unknown port - assume it's fine - return false -} - -func parseJoinAddress(joinAddr string) (host, port string, err error) { - // Remove http:// or https:// prefix if present - addr := joinAddr - if strings.HasPrefix(addr, "http://") { - addr = strings.TrimPrefix(addr, "http://") - } else if strings.HasPrefix(addr, "https://") { - addr = strings.TrimPrefix(addr, "https://") - } - - host, port, err = net.SplitHostPort(addr) - if err != nil { - return "", "", fmt.Errorf("invalid join address format: %w", err) - } - - return host, port, nil -} - -func updateConfigJoinAddress(configPath string, newJoinAddr string) error { - // Read the file - data, err := os.ReadFile(configPath) - if err != nil { - return fmt.Errorf("failed to read config file: %w", err) - } - - // Parse YAML into a generic map to preserve structure - var yamlData map[string]interface{} - if err := yaml.Unmarshal(data, &yamlData); err != nil { - return fmt.Errorf("failed to parse YAML: %w", err) - } - - // Navigate to database.rqlite_join_address - database, ok := yamlData["database"].(map[string]interface{}) - if !ok { - return fmt.Errorf("database section not found in config") - } - - database["rqlite_join_address"] = newJoinAddr - - // Write back to file - updatedData, err := yaml.Marshal(yamlData) - if err != nil { - return fmt.Errorf("failed to marshal YAML: %w", err) - } - - if err := os.WriteFile(configPath, updatedData, 0644); err != nil { - return fmt.Errorf("failed to write config file: %w", err) - } - - return nil -} - -func expandDataDir(dataDir string) string { - expanded := os.ExpandEnv(dataDir) - if 
strings.HasPrefix(expanded, "~") { - home, err := os.UserHomeDir() - if err != nil { - return expanded // Fallback to original - } - expanded = filepath.Join(home, expanded[1:]) - } - return expanded -} - -func cleanRaftState(raftDir string) error { - if _, err := os.Stat(raftDir); os.IsNotExist(err) { - return nil // Directory doesn't exist, nothing to clean - } - - // Remove raft state files - filesToRemove := []string{ - "peers.json", - "peers.json.backup", - "peers.info", - "raft.db", - } - - for _, file := range filesToRemove { - filePath := filepath.Join(raftDir, file) - if err := os.Remove(filePath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to remove %s: %w", filePath, err) - } - } - - return nil -} - -func stopService(serviceName string) error { - cmd := exec.Command("systemctl", "stop", serviceName) - if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl stop failed: %w", err) - } - return nil -} - -func startService(serviceName string) error { - cmd := exec.Command("systemctl", "start", serviceName) - if err := cmd.Run(); err != nil { - return fmt.Errorf("systemctl start failed: %w", err) - } - return nil -} diff --git a/pkg/cli/service.go b/pkg/cli/service.go deleted file mode 100644 index 6379db2..0000000 --- a/pkg/cli/service.go +++ /dev/null @@ -1,243 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "os/exec" - "runtime" - "strings" -) - -// HandleServiceCommand handles systemd service management commands -func HandleServiceCommand(args []string) { - if len(args) == 0 { - showServiceHelp() - return - } - - if runtime.GOOS != "linux" { - fmt.Fprintf(os.Stderr, "❌ Service commands are only supported on Linux with systemd\n") - os.Exit(1) - } - - subcommand := args[0] - subargs := args[1:] - - switch subcommand { - case "start": - handleServiceStart(subargs) - case "stop": - handleServiceStop(subargs) - case "restart": - handleServiceRestart(subargs) - case "status": - handleServiceStatus(subargs) - case "logs": - 
handleServiceLogs(subargs) - case "help": - showServiceHelp() - default: - fmt.Fprintf(os.Stderr, "Unknown service subcommand: %s\n", subcommand) - showServiceHelp() - os.Exit(1) - } -} - -func showServiceHelp() { - fmt.Printf("🔧 Service Management Commands\n\n") - fmt.Printf("Usage: network-cli service [options]\n\n") - fmt.Printf("Subcommands:\n") - fmt.Printf(" start - Start services\n") - fmt.Printf(" stop - Stop services\n") - fmt.Printf(" restart - Restart services\n") - fmt.Printf(" status - Show service status\n") - fmt.Printf(" logs - View service logs\n\n") - fmt.Printf("Targets:\n") - fmt.Printf(" node - DeBros node service\n") - fmt.Printf(" gateway - DeBros gateway service\n") - fmt.Printf(" all - All DeBros services\n\n") - fmt.Printf("Logs Options:\n") - fmt.Printf(" --follow - Follow logs in real-time (-f)\n") - fmt.Printf(" --since=