Merge pull request #72 from DeBrosOfficial/super

Super
anonpenguin 2025-11-28 22:27:52 +02:00 committed by GitHub
commit 432952ed69
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
83 changed files with 6497 additions and 3354 deletions

.github/workflows/release-apt.yml (new file)

@ -0,0 +1,197 @@
name: Release APT Package

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      version:
        description: "Version to release (e.g., 0.69.20)"
        required: true

permissions:
  contents: write
  packages: write

jobs:
  build-deb:
    name: Build Debian Package
    runs-on: ubuntu-latest
    strategy:
      matrix:
        arch: [amd64, arm64]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}" # Remove 'v' prefix if present
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up QEMU (for arm64)
        if: matrix.arch == 'arm64'
        uses: docker/setup-qemu-action@v3

      - name: Build binary
        env:
          GOARCH: ${{ matrix.arch }}
          CGO_ENABLED: 0
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          COMMIT=$(git rev-parse --short HEAD)
          DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
          LDFLAGS="-X 'main.version=$VERSION' -X 'main.commit=$COMMIT' -X 'main.date=$DATE'"
          mkdir -p build/usr/local/bin
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway cmd/gateway/main.go

      - name: Create Debian package structure
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"
          mkdir -p ${PKG_NAME}/DEBIAN
          mkdir -p ${PKG_NAME}/usr/local/bin

          # Copy binaries
          cp build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
          chmod 755 ${PKG_NAME}/usr/local/bin/*

          # Create control file
          cat > ${PKG_NAME}/DEBIAN/control << EOF
          Package: orama
          Version: ${VERSION}
          Section: net
          Priority: optional
          Architecture: ${ARCH}
          Depends: libc6
          Maintainer: DeBros Team <team@debros.network>
          Description: Orama Network - Distributed P2P Database System
           Orama is a distributed peer-to-peer network that combines
           RQLite for distributed SQL, IPFS for content-addressed storage,
           and LibP2P for peer discovery and communication.
          EOF

          # Create postinst script
          cat > ${PKG_NAME}/DEBIAN/postinst << 'EOF'
          #!/bin/bash
          set -e
          echo ""
          echo "Orama installed successfully!"
          echo ""
          echo "To set up your node, run:"
          echo " sudo orama install"
          echo ""
          EOF
          chmod 755 ${PKG_NAME}/DEBIAN/postinst

      - name: Build .deb package
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"
          # dpkg-deb already writes orama_${VERSION}_${ARCH}.deb, so no rename is needed
          dpkg-deb --build ${PKG_NAME}

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: deb-${{ matrix.arch }}
          path: "*.deb"

  publish-apt:
    name: Publish to APT Repository
    needs: build-deb
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: packages

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}"
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up GPG
        if: env.GPG_PRIVATE_KEY != ''
        env:
          GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
        run: |
          echo "$GPG_PRIVATE_KEY" | gpg --import

      - name: Create APT repository structure
        run: |
          mkdir -p apt-repo/pool/main/o/orama
          mkdir -p apt-repo/dists/stable/main/binary-amd64
          mkdir -p apt-repo/dists/stable/main/binary-arm64

          # Move packages
          mv packages/deb-amd64/*.deb apt-repo/pool/main/o/orama/
          mv packages/deb-arm64/*.deb apt-repo/pool/main/o/orama/

          # Generate Packages files
          cd apt-repo
          dpkg-scanpackages --arch amd64 pool/ > dists/stable/main/binary-amd64/Packages
          dpkg-scanpackages --arch arm64 pool/ > dists/stable/main/binary-arm64/Packages
          gzip -k dists/stable/main/binary-amd64/Packages
          gzip -k dists/stable/main/binary-arm64/Packages

          # Generate Release file
          cat > dists/stable/Release << EOF
          Origin: Orama
          Label: Orama
          Suite: stable
          Codename: stable
          Architectures: amd64 arm64
          Components: main
          Description: Orama Network APT Repository
          EOF
          cd ..

      - name: Upload to release
        if: github.event_name == 'release'
        uses: softprops/action-gh-release@v1
        with:
          files: |
            apt-repo/pool/main/o/orama/*.deb
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Deploy APT repository to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./apt-repo
          destination_dir: apt
          keep_files: true
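Once the `apt-repo` tree is deployed to GitHub Pages under `apt/`, clients can consume it like any flat APT repository. A minimal sketch — the Pages URL is an assumption based on the repository owner, and since the generated `Release` file above is not GPG-signed, apt has to be told to trust the repository explicitly:

```bash
# Hypothetical Pages URL; adjust to wherever the apt/ directory is served
echo "deb [trusted=yes] https://debrosofficial.github.io/network/apt stable main" | \
  sudo tee /etc/apt/sources.list.d/orama.list

sudo apt update
sudo apt install orama   # the postinst script then suggests: sudo orama install
```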

CHANGELOG.md

@ -13,12 +13,119 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Deprecated
### Fixed
## [0.72.0] - 2025-11-28
### Added
- Interactive prompt for selecting local or remote gateway URL during CLI login.
- Support for discovering and configuring IPFS Cluster peers during installation and runtime via the gateway status endpoint.
- New CLI flags (`--ipfs-cluster-peer`, `--ipfs-cluster-addrs`) added to the `prod install` command for cluster discovery.
### Changed
- Renamed the main network node executable from `node` to `orama-node` and the gateway executable to `orama-gateway`.
- Improved the `auth login` flow to use a TLS-aware HTTP client, supporting Let's Encrypt staging certificates for remote gateways.
- Updated the production installer to set `CAP_NET_BIND_SERVICE` on `orama-node` to allow binding to privileged ports (80/443) without root.
- Updated the production installer to configure IPFS Cluster to listen on port 9098 for consistent multi-node communication.
- Refactored the `prod install` process to generate configurations before initializing services, ensuring configuration files are present.
### Deprecated
### Removed
### Fixed
- Corrected the IPFS Cluster API port used in the `node.yaml` template from 9096 to 9098 to match the cluster's LibP2P port.
- Fixed the `anyone-client` systemd service configuration to use the correct binary name and allow writing to the home directory.
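The `CAP_NET_BIND_SERVICE` change above is easy to verify by hand. A minimal sketch, assuming the binary lands at `/usr/local/bin/orama-node` (the path is illustrative):

```bash
# Grant the capability, as the installer now does (illustrative path)
sudo setcap 'cap_net_bind_service=+ep' /usr/local/bin/orama-node

# Confirm it stuck; the node can now bind 80/443 without running as root
getcap /usr/local/bin/orama-node
```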
## [0.71.0] - 2025-11-27
### Added
- Added `certutil` package for managing self-signed CA and node certificates.
- Added support for SNI-based TCP routing for internal services (RQLite Raft, IPFS, Olric) when HTTPS is enabled.
- Added `--dry-run`, `--no-pull`, and DNS validation checks to the production installer.
- Added `tlsutil` package to centralize TLS configuration and support trusted self-signed certificates for internal communication.
### Changed
- Refactored production installer to use a unified node architecture, removing the separate `debros-gateway` service and embedding the gateway within `debros-node`.
- Improved service health checks in the CLI with exponential backoff retries for better reliability during startup and upgrades.
- Updated RQLite to listen on an internal port (7002) when SNI is enabled, allowing the SNI gateway to handle external port 7001.
- Enhanced systemd service files with stricter security settings (e.g., `ProtectHome=read-only`, `ProtectSystem=strict`).
- Updated IPFS configuration to bind Swarm to all interfaces (0.0.0.0) for external connectivity.
### Deprecated
### Removed
### Fixed
- Fixed an issue where the `anyone-client` installation could fail due to missing NPM cache directories by ensuring proper initialization and ownership.
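The SNI routing added here can be probed with `openssl s_client`, which lets you present an arbitrary server name on a raw TLS connection. A sketch — the hostnames and the internal service name are placeholders, not the project's confirmed naming scheme:

```bash
# Present an internal-service SNI to the node's external port 7001 and see
# which certificate/backend answers (names below are hypothetical)
openssl s_client -connect node-1.example.com:7001 \
  -servername rqlite-raft.node-1.example.com </dev/null
```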
## [0.70.0] - 2025-11-26
### Added
### Changed
- The HTTP Gateway is now embedded directly within each network node, simplifying deployment and removing the need for a separate gateway service.
- The configuration for the full API Gateway (including Auth, PubSub, and internal service routing) is now part of the main node configuration.
- Development environment setup no longer generates a separate `gateway.yaml` file or starts a standalone gateway process.
- Updated local environment descriptions and default gateway fallback to reflect the node-1 designation.
### Deprecated
### Removed
### Fixed
- Updated the installation instructions in the README to reflect the correct APT repository URL.
## [0.69.22] - 2025-11-26
### Added
- Added 'Peer connection status' to the health check list in the README.
### Changed
- Unified development environment nodes, renaming 'bootstrap', 'bootstrap2', 'node2', 'node3', 'node4' to 'node-1' through 'node-5'.
- Renamed internal configuration fields and CLI flags from 'bootstrap peers' to 'peers' for consistency across the unified node architecture.
- Updated development environment configuration files and data directories to use the unified 'node-N' naming scheme (e.g., `node-1.yaml`, `data/node-1`).
- Changed the default main gateway port in the development environment from 6001 to 6000, reserving 6001-6005 for individual node gateways.
- Removed the explicit 'node.type' configuration field (bootstrap/node) as all nodes now use a unified configuration.
- Improved RQLite cluster joining logic to prioritize joining the most up-to-date peer (highest Raft log index) instead of prioritizing 'bootstrap' nodes.
### Deprecated
### Removed
### Fixed
- Fixed migration logic to correctly handle the transition from old unified data directories to the new 'node-1' structure.
## [0.69.21] - 2025-11-26
### Added
- Introduced a new interactive TUI wizard for production installation (`sudo orama install`).
- Added support for APT package repository generation and publishing via GitHub Actions.
- Added new simplified production CLI commands (`orama install`, `orama upgrade`, `orama status`, etc.) as aliases for the legacy `orama prod` commands.
- Added support for a unified HTTP reverse proxy gateway within the node process, routing internal services (RQLite, IPFS, Cluster) via a single port.
- Added support for SNI-based TCP routing for secure access to services like RQLite Raft and IPFS Swarm.
### Changed
- Renamed the primary CLI binary from `dbn` to `orama` across the entire codebase, documentation, and build system.
- Migrated the production installation directory structure from `~/.debros` to `~/.orama`.
- Consolidated production service management into unified systemd units (e.g., `debros-node.service` replaces `debros-node-bootstrap.service` and `debros-node-node.service`).
- Updated the default IPFS configuration to bind API and Gateway addresses to `127.0.0.1` for enhanced security, relying on the new unified gateway for external access.
- Updated RQLite service configuration to bind to `127.0.0.1` for HTTP and Raft ports, relying on the new SNI gateway for external cluster communication.
### Deprecated
### Removed
### Fixed
- Corrected configuration path resolution logic to correctly check for config files in the new `~/.orama/` directory structure.
## [0.69.20] - 2025-11-22
### Added
- Added verification step to ensure the IPFS Cluster secret is correctly written after configuration updates.
### Changed
- Improved reliability of `anyone-client` installation and verification by switching to `npx` for execution and checks, especially for globally installed scoped packages.
- Updated the `anyone-client` systemd service to use `npx` for execution and explicitly set the PATH environment variable to ensure the client runs correctly.
@ -27,12 +134,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.19] - 2025-11-22
### Added
### Changed
- Updated the installation command for 'anyone-client' to use the correct scoped package name (@anyone-protocol/anyone-client).
### Deprecated
@ -40,14 +152,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.18] - 2025-11-22
### Added
- Integrated `anyone-client` (SOCKS5 proxy) installation and systemd service (`debros-anyone-client.service`).
- Added port availability checking logic to prevent conflicts when starting services (e.g., `anyone-client` on port 9050).
### Changed
- Updated system dependencies installation to include `nodejs` and `npm` required for `anyone-client`.
- Modified Olric configuration generation to bind to the specific VPS IP if provided, otherwise defaults to 0.0.0.0.
- Improved IPFS Cluster initialization by passing `CLUSTER_SECRET` directly as an environment variable.
@ -57,17 +173,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.17] - 2025-11-21
### Added
- Initial implementation of a Push Notification Service for the Gateway, utilizing the Expo API.
- Detailed documentation for RQLite operations, monitoring, and troubleshooting was added to the README.
### Changed
- Improved `make stop` and `dbn dev down` commands to ensure all development services are forcefully killed after a graceful shutdown attempt.
- Refactored RQLite startup logic to simplify cluster establishment and remove complex, error-prone leadership/recovery checks, relying on RQLite's built-in join mechanism.
- RQLite logs are now written to individual log files (e.g., `~/.debros/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- RQLite logs are now written to individual log files (e.g., `~/.orama/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- Improved peer exchange discovery logging to suppress expected 'protocols not supported' warnings from lightweight clients like the Gateway.
### Deprecated
@ -75,17 +195,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.17] - 2025-11-21
### Added
- Initial implementation of a Push Notification Service for the Gateway, utilizing the Expo API.
- Detailed documentation for RQLite operations, monitoring, and troubleshooting in the README.
### Changed
- Improved `make stop` and `dbn dev down` commands to ensure all development services are forcefully killed after a graceful shutdown attempt.
- Refactored RQLite startup logic to simplify cluster establishment and remove complex, error-prone leadership/recovery checks, relying on RQLite's built-in join mechanism.
- RQLite logs are now written to individual log files (e.g., `~/.debros/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- RQLite logs are now written to individual log files (e.g., `~/.orama/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- Improved peer exchange discovery logging to suppress expected 'protocols not supported' warnings from lightweight clients like the Gateway.
### Deprecated
@ -93,12 +217,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.16] - 2025-11-16
### Added
### Changed
- Improved the `make stop` command to ensure a more robust and graceful shutdown of development services.
- Enhanced the `make kill` command and underlying scripts for more reliable force termination of stray development processes.
- Increased the graceful shutdown timeout for development processes from 500ms to 2 seconds before resorting to force kill.
@ -108,12 +237,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.15] - 2025-11-16
### Added
### Changed
- Improved authentication flow to handle wallet addresses case-insensitively during nonce creation and verification.
### Deprecated
@ -121,13 +255,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.14] - 2025-11-14
### Added
- Added support for background reconnection to the Olric cache cluster in the Gateway, improving resilience if the cache is temporarily unavailable.
### Changed
- Improved the RQLite database client connection handling to ensure connections are properly closed and reused safely.
- RQLite Manager now updates its advertised addresses if cluster discovery provides more accurate information (e.g., replacing localhost).
@ -136,13 +274,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Removed internal RQLite process management from the development runner, as RQLite is now expected to be managed externally or via Docker.
## [0.69.13] - 2025-11-14
### Added
### Changed
- The Gateway service now waits for the Olric cache service to start before attempting initialization.
- Improved robustness of Olric cache client initialization with retry logic and exponential backoff.
@ -151,14 +293,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Corrected the default path logic for 'gateway.yaml' to prioritize the production data directory while maintaining fallback to legacy paths.
## [0.69.12] - 2025-11-14
### Added
- The `prod install` command now requires the `--cluster-secret` flag for all non-bootstrap nodes to ensure correct IPFS Cluster configuration.
### Changed
- Updated IPFS configuration to bind API and Gateway addresses to `0.0.0.0` instead of `127.0.0.1` for better network accessibility.
### Deprecated
@ -166,13 +311,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.11] - 2025-11-13
### Added
- Added a new comprehensive shell script (`scripts/test-cluster-health.sh`) for checking the health and replication status of RQLite, IPFS, and IPFS Cluster across production environments.
### Changed
- Improved RQLite cluster discovery logic to ensure `peers.json` is correctly generated and includes the local node, which is crucial for reliable cluster recovery.
- Refactored logging across discovery and RQLite components for cleaner, more concise output, especially for routine operations.
- Updated the installation and upgrade process to correctly configure IPFS Cluster bootstrap peers using the node's public IP, improving cluster formation reliability.
@ -182,16 +331,19 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where RQLite recovery operations (like clearing Raft state) did not correctly force the regeneration of `peers.json`, preventing successful cluster rejoin.
- Corrected the port calculation logic for IPFS Cluster to ensure the correct LibP2P listen port (9098) is used for bootstrap peer addressing.
## [0.69.10] - 2025-11-13
### Added
- Automatic health monitoring and recovery for RQLite cluster split-brain scenarios.
- RQLite now waits indefinitely for the minimum cluster size to be met before starting, preventing single-node cluster formation.
### Changed
- Updated default IPFS swarm port from 4001 to 4101 to avoid conflicts with LibP2P.
### Deprecated
@ -199,16 +351,19 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Resolved an issue where RQLite could start as a single-node cluster if peer discovery was slow, by enforcing minimum cluster size before startup.
- Improved cluster recovery logic to correctly use `bootstrap-expect` for new clusters and ensure proper process restart during recovery.
## [0.69.9] - 2025-11-12
### Added
- Added automatic recovery logic for RQLite (database) nodes stuck in a configuration mismatch, which attempts to clear stale Raft state if peers have more recent data.
- Added logic to discover IPFS Cluster peers directly from the LibP2P host's peerstore, improving peer discovery before the Cluster API is fully operational.
### Changed
- Improved the IPFS Cluster configuration update process to prioritize writing to the `peerstore` file before updating `service.json`, ensuring the source of truth is updated first.
### Deprecated
@ -216,14 +371,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.8] - 2025-11-12
### Added
- Improved `dbn prod start` to automatically unmask and re-enable services if they were previously masked or disabled.
- Added automatic discovery and configuration of all IPFS Cluster peers during runtime to improve cluster connectivity.
### Changed
- Enhanced `dbn prod start` and `dbn prod stop` reliability by adding service state resets, retries, and ensuring services are disabled when stopped.
- Filtered peer exchange addresses in LibP2P discovery to only include the standard LibP2P port (4001), preventing exposure of internal service ports.
@ -232,13 +391,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Improved IPFS Cluster bootstrap configuration repair logic to automatically infer and update bootstrap peer addresses if the bootstrap node is available.
## [0.69.7] - 2025-11-12
### Added
### Changed
- Improved logic for determining Olric server addresses during configuration generation, especially for bootstrap and non-bootstrap nodes.
- Enhanced IPFS cluster configuration to correctly handle IPv6 addresses when updating bootstrap peers.
@ -247,14 +410,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.6] - 2025-11-12
### Added
- Improved production service health checks and port availability validation during install, upgrade, start, and restart commands.
- Added service aliases (node, ipfs, cluster, gateway, olric) to `dbn prod logs` command for easier log viewing.
### Changed
- Updated node configuration logic to correctly advertise public IP addresses in multiaddrs (for P2P discovery) and RQLite addresses, improving connectivity for nodes behind NAT/firewalls.
- Enhanced `dbn prod install` and `dbn prod upgrade` to automatically detect and preserve existing VPS IP, domain, and cluster join information.
- Improved RQLite cluster discovery to automatically replace localhost/loopback addresses with the actual public IP when exchanging metadata between peers.
@ -266,14 +433,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where the RQLite process would wait indefinitely for a join target; now uses a 5-minute timeout.
- Corrected the location of the gateway configuration file reference in the README.
## [0.69.5] - 2025-11-11
### Added
### Changed
- Moved the default location for `gateway.yaml` configuration file from `configs/` to the new `data/` directory for better organization.
- Updated configuration path logic to search for `gateway.yaml` in the new `data/` directory first.
@ -282,13 +453,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.4] - 2025-11-11
### Added
### Changed
- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-*).
- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-\*).
- Improved log file provisioning to only create necessary log files based on the node type being installed (bootstrap or node).
### Deprecated
@ -296,26 +472,35 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.3] - 2025-11-11
### Added
- Added `--ignore-resource-checks` flag to the install command to skip disk, RAM, and CPU prerequisite validation.
### Changed
### Deprecated
### Removed
### Fixed
## [0.69.2] - 2025-11-11
### Added
- Added `--no-pull` flag to `dbn prod upgrade` to skip git repository updates and use existing source code.
### Changed
- Removed deprecated environment management commands (`env`, `devnet`, `testnet`, `local`).
- Removed deprecated network commands (`health`, `peers`, `status`, `peer-id`, `connect`, `query`, `pubsub`) from the main CLI interface.
@ -324,14 +509,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.1] - 2025-11-11
### Added
- Added automatic service stopping before binary upgrades during the `prod upgrade` process to ensure a clean update.
- Added logic to preserve existing configuration settings (like `bootstrap_peers`, `domain`, and `rqlite_join_address`) when regenerating configurations during `prod upgrade`.
### Changed
- Improved the `prod upgrade` process to be more robust by preserving critical configuration details and gracefully stopping services.
### Deprecated
@ -339,15 +528,19 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.0] - 2025-11-11
### Added
- Added comprehensive documentation for setting up HTTPS using a domain name, including configuration steps for both installation and existing setups.
- Added the `--force` flag to the `install` command for reconfiguring all settings.
- Added new log targets (`ipfs-cluster`, `rqlite`, `olric`) and improved the `dbn prod logs` command documentation.
### Changed
- Improved the IPFS Cluster configuration logic to ensure the cluster secret and IPFS API port are correctly synchronized during updates.
- Refined the directory structure creation process to ensure node-specific data directories are created only when initializing services.
@ -356,13 +549,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.68.1] - 2025-11-11
### Added
- Pre-create log files during setup to ensure correct permissions for systemd logging.
### Changed
- Improved binary installation process to handle copying files individually, preventing potential shell wildcard issues.
- Enhanced ownership fixing logic during installation to ensure all files created by root (especially during service initialization) are correctly owned by the 'debros' user.
@ -371,14 +568,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.68.0] - 2025-11-11
### Added
- Added comprehensive documentation for production deployment, including installation, upgrade, service management, and troubleshooting.
- Added new CLI commands (`dbn prod start`, `dbn prod stop`, `dbn prod restart`) for convenient management of production systemd services.
### Changed
- Updated IPFS configuration during production installation to use port 4501 for the API (to avoid conflicts with RQLite on port 5001) and port 8080 for the Gateway.
### Deprecated
@ -386,15 +587,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Ensured that IPFS configuration automatically disables AutoConf when a private swarm key is present during installation and upgrade, preventing startup errors.
## [0.67.7] - 2025-11-11
### Added
- Added support for specifying the Git branch (main or nightly) during `prod install` and `prod upgrade`.
- The chosen branch is now saved and automatically used for future upgrades unless explicitly overridden.
### Changed
- Updated help messages and examples for production commands to include branch options.
### Deprecated
@ -402,12 +606,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.67.6] - 2025-11-11
### Added
### Changed
- The binary installer now updates the source repository if it already exists, instead of only cloning it if missing.
### Deprecated
@ -415,15 +624,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Resolved an issue where disabling AutoConf in the IPFS repository could leave 'auto' placeholders in the config, causing startup errors.
## [0.67.5] - 2025-11-11
### Added
- Added `--restart` option to `dbn prod upgrade` to automatically restart services after upgrade.
- The gateway now supports an optional `--config` flag to specify the configuration file path.
### Changed
- Improved `dbn prod upgrade` process to better handle existing installations, including detecting node type and ensuring configurations are updated to the latest format.
- Configuration loading logic for `node` and `gateway` commands now correctly handles absolute paths passed via command line or systemd.
@ -432,13 +644,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue during production upgrades where IPFS repositories in private swarms might fail to start due to `AutoConf` not being disabled.
## [0.67.4] - 2025-11-11
### Added
### Changed
- Improved configuration file loading logic to support absolute paths for config files.
- Updated IPFS Cluster initialization during setup to run `ipfs-cluster-service init` and automatically configure the cluster secret.
- IPFS repositories initialized with a private swarm key will now automatically disable AutoConf.
@ -448,13 +664,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.debros/`) and production (`~/.debros/configs/`) directories.
- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.orama/`) and production (`~/.orama/configs/`) directories.
## [0.67.3] - 2025-11-11
### Added
### Changed
- Improved reliability of IPFS (Kubo) installation by switching from a single install script to the official step-by-step download and extraction process.
- Updated IPFS (Kubo) installation to use version v0.38.2.
- Enhanced binary installation routines (RQLite, IPFS, Go) to ensure the installed binaries are immediately available in the current process's PATH.
@ -464,14 +684,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed potential installation failures for RQLite by adding error checking to the binary copy command.
## [0.67.2] - 2025-11-11
### Added
- Added a new utility function to reliably resolve the full path of required external binaries (like ipfs, rqlited, etc.).
### Changed
- Improved service initialization by validating the availability and path of all required external binaries before creating systemd service units.
- Updated systemd service generation logic to use the resolved, fully-qualified paths for external binaries instead of relying on hardcoded paths.
@ -480,13 +703,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Changed IPFS initialization from a warning to a fatal error if the repo fails to initialize, ensuring setup stops on critical failures.
## [0.67.1] - 2025-11-11
### Added
### Changed
- Improved disk space check logic to correctly check the parent directory if the specified path does not exist.
### Deprecated
@ -494,15 +721,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue in the installation script where the extracted CLI binary might be named 'dbn' instead of 'network-cli', ensuring successful installation regardless of the extracted filename.
## [0.67.0] - 2025-11-11
### Added
- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.
### Changed
- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.
@ -511,15 +741,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
## [0.67.0] - 2025-11-11
### Added
- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.
### Changed
- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.
@ -528,13 +761,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
## [0.66.1] - 2025-11-11
### Added
### Changed
- Allow bootstrap nodes to optionally define a join address to synchronize with another bootstrap cluster.
### Deprecated
@ -542,14 +779,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.66.0] - 2025-11-11
### Added
- Pre-installation checks for minimum system resources (10GB disk space, 2GB RAM, 2 CPU cores) are now performed during setup.
- All systemd services (IPFS, RQLite, Olric, Node, Gateway) now log directly to dedicated files in the logs directory instead of using the system journal.
### Changed
- Improved logging instructions in the setup completion message to reference the new dedicated log files.
### Deprecated
@ -557,14 +798,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.65.0] - 2025-11-11
### Added
- Expanded the local development environment (`dbn dev up`) from 3 nodes to 5 nodes (2 bootstraps and 3 regular nodes) for better testing of cluster resilience and quorum.
- Added a new `bootstrap2` node configuration and service to the development topology.
### Changed
- Updated the `dbn dev up` command to configure and start all 5 nodes and associated services (IPFS, RQLite, IPFS Cluster).
- Modified RQLite and LibP2P health checks in the development environment to require a quorum of 3 out of 5 nodes.
- Refactored development environment configuration logic using a new `Topology` structure for easier management of node ports and addresses.
@ -574,13 +819,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Ensured that secondary bootstrap nodes can correctly join the primary RQLite cluster in the development environment.
## [0.64.1] - 2025-11-10
### Added
### Changed
- Improved the accuracy of the Raft log index reporting by falling back to reading persisted snapshot metadata from disk if the running RQLite instance is not yet reachable or reports a zero index.
### Deprecated
@ -588,16 +837,20 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.64.0] - 2025-11-10
### Added
- Comprehensive End-to-End (E2E) test suite for Gateway API endpoints (Cache, RQLite, Storage, Network, Auth).
- New E2E tests for concurrent operations and TTL expiry in the distributed cache.
- New E2E tests for LibP2P peer connectivity and discovery.
### Changed
- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.debros` configuration files, removing the need for environment variables.
- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.orama` configuration files, removing the need for environment variables.
- The `/v1/network/peers` endpoint now returns a flattened list of multiaddresses for all connected peers.
- Improved robustness of Cache API handlers to correctly identify and return 404 (Not Found) errors when keys are missing, even when wrapped by underlying library errors.
- The RQLite transaction handler now supports the legacy `statements` array format in addition to the `ops` array format for easier use.
@ -608,13 +861,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Corrected IPFS Add operation to return the actual file size (byte count) instead of the DAG size in the response.
## [0.63.3] - 2025-11-10
### Added
### Changed
- Improved RQLite cluster stability by automatically clearing stale Raft state on startup if peers have a higher log index, allowing the node to join cleanly.
### Deprecated
@ -622,12 +879,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.63.2] - 2025-11-10
### Added
### Changed
- Improved process termination logic in development environments to ensure child processes are also killed.
- Enhanced the `dev-kill-all.sh` script to reliably kill all processes using development ports, including orphaned processes and their children.
@ -636,12 +898,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.63.1] - 2025-11-10
### Added
### Changed
- Increased the default minimum cluster size for database environments from 1 to 3.
### Deprecated
@ -649,15 +916,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Prevented unnecessary cluster recovery attempts when a node starts up as the first node (fresh bootstrap).
## [0.63.0] - 2025-11-10
### Added
- Added a new `kill` command to the Makefile for forcefully shutting down all development processes.
- Introduced a new `stop` command in the Makefile for graceful shutdown of development processes.
### Changed
- The `kill` command now performs a graceful shutdown attempt followed by a force kill of any lingering processes and verifies that development ports are free.
### Deprecated
@ -665,13 +935,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.62.0] - 2025-11-10
### Added
- The `prod status` command now correctly checks for both 'bootstrap' and 'node' service variants.
### Changed
- The production installation process now generates secrets (like the cluster secret and peer ID) before initializing services. This ensures all necessary secrets are available when services start.
- The `prod install` command now displays the actual Peer ID upon completion instead of a placeholder.
@ -680,15 +954,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where IPFS Cluster initialization was using a hardcoded configuration file instead of relying on the standard `ipfs-cluster-service init` process.
## [0.61.0] - 2025-11-10
### Added
- Introduced a new simplified authentication flow (`dbn auth login`) that allows users to generate an API key directly from a wallet address without signature verification (for development/testing purposes).
- Added a new `PRODUCTION_INSTALL.md` guide for production deployment using the `dbn prod` command suite.
### Changed
- Renamed the primary CLI binary from `network-cli` to `dbn` across all configurations, documentation, and source code.
- Refactored the IPFS configuration logic in the development environment to directly modify the IPFS config file instead of relying on shell commands, improving stability.
- Improved the IPFS Cluster peer count logic to correctly handle NDJSON streaming responses from the `/peers` endpoint.
@ -699,6 +976,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.60.1] - 2025-11-09
@ -1032,7 +1310,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Interactive domain configuration during `dbn setup` command
- Automatic port availability checking for ports 80 and 443 before enabling HTTPS
- DNS resolution verification to ensure domain points to the server IP
- TLS certificate cache directory management (`~/.debros/tls-cache`)
- TLS certificate cache directory management (`~/.orama/tls-cache`)
- Gateway automatically serves HTTP (port 80) for ACME challenges and HTTPS (port 443) for traffic
- New gateway config fields: `enable_https`, `domain_name`, `tls_cache_dir`
- **Domain Validation**: Added domain name validation and DNS verification helpers in setup CLI
@ -1102,8 +1380,8 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Automatic GitHub Release creation with changelog and artifacts
- Semantic versioning support with pre-release handling
- **Environment Configuration**: Multi-environment switching system
- Default environments: local (http://localhost:6001), devnet (https://devnet.debros.network), testnet (https://testnet.debros.network)
- Stored in `~/.debros/environments.json`
- Default environments: local (http://localhost:6001), devnet (https://devnet.orama.network), testnet (https://testnet.orama.network)
- Stored in `~/.orama/environments.json`
- CLI auto-uses active environment for authentication and operations
- **Comprehensive Documentation**
- `.cursor/RELEASES.md`: Overview and quick start
@ -1132,7 +1410,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Explicit control over LibP2P listen addresses for better localhost/development support
- Production/development mode detection for NAT services (disabled for localhost, enabled for production)
- Process management with .dev/pids directory for background process tracking
- Centralized logging to ~/.debros/logs/ for all network services
- Centralized logging to ~/.orama/logs/ for all network services
### Changed
@ -1182,7 +1460,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Changed
- Updated readme
- Where we read .yaml files from and where data is saved to ~/.debros
- Where we read .yaml files from and where data is saved to ~/.orama
### Deprecated
@ -1311,7 +1589,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Changed
- replaced git.debros.io with github.com
- replaced git.orama.io with github.com
### Deprecated

CONTRIBUTING.md

@ -27,14 +27,14 @@ make deps
Useful CLI commands:
```bash
./bin/dbn health
./bin/dbn peers
./bin/dbn status
./bin/orama health
./bin/orama peers
./bin/orama status
```
## Versioning
- The CLI reports its version via `dbn version`.
- The CLI reports its version via `orama version`.
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.
## Pull Requests

Makefile

@ -6,12 +6,12 @@ test:
	go test -v $(TEST)
# Gateway-focused E2E tests assume gateway and nodes are already running
# Auto-discovers configuration from ~/.debros and queries database for API key
# Auto-discovers configuration from ~/.orama and queries database for API key
# No environment variables required
.PHONY: test-e2e
test-e2e:
	@echo "Running comprehensive E2E tests..."
	@echo "Auto-discovering configuration from ~/.debros..."
	@echo "Auto-discovering configuration from ~/.orama..."
	go test -v -tags e2e ./e2e
# Network - Distributed P2P Database System
@ -19,7 +19,7 @@ test-e2e:
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.69.20
VERSION := 0.72.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@ -29,11 +29,11 @@ build: deps
	@echo "Building network executables (version=$(VERSION))..."
	@mkdir -p bin
	go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
	go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
	go build -ldflags "$(LDFLAGS)" -o bin/dbn cmd/cli/main.go
	go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
	go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
	# Inject gateway build metadata via pkg path variables
	go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
	@echo "Build complete! Run ./bin/dbn version"
	@echo "Build complete! Run ./bin/orama version"
# Install git hooks
install-hooks:
@ -49,46 +49,42 @@ clean:
# Run bootstrap node (auto-selects identity and data dir)
run-node:
	@echo "Starting bootstrap node..."
	@echo "Config: ~/.debros/bootstrap.yaml"
	@echo "Generate it with: dbn config init --type bootstrap"
	go run ./cmd/node --config node.yaml
	@echo "Starting node..."
	@echo "Config: ~/.orama/node.yaml"
	go run ./cmd/orama-node --config node.yaml
# Run second node (regular) - requires join address of bootstrap node
# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
# Run second node - requires join address
run-node2:
	@echo "Starting regular node (node.yaml)..."
	@echo "Config: ~/.debros/node.yaml"
	@echo "Generate it with: dbn config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
	go run ./cmd/node --config node2.yaml
	@echo "Starting second node..."
	@echo "Config: ~/.orama/node2.yaml"
	go run ./cmd/orama-node --config node2.yaml
# Run third node (regular) - requires join address of bootstrap node
# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
# Run third node - requires join address
run-node3:
	@echo "Starting regular node (node2.yaml)..."
	@echo "Config: ~/.debros/node2.yaml"
	@echo "Generate it with: dbn config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
	go run ./cmd/node --config node3.yaml
	@echo "Starting third node..."
	@echo "Config: ~/.orama/node3.yaml"
	go run ./cmd/orama-node --config node3.yaml
# Run gateway HTTP server
# Usage examples:
# make run-gateway # uses ~/.debros/gateway.yaml
# Config generated with: dbn config init --type gateway
run-gateway:
	@echo "Starting gateway HTTP server..."
	@echo "Note: Config must be in ~/.debros/gateway.yaml"
	@echo "Generate it with: dbn config init --type gateway"
	go run ./cmd/gateway
	@echo "Note: Config must be in ~/.orama/data/gateway.yaml"
	go run ./cmd/orama-gateway
# Setup local domain names for development
setup-domains:
	@echo "Setting up local domains..."
	@sudo bash scripts/setup-local-domains.sh
# Development environment target
# Uses dbn dev up to start full stack with dependency and port checking
dev: build
	@./bin/dbn dev up
# Uses orama dev up to start full stack with dependency and port checking
dev: build setup-domains
	@./bin/orama dev up
# Graceful shutdown of all dev services
stop:
	@if [ -f ./bin/dbn ]; then \
		./bin/dbn dev down || true; \
	@if [ -f ./bin/orama ]; then \
		./bin/orama dev down || true; \
	fi
	@bash scripts/dev-kill-all.sh
@ -106,20 +102,17 @@ help:
	@echo "Local Development (Recommended):"
	@echo " make dev - Start full development stack with one command"
	@echo " - Checks dependencies and available ports"
	@echo " - Generates configs (2 bootstraps + 3 nodes + gateway)"
	@echo " - Starts IPFS, RQLite, Olric, all nodes, and gateway"
	@echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)"
	@echo " - Stops all services if health checks fail"
	@echo " - Includes comprehensive logging"
	@echo " - Generates configs and starts all services"
	@echo " - Validates cluster health"
	@echo " make stop - Gracefully stop all development services"
	@echo " make kill - Force kill all development services (use if stop fails)"
	@echo ""
	@echo "Development Management (via dbn):"
	@echo " ./bin/dbn dev status - Show status of all dev services"
	@echo " ./bin/dbn dev logs <component> [--follow]"
	@echo "Development Management (via orama):"
	@echo " ./bin/orama dev status - Show status of all dev services"
	@echo " ./bin/orama dev logs <component> [--follow]"
	@echo ""
	@echo "Individual Node Targets (advanced):"
	@echo " run-node - Start bootstrap node directly"
	@echo " run-node - Start first node directly"
	@echo " run-node2 - Start second node directly"
	@echo " run-node3 - Start third node directly"
	@echo " run-gateway - Start HTTP gateway directly"

PRODUCTION_INSTALL.md

@ -1,175 +0,0 @@
# Production Installation Guide - DeBros Network
This guide covers production deployment of the DeBros Network using the `dbn prod` command suite.
## System Requirements
- **OS**: Ubuntu 20.04 LTS or later, Debian 11+, or other Linux distributions
- **Architecture**: x86_64 (amd64) or ARM64 (aarch64)
- **RAM**: Minimum 4GB, recommended 8GB+
- **Storage**: Minimum 50GB SSD recommended
- **Ports**:
  - 4001 (P2P networking)
  - 4501 (IPFS HTTP API - bootstrap), 4502/4503 (node2/node3)
  - 5001-5003 (RQLite HTTP - one per node)
  - 6001 (Gateway)
  - 7001-7003 (RQLite Raft - one per node)
  - 9094 (IPFS Cluster API - bootstrap), 9104/9114 (node2/node3)
  - 3320/3322 (Olric)
  - 80, 443 (for HTTPS with Let's Encrypt)
## Installation
### Prerequisites
1. **Root access required**: All production operations require sudo/root privileges
2. **Supported distros**: Ubuntu, Debian, Fedora (via package manager)
3. **Basic tools**: `curl`, `git`, `make`, `build-essential`, `wget`
### Single-Node Bootstrap Installation
Deploy the first node (bootstrap node) on a VPS:
```bash
sudo dbn prod install --bootstrap
```
This will:
1. Check system prerequisites (OS, arch, root privileges, basic tools)
2. Provision the `debros` system user and filesystem structure at `~/.debros`
3. Download and install all required binaries (Go, RQLite, IPFS, IPFS Cluster, Olric, DeBros)
4. Generate secrets (cluster secret, swarm key, node identity)
5. Initialize repositories (IPFS, IPFS Cluster, RQLite)
6. Generate configurations for bootstrap node
7. Create and start systemd services
All files will be under `/home/debros/.debros`:
```
~/.debros/
├── bin/              # Compiled binaries
├── configs/          # YAML configurations
├── data/
│   ├── ipfs/         # IPFS repository
│   ├── ipfs-cluster/ # IPFS Cluster state
│   └── rqlite/       # RQLite database
├── logs/             # Service logs
└── secrets/          # Keys and certificates
```
### Joining Additional Nodes
Every non-bootstrap node must use the exact same IPFS Cluster secret as the bootstrap host. When you provision a follower node:
1. Copy the secret from the bootstrap machine:
```bash
scp debros@<bootstrap-ip>:/home/debros/.debros/secrets/cluster-secret ./cluster-secret
```
2. Run the installer with the `--cluster-secret` flag:
```bash
sudo dbn prod install --vps-ip <public_ip> \
--peers /ip4/<bootstrap-ip>/tcp/4001/p2p/<peer-id> \
--cluster-secret $(cat ./cluster-secret)
```
The installer now enforces `--cluster-secret` for all non-bootstrap nodes, which prevents mismatched cluster PSKs during deployment.
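Since every node must share the same PSK, it is worth confirming the secrets match before installing. A quick sketch, comparing digests rather than printing the secret itself (paths as shown above):

```bash
# Compare the copied secret against the bootstrap's copy
ssh debros@<bootstrap-ip> 'sha256sum /home/debros/.debros/secrets/cluster-secret'
sha256sum ./cluster-secret
# The two digests must match; if not, re-copy before running the installer
```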
## Service Management
### Check Service Status
```bash
sudo systemctl status debros-node-bootstrap
sudo systemctl status debros-gateway
sudo systemctl status debros-rqlite-bootstrap
```
### View Service Logs
```bash
# Bootstrap node logs
sudo journalctl -u debros-node-bootstrap -f
# Gateway logs
sudo journalctl -u debros-gateway -f
# All services
sudo journalctl -u "debros-*" -f
```
## Health Checks
After installation, verify services are running:
```bash
# Check IPFS
curl http://localhost:4501/api/v0/id
# Check RQLite cluster
curl http://localhost:5001/status
# Check Gateway
curl http://localhost:6001/health
# Check Olric
curl http://localhost:3320/ping
```
## Port Reference
### Development Environment (via `make dev`)
- IPFS API: 4501 (bootstrap), 4502 (node2), 4503 (node3)
- RQLite HTTP: 5001, 5002, 5003
- RQLite Raft: 7001, 7002, 7003
- IPFS Cluster: 9094, 9104, 9114
- P2P: 4001, 4002, 4003
- Gateway: 6001
- Olric: 3320, 3322
### Production Environment (via `sudo dbn prod install`)
- Same port assignments as development for consistency
## Configuration Files
Key configuration files are located in `~/.debros/configs/`:
- **bootstrap.yaml**: Bootstrap node configuration
- **node.yaml**: Regular node configuration
- **gateway.yaml**: HTTP gateway configuration
- **olric.yaml**: In-memory cache configuration
Edit these files directly for advanced configuration, then restart services:
```bash
sudo systemctl restart debros-node-bootstrap
```
## Troubleshooting
### Port already in use
Check which process is using the port:
```bash
sudo lsof -i :4501
sudo lsof -i :5001
sudo lsof -i :7001
```
Kill conflicting processes or change ports in config.
### RQLite cluster not forming
Ensure:
1. Bootstrap node is running: `systemctl status debros-rqlite-bootstrap`
2. Network connectivity between nodes on ports 5001+ (HTTP) and 7001+ (Raft)
3. Check logs: `journalctl -u debros-rqlite-bootstrap -f`
---
**Last Updated**: November 2024
**Compatible with**: Network v1.0.0+

README.md
File diff suppressed because it is too large

cmd/cli/main.go

@ -34,7 +34,7 @@ func main() {
	switch command {
	case "version":
		fmt.Printf("dbn %s", version)
		fmt.Printf("orama %s", version)
		if commit != "" {
			fmt.Printf(" (commit %s)", commit)
		}
@ -48,10 +48,30 @@ func main() {
	case "dev":
		cli.HandleDevCommand(args)
	// Production environment commands
	// Production environment commands (legacy with 'prod' prefix)
	case "prod":
		cli.HandleProdCommand(args)
	// Direct production commands (new simplified interface)
	case "install":
		cli.HandleProdCommand(append([]string{"install"}, args...))
	case "upgrade":
		cli.HandleProdCommand(append([]string{"upgrade"}, args...))
	case "migrate":
		cli.HandleProdCommand(append([]string{"migrate"}, args...))
	case "status":
		cli.HandleProdCommand(append([]string{"status"}, args...))
	case "start":
		cli.HandleProdCommand(append([]string{"start"}, args...))
	case "stop":
		cli.HandleProdCommand(append([]string{"stop"}, args...))
	case "restart":
		cli.HandleProdCommand(append([]string{"restart"}, args...))
	case "logs":
		cli.HandleProdCommand(append([]string{"logs"}, args...))
	case "uninstall":
		cli.HandleProdCommand(append([]string{"uninstall"}, args...))
	// Authentication commands
	case "auth":
		cli.HandleAuthCommand(args)
@ -85,8 +105,8 @@ func parseGlobalFlags(args []string) {
}
func showHelp() {
	fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Printf("Usage: dbn <command> [args...]\n\n")
	fmt.Printf("Orama CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Printf("Usage: orama <command> [args...]\n\n")
	fmt.Printf("💻 Local Development:\n")
	fmt.Printf(" dev up - Start full local dev environment\n")
@ -96,15 +116,14 @@ func showHelp() {
	fmt.Printf(" dev help - Show dev command help\n\n")
	fmt.Printf("🚀 Production Deployment:\n")
	fmt.Printf(" prod install [--bootstrap] - Full production bootstrap (requires root/sudo)\n")
	fmt.Printf(" prod upgrade - Upgrade existing installation\n")
	fmt.Printf(" prod status - Show production service status\n")
	fmt.Printf(" prod start - Start all production services (requires root/sudo)\n")
	fmt.Printf(" prod stop - Stop all production services (requires root/sudo)\n")
	fmt.Printf(" prod restart - Restart all production services (requires root/sudo)\n")
	fmt.Printf(" prod logs <service> - View production service logs\n")
	fmt.Printf(" prod uninstall - Remove production services (requires root/sudo)\n")
	fmt.Printf(" prod help - Show prod command help\n\n")
	fmt.Printf(" install - Install production node (requires root/sudo)\n")
	fmt.Printf(" upgrade - Upgrade existing installation\n")
	fmt.Printf(" status - Show production service status\n")
	fmt.Printf(" start - Start all production services (requires root/sudo)\n")
	fmt.Printf(" stop - Stop all production services (requires root/sudo)\n")
	fmt.Printf(" restart - Restart all production services (requires root/sudo)\n")
	fmt.Printf(" logs <service> - View production service logs\n")
	fmt.Printf(" uninstall - Remove production services (requires root/sudo)\n\n")
	fmt.Printf("🔐 Authentication:\n")
	fmt.Printf(" auth login - Authenticate with wallet\n")
@ -119,16 +138,14 @@ func showHelp() {
	fmt.Printf(" --help, -h - Show this help message\n\n")
	fmt.Printf("Examples:\n")
	fmt.Printf(" # Authenticate\n")
	fmt.Printf(" dbn auth login\n\n")
	fmt.Printf(" # First node (creates new cluster)\n")
	fmt.Printf(" sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
	fmt.Printf(" # Start local dev environment\n")
	fmt.Printf(" dbn dev up\n")
	fmt.Printf(" dbn dev status\n\n")
	fmt.Printf(" # Join existing cluster\n")
	fmt.Printf(" sudo orama install --vps-ip 203.0.113.2 --domain node-2.orama.network \\\n")
	fmt.Printf(" --peers /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW... --cluster-secret <hex>\n\n")
	fmt.Printf(" # Production deployment (requires root/sudo)\n")
	fmt.Printf(" sudo dbn prod install --bootstrap\n")
	fmt.Printf(" sudo dbn prod upgrade\n")
	fmt.Printf(" dbn prod status\n")
	fmt.Printf(" dbn prod logs node --follow\n")
	fmt.Printf(" # Service management\n")
	fmt.Printf(" orama status\n")
	fmt.Printf(" orama logs node --follow\n")
}

View File

@ -40,11 +40,11 @@ func getEnvBoolDefault(key string, def bool) bool {
}
}
// parseGatewayConfig loads gateway.yaml from ~/.debros exclusively.
// parseGatewayConfig loads gateway.yaml from ~/.orama exclusively.
// It accepts an optional --config flag for absolute paths (used by systemd services).
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Parse --config flag (optional, for systemd services that pass absolute paths)
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.debros)")
configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
flag.Parse()
// Determine config path
@ -63,7 +63,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
}
}
} else {
// Default behavior: look for gateway.yaml in ~/.debros/data/, ~/.debros/configs/, or ~/.debros/
// Default behavior: look for gateway.yaml in ~/.orama/data/, ~/.orama/configs/, or ~/.orama/
configPath, err = config.DefaultPath("gateway.yaml")
if err != nil {
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
@ -77,7 +77,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"`
RQLiteDSN string `yaml:"rqlite_dsn"`
BootstrapPeers []string `yaml:"bootstrap_peers"`
Peers []string `yaml:"bootstrap_peers"`
EnableHTTPS bool `yaml:"enable_https"`
DomainName string `yaml:"domain_name"`
TLSCacheDir string `yaml:"tls_cache_dir"`
@ -133,16 +133,16 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
if v := strings.TrimSpace(y.RQLiteDSN); v != "" {
cfg.RQLiteDSN = v
}
if len(y.BootstrapPeers) > 0 {
var bp []string
for _, p := range y.BootstrapPeers {
if len(y.Peers) > 0 {
var peers []string
for _, p := range y.Peers {
p = strings.TrimSpace(p)
if p != "" {
bp = append(bp, p)
peers = append(peers, p)
}
}
if len(bp) > 0 {
cfg.BootstrapPeers = bp
if len(peers) > 0 {
cfg.BootstrapPeers = peers
}
}
@ -157,7 +157,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Default TLS cache directory if HTTPS is enabled but not specified
homeDir, err := os.UserHomeDir()
if err == nil {
cfg.TLSCacheDir = filepath.Join(homeDir, ".debros", "tls-cache")
cfg.TLSCacheDir = filepath.Join(homeDir, ".orama", "tls-cache")
}
}
@ -205,7 +205,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
zap.String("path", configPath),
zap.String("addr", cfg.ListenAddr),
zap.String("namespace", cfg.ClientNamespace),
zap.Int("bootstrap_peer_count", len(cfg.BootstrapPeers)),
zap.Int("peer_count", len(cfg.BootstrapPeers)),
)
return cfg
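
For reference, a minimal, hypothetical gateway.yaml accepted by this parser — note the on-disk key is still bootstrap_peers even though the Go field was renamed to Peers, so existing config files keep working. A self-contained sketch (all values illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Hypothetical config snippet; only the fields relevant to the rename are shown.
	raw := []byte(`
listen_addr: ":6001"
client_namespace: "default"
bootstrap_peers:
  - "/ip4/203.0.113.1/tcp/4001/p2p/12D3KooWExamplePeerID"
`)
	var y struct {
		ListenAddr string   `yaml:"listen_addr"`
		Peers      []string `yaml:"bootstrap_peers"` // renamed field, unchanged YAML key
	}
	if err := yaml.Unmarshal(raw, &y); err != nil {
		panic(err)
	}
	fmt.Println(y.ListenAddr, y.Peers)
}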

View File

@ -33,7 +33,7 @@ func setup_logger(component logging.Component) (logger *logging.ColoredLogger) {
// parse_flags parses command-line flags and returns them.
func parse_flags() (configName *string, help *bool) {
configName = flag.String("config", "node.yaml", "Config filename in ~/.debros (default: node.yaml)")
configName = flag.String("config", "node.yaml", "Config filename in ~/.orama (default: node.yaml)")
help = flag.Bool("help", false, "Show help")
flag.Parse()
@ -63,7 +63,7 @@ func check_if_should_open_help(help *bool) {
}
}
// select_data_dir validates that we can load the config from ~/.debros
// select_data_dir validates that we can load the config from ~/.orama
func select_data_dir_check(configName *string) {
logger := setup_logger(logging.ComponentNode)
@ -102,8 +102,8 @@ func select_data_dir_check(configName *string) {
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath)
fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n")
fmt.Fprintf(os.Stderr, " dbn config init --type bootstrap\n")
fmt.Fprintf(os.Stderr, " dbn config init --type node --bootstrap-peers '<peer_multiaddr>'\n")
fmt.Fprintf(os.Stderr, " orama config init --type node\n")
fmt.Fprintf(os.Stderr, " orama config init --type node --peers '<peer_multiaddr>'\n")
os.Exit(1)
}
}
@ -135,7 +135,7 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error {
}
}
// Save the peer ID to a file for CLI access (especially useful for bootstrap)
// Save the peer ID to a file for CLI access
peerID := n.GetPeerID()
peerInfoFile := filepath.Join(dataDir, "peer.info")
@ -163,7 +163,7 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error {
logger.Error("Failed to save peer info: %v", zap.Error(err))
} else {
logger.Info("Peer info saved to: %s", zap.String("path", peerInfoFile))
logger.Info("Bootstrap multiaddr: %s", zap.String("path", peerMultiaddr))
logger.Info("Peer multiaddr: %s", zap.String("path", peerMultiaddr))
}
logger.Info("Node started successfully")
@ -272,7 +272,7 @@ func main() {
// Absolute path passed directly (e.g., from systemd service)
configPath = *configName
} else {
// Relative path - use DefaultPath which checks both ~/.debros/configs/ and ~/.debros/
// Relative path - use DefaultPath which checks both ~/.orama/configs/ and ~/.orama/
configPath, err = config.DefaultPath(*configName)
if err != nil {
logger.Error("Failed to determine config path", zap.Error(err))
@ -316,7 +316,7 @@ func main() {
zap.Strings("listen_addresses", cfg.Node.ListenAddresses),
zap.Int("rqlite_http_port", cfg.Database.RQLitePort),
zap.Int("rqlite_raft_port", cfg.Database.RQLiteRaftPort),
zap.Strings("bootstrap_peers", cfg.Discovery.BootstrapPeers),
zap.Strings("peers", cfg.Discovery.BootstrapPeers),
zap.String("rqlite_join_address", cfg.Database.RQLiteJoinAddress),
zap.String("data_directory", cfg.Node.DataDir))

19
debian/control vendored Normal file
View File

@ -0,0 +1,19 @@
Package: orama
Version: 0.69.20
Section: net
Priority: optional
Architecture: amd64
Depends: libc6
Maintainer: DeBros Team <dev@debros.io>
Description: Orama Network - Distributed P2P Database System
Orama is a distributed peer-to-peer network that combines
RQLite for distributed SQL, IPFS for content-addressed storage,
and LibP2P for peer discovery and communication.
.
Features:
- Distributed SQLite database with Raft consensus
- IPFS-based file storage with encryption
- LibP2P peer-to-peer networking
- Olric distributed cache
- Unified HTTP/HTTPS gateway
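A package carrying this control file can be installed manually for testing with, for example, sudo dpkg -i orama_0.69.20_amd64.deb; dpkg records the libc6 dependency but does not fetch it, so sudo apt-get install -f resolves it if it is somehow missing.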

18
debian/postinst vendored Normal file
View File

@ -0,0 +1,18 @@
#!/bin/bash
set -e
# Post-installation script for orama package
echo "Orama installed successfully!"
echo ""
echo "To set up your node, run:"
echo " sudo orama install"
echo ""
echo "This will launch the interactive installer."
echo ""
echo "For command-line installation:"
echo " sudo orama install --vps-ip <your-ip> --domain <your-domain>"
echo ""
echo "For help:"
echo " orama --help"

View File

@ -35,7 +35,7 @@ var (
cacheMutex sync.RWMutex
)
// loadGatewayConfig loads gateway configuration from ~/.debros/gateway.yaml
// loadGatewayConfig loads gateway configuration from ~/.orama/gateway.yaml
func loadGatewayConfig() (map[string]interface{}, error) {
configPath, err := config.DefaultPath("gateway.yaml")
if err != nil {
@ -55,7 +55,7 @@ func loadGatewayConfig() (map[string]interface{}, error) {
return cfg, nil
}
// loadNodeConfig loads node configuration from ~/.debros/node.yaml or bootstrap.yaml
// loadNodeConfig loads node configuration from ~/.orama/node-*.yaml
func loadNodeConfig(filename string) (map[string]interface{}, error) {
configPath, err := config.DefaultPath(filename)
if err != nil {
@ -111,8 +111,8 @@ func GetRQLiteNodes() []string {
}
cacheMutex.RUnlock()
// Try bootstrap.yaml first, then all node variants
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
// Try all node config files
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -141,13 +141,13 @@ func queryAPIKeyFromRQLite() (string, error) {
return "", fmt.Errorf("failed to get home directory: %w", err)
}
// Try bootstrap first, then all nodes
// Try all node data directories
dbPaths := []string{
filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "bootstrap2", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".debros", "node4", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".orama", "data", "node-1", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".orama", "data", "node-2", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".orama", "data", "node-3", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".orama", "data", "node-4", "rqlite", "db.sqlite"),
filepath.Join(homeDir, ".orama", "data", "node-5", "rqlite", "db.sqlite"),
}
for _, dbPath := range dbPaths {
@ -221,7 +221,7 @@ func GetBootstrapPeers() []string {
}
cacheMutex.RUnlock()
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"}
seen := make(map[string]struct{})
var peers []string
@ -272,7 +272,7 @@ func GetIPFSClusterURL() string {
cacheMutex.RUnlock()
// Try to load from node config
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -304,7 +304,7 @@ func GetIPFSAPIURL() string {
cacheMutex.RUnlock()
// Try to load from node config
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -329,7 +329,7 @@ func GetIPFSAPIURL() string {
// GetClientNamespace returns the test client namespace from config
func GetClientNamespace() string {
// Try to load from node config
for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -562,7 +562,7 @@ func CleanupDatabaseTable(t *testing.T, tableName string) {
return
}
dbPath := filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite")
dbPath := filepath.Join(homeDir, ".orama", "data", "node-1", "rqlite", "db.sqlite")
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
t.Logf("warning: failed to open database for cleanup: %v", err)

20
go.mod
View File

@ -5,29 +5,39 @@ go 1.23.8
toolchain go1.24.1
require (
github.com/charmbracelet/bubbles v0.20.0
github.com/charmbracelet/bubbletea v1.2.4
github.com/charmbracelet/lipgloss v1.0.0
github.com/ethereum/go-ethereum v1.13.14
github.com/go-chi/chi/v5 v5.2.3
github.com/gorilla/websocket v1.5.3
github.com/libp2p/go-libp2p v0.41.1
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/mackerelio/go-osstat v0.2.6
github.com/mattn/go-sqlite3 v1.14.32
github.com/multiformats/go-multiaddr v0.15.0
github.com/olric-data/olric v0.7.0
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.40.0
golang.org/x/net v0.42.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buraksezer/consistent v0.10.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.4.5 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
@ -35,6 +45,7 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
@ -70,15 +81,20 @@ require (
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-sqlite3 v1.14.32 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
@ -121,6 +137,7 @@ require (
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/redis/go-redis/v9 v9.8.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
@ -141,6 +158,5 @@ require (
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)

34
go.sum
View File

@ -19,6 +19,10 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@ -44,6 +48,16 @@ github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0ma
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE=
github.com/charmbracelet/bubbletea v1.2.4/go.mod h1:Qr6fVQw+wX7JkWWkVyXYk/ZUQ92a6XNekLXa3rR18MM=
github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSexTdRM=
github.com/charmbracelet/x/ansi v0.4.5/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
@ -75,6 +89,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -85,6 +101,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -238,6 +256,8 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
@ -246,6 +266,10 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -271,6 +295,12 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
@ -399,6 +429,9 @@ github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtB
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
@ -585,6 +618,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@ -34,15 +34,15 @@ func GetCredentialsPath() (string, error) {
return "", fmt.Errorf("failed to get home directory: %w", err)
}
debrosDir := filepath.Join(homeDir, ".debros")
if err := os.MkdirAll(debrosDir, 0700); err != nil {
return "", fmt.Errorf("failed to create .debros directory: %w", err)
oramaDir := filepath.Join(homeDir, ".orama")
if err := os.MkdirAll(oramaDir, 0700); err != nil {
return "", fmt.Errorf("failed to create .orama directory: %w", err)
}
return filepath.Join(debrosDir, "credentials.json"), nil
return filepath.Join(oramaDir, "credentials.json"), nil
}
// LoadCredentials loads credentials from ~/.debros/credentials.json
// LoadCredentials loads credentials from ~/.orama/credentials.json
func LoadCredentials() (*CredentialStore, error) {
credPath, err := GetCredentialsPath()
if err != nil {
@ -80,7 +80,7 @@ func LoadCredentials() (*CredentialStore, error) {
return &store, nil
}
// SaveCredentials saves credentials to ~/.debros/credentials.json
// SaveCredentials saves credentials to ~/.orama/credentials.json
func (store *CredentialStore) SaveCredentials() error {
credPath, err := GetCredentialsPath()
if err != nil {

View File

@ -10,6 +10,8 @@ import (
"os"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
)
// PerformSimpleAuthentication performs a simple authentication flow where the user
@ -91,7 +93,13 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
}
endpoint := gatewayURL + "/v1/auth/simple-key"
resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload))
// Extract domain from URL for TLS configuration
// This uses tlsutil which handles Let's Encrypt staging certificates for *.debros.network
domain := extractDomainFromURL(gatewayURL)
client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
resp, err := client.Post(endpoint, "application/json", bytes.NewReader(payload))
if err != nil {
return "", fmt.Errorf("failed to call gateway: %w", err)
}
@ -114,3 +122,23 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
return apiKey, nil
}
// extractDomainFromURL extracts the domain from a URL
// Removes protocol (https://, http://), path, and port components
func extractDomainFromURL(url string) string {
// Remove protocol prefixes
url = strings.TrimPrefix(url, "https://")
url = strings.TrimPrefix(url, "http://")
// Remove path component
if idx := strings.Index(url, "/"); idx != -1 {
url = url[:idx]
}
// Remove port component
if idx := strings.Index(url, ":"); idx != -1 {
url = url[:idx]
}
return url
}
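
A standalone sanity check of extractDomainFromURL — the function body is copied verbatim so the snippet runs on its own. Because the path is stripped before the port, host:port/path inputs also reduce to the bare host:

package main

import (
	"fmt"
	"strings"
)

func extractDomainFromURL(url string) string {
	url = strings.TrimPrefix(url, "https://")
	url = strings.TrimPrefix(url, "http://")
	if idx := strings.Index(url, "/"); idx != -1 {
		url = url[:idx]
	}
	if idx := strings.Index(url, ":"); idx != -1 {
		url = url[:idx]
	}
	return url
}

func main() {
	fmt.Println(extractDomainFromURL("https://node-1.orama.network/v1/auth/simple-key")) // node-1.orama.network
	fmt.Println(extractDomainFromURL("http://localhost:6001"))                           // localhost
	fmt.Println(extractDomainFromURL("node-2.orama.network"))                            // node-2.orama.network
}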

View File

@ -199,7 +199,7 @@ func (as *AuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
%s
</div>
<p>Your credentials have been saved securely to <code>~/.debros/credentials.json</code></p>
<p>Your credentials have been saved securely to <code>~/.orama/credentials.json</code></p>
<p><strong>You can now close this browser window and return to your terminal.</strong></p>
</div>
</body>

View File

@ -0,0 +1,257 @@
// Package certutil provides utilities for managing self-signed certificates
package certutil
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net"
"os"
"path/filepath"
"time"
)
// CertificateManager manages self-signed certificates for the network
type CertificateManager struct {
baseDir string
}
// NewCertificateManager creates a new certificate manager
func NewCertificateManager(baseDir string) *CertificateManager {
return &CertificateManager{
baseDir: baseDir,
}
}
// EnsureCACertificate creates or loads the CA certificate
func (cm *CertificateManager) EnsureCACertificate() ([]byte, []byte, error) {
caCertPath := filepath.Join(cm.baseDir, "ca.crt")
caKeyPath := filepath.Join(cm.baseDir, "ca.key")
// Check if CA already exists
if _, err := os.Stat(caCertPath); err == nil {
certPEM, err := os.ReadFile(caCertPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to read CA certificate: %w", err)
}
keyPEM, err := os.ReadFile(caKeyPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to read CA key: %w", err)
}
return certPEM, keyPEM, nil
}
// Create new CA certificate
certPEM, keyPEM, err := cm.generateCACertificate()
if err != nil {
return nil, nil, err
}
// Ensure directory exists
if err := os.MkdirAll(cm.baseDir, 0700); err != nil {
return nil, nil, fmt.Errorf("failed to create cert directory: %w", err)
}
// Write to files
if err := os.WriteFile(caCertPath, certPEM, 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write CA certificate: %w", err)
}
if err := os.WriteFile(caKeyPath, keyPEM, 0600); err != nil {
return nil, nil, fmt.Errorf("failed to write CA key: %w", err)
}
return certPEM, keyPEM, nil
}
// EnsureNodeCertificate creates or loads a node certificate signed by the CA
func (cm *CertificateManager) EnsureNodeCertificate(hostname string, caCertPEM, caKeyPEM []byte) ([]byte, []byte, error) {
certPath := filepath.Join(cm.baseDir, fmt.Sprintf("%s.crt", hostname))
keyPath := filepath.Join(cm.baseDir, fmt.Sprintf("%s.key", hostname))
// Check if certificate already exists
if _, err := os.Stat(certPath); err == nil {
certData, err := os.ReadFile(certPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to read certificate: %w", err)
}
keyData, err := os.ReadFile(keyPath)
if err != nil {
return nil, nil, fmt.Errorf("failed to read key: %w", err)
}
return certData, keyData, nil
}
// Create new certificate
certPEM, keyPEM, err := cm.generateNodeCertificate(hostname, caCertPEM, caKeyPEM)
if err != nil {
return nil, nil, err
}
// Write to files
if err := os.WriteFile(certPath, certPEM, 0644); err != nil {
return nil, nil, fmt.Errorf("failed to write certificate: %w", err)
}
if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil {
return nil, nil, fmt.Errorf("failed to write key: %w", err)
}
return certPEM, keyPEM, nil
}
// generateCACertificate generates a self-signed CA certificate
func (cm *CertificateManager) generateCACertificate() ([]byte, []byte, error) {
// Generate private key
privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
}
// Create certificate template
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: "DeBros Network Root CA",
Organization: []string{"DeBros"},
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(10, 0, 0), // 10 year validity
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
ExtKeyUsage: []x509.ExtKeyUsage{},
BasicConstraintsValid: true,
IsCA: true,
}
// Self-sign the certificate
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
if err != nil {
return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
}
// Encode certificate to PEM
certPEM := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: certDER,
})
// Encode private key to PEM
keyDER, err := x509.MarshalPKCS8PrivateKey(privateKey)
if err != nil {
return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{
Type: "PRIVATE KEY",
Bytes: keyDER,
})
return certPEM, keyPEM, nil
}
// generateNodeCertificate generates a certificate signed by the CA
func (cm *CertificateManager) generateNodeCertificate(hostname string, caCertPEM, caKeyPEM []byte) ([]byte, []byte, error) {
// Parse CA certificate and key
caCert, caKey, err := cm.parseCACertificate(caCertPEM, caKeyPEM)
if err != nil {
return nil, nil, err
}
// Generate node private key
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
}
// Create certificate template
template := x509.Certificate{
SerialNumber: big.NewInt(time.Now().UnixNano()),
Subject: pkix.Name{
CommonName: hostname,
},
NotBefore: time.Now(),
NotAfter: time.Now().AddDate(5, 0, 0), // 5 year validity
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
DNSNames: []string{hostname},
}
	// Cover both the apex and wildcard names when issuing for debros.network
	if hostname == "*.debros.network" || hostname == "debros.network" {
		template.DNSNames = []string{"*.debros.network", "debros.network"}
	}
// Try to parse as IP address for IP-based certificates
if ip := net.ParseIP(hostname); ip != nil {
template.IPAddresses = []net.IP{ip}
template.DNSNames = nil
}
// Sign certificate with CA
certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &privateKey.PublicKey, caKey)
if err != nil {
return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
}
// Encode certificate to PEM
certPEM := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE",
Bytes: certDER,
})
// Encode private key to PEM
keyDER, err := x509.MarshalPKCS8PrivateKey(privateKey)
if err != nil {
return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{
Type: "PRIVATE KEY",
Bytes: keyDER,
})
return certPEM, keyPEM, nil
}
// parseCACertificate parses CA certificate and key from PEM
func (cm *CertificateManager) parseCACertificate(caCertPEM, caKeyPEM []byte) (*x509.Certificate, *rsa.PrivateKey, error) {
// Parse CA certificate
certBlock, _ := pem.Decode(caCertPEM)
if certBlock == nil {
return nil, nil, fmt.Errorf("failed to parse CA certificate PEM")
}
caCert, err := x509.ParseCertificate(certBlock.Bytes)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse CA certificate: %w", err)
}
// Parse CA private key
keyBlock, _ := pem.Decode(caKeyPEM)
if keyBlock == nil {
return nil, nil, fmt.Errorf("failed to parse CA key PEM")
}
caKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse CA key: %w", err)
}
rsaKey, ok := caKey.(*rsa.PrivateKey)
if !ok {
return nil, nil, fmt.Errorf("CA key is not RSA")
}
return caCert, rsaKey, nil
}
// LoadTLSCertificate loads a TLS certificate from PEM files
func LoadTLSCertificate(certPEM, keyPEM []byte) (tls.Certificate, error) {
return tls.X509KeyPair(certPEM, keyPEM)
}
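
A hedged usage sketch wiring the manager above into an HTTPS listener; the import path and the ~/.orama/certs location are assumptions for illustration, not something this commit pins down:

package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"os"
	"path/filepath"

	"github.com/DeBrosOfficial/network/pkg/certutil" // assumed import path for the package above
)

func main() {
	home, _ := os.UserHomeDir()
	cm := certutil.NewCertificateManager(filepath.Join(home, ".orama", "certs")) // assumed location

	// Create (or load) the CA, then a node certificate signed by it.
	caCert, caKey, err := cm.EnsureCACertificate()
	if err != nil {
		log.Fatal(err)
	}
	certPEM, keyPEM, err := cm.EnsureNodeCertificate("node-1.example.com", caCert, caKey)
	if err != nil {
		log.Fatal(err)
	}
	cert, err := certutil.LoadTLSCertificate(certPEM, keyPEM)
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{
		Addr:      ":8443",
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
	}
	log.Fatal(srv.ListenAndServeTLS("", "")) // empty paths: certs come from TLSConfig
}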

View File

@ -1,8 +1,10 @@
package cli
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/DeBrosOfficial/network/pkg/auth"
)
@ -50,13 +52,14 @@ func showAuthHelp() {
fmt.Printf(" 1. Run 'dbn auth login'\n")
fmt.Printf(" 2. Enter your wallet address when prompted\n")
fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n")
fmt.Printf(" 4. An API key will be generated and saved to ~/.debros/credentials.json\n\n")
fmt.Printf(" 4. An API key will be generated and saved to ~/.orama/credentials.json\n\n")
fmt.Printf("Note: Authentication uses the currently active environment.\n")
fmt.Printf(" Use 'dbn env current' to see your active environment.\n")
}
func handleAuthLogin() {
gatewayURL := getGatewayURL()
// Prompt for node selection
gatewayURL := promptForGatewayURL()
fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL)
// Use the simple authentication flow
@ -161,7 +164,55 @@ func handleAuthStatus() {
}
}
// promptForGatewayURL interactively prompts for the gateway URL
// Allows user to choose between local node or remote node by domain
func promptForGatewayURL() string {
// Check environment variable first (allows override without prompting)
if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
return url
}
reader := bufio.NewReader(os.Stdin)
fmt.Println("\n🌐 Node Connection")
fmt.Println("==================")
fmt.Println("1. Local node (localhost:6001)")
fmt.Println("2. Remote node (enter domain)")
fmt.Print("\nSelect option [1/2]: ")
choice, _ := reader.ReadString('\n')
choice = strings.TrimSpace(choice)
if choice == "1" || choice == "" {
return "http://localhost:6001"
}
if choice != "2" {
fmt.Println("⚠️ Invalid option, using localhost")
return "http://localhost:6001"
}
fmt.Print("Enter node domain (e.g., node-hk19de.debros.network): ")
domain, _ := reader.ReadString('\n')
domain = strings.TrimSpace(domain)
if domain == "" {
fmt.Println("⚠️ No domain entered, using localhost")
return "http://localhost:6001"
}
// Remove any protocol prefix if user included it
domain = strings.TrimPrefix(domain, "https://")
domain = strings.TrimPrefix(domain, "http://")
// Remove trailing slash
domain = strings.TrimSuffix(domain, "/")
// Use HTTPS for remote domains
return fmt.Sprintf("https://%s", domain)
}
// getGatewayURL returns the gateway URL based on environment or env var
// Used by other commands that don't need interactive node selection
func getGatewayURL() string {
// Check environment variable first (for backwards compatibility)
if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
@ -174,6 +225,6 @@ func getGatewayURL() string {
return env.GatewayURL
}
// Fallback to default
// Fallback to default (node-1)
return "http://localhost:6001"
}
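
Since both promptForGatewayURL and getGatewayURL consult DEBROS_GATEWAY_URL before anything else, scripted or CI logins can bypass the interactive prompt entirely, e.g. DEBROS_GATEWAY_URL=https://node-1.orama.network orama auth login.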

View File

@ -249,14 +249,11 @@ func createClient() (client.NetworkClient, error) {
gatewayURL := getGatewayURL()
config.GatewayURL = gatewayURL
// Try to get bootstrap peers from active environment
// For now, we'll use the default bootstrap peers from config
// In the future, environments could specify their own bootstrap peers
// Try to get peer configuration from active environment
env, err := GetActiveEnvironment()
if err == nil && env != nil {
// Environment loaded successfully - gateway URL already set above
// Bootstrap peers could be added to Environment struct in the future
_ = env // Use env if we add bootstrap peers to it
_ = env // Reserve for future peer configuration
}
// Check for existing credentials using enhanced authentication

View File

@ -40,30 +40,30 @@ func HandleDevCommand(args []string) {
func showDevHelp() {
fmt.Printf("🚀 Development Environment Commands\n\n")
fmt.Printf("Usage: dbn dev <subcommand> [options]\n\n")
fmt.Printf("Usage: orama dev <subcommand> [options]\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" up - Start development environment (2 bootstraps + 3 nodes + gateway)\n")
fmt.Printf(" up - Start development environment (5 nodes + gateway)\n")
fmt.Printf(" down - Stop all development services\n")
fmt.Printf(" status - Show status of running services\n")
fmt.Printf(" logs <component> - Tail logs for a component\n")
fmt.Printf(" help - Show this help\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" dbn dev up\n")
fmt.Printf(" dbn dev down\n")
fmt.Printf(" dbn dev status\n")
fmt.Printf(" dbn dev logs bootstrap --follow\n")
fmt.Printf(" orama dev up\n")
fmt.Printf(" orama dev down\n")
fmt.Printf(" orama dev status\n")
fmt.Printf(" orama dev logs node-1 --follow\n")
}
func handleDevUp(args []string) {
ctx := context.Background()
// Get home directory and .debros path
// Get home directory and .orama path
homeDir, err := os.UserHomeDir()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
// Step 1: Check dependencies
fmt.Printf("📋 Checking dependencies...\n\n")
@ -90,7 +90,7 @@ func handleDevUp(args []string) {
// Step 3: Ensure configs
fmt.Printf("⚙️ Preparing configuration files...\n\n")
ensurer := development.NewConfigEnsurer(debrosDir)
ensurer := development.NewConfigEnsurer(oramaDir)
if err := ensurer.EnsureAll(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
os.Exit(1)
@ -98,7 +98,7 @@ func handleDevUp(args []string) {
fmt.Printf("\n")
// Step 4: Start services
pm := development.NewProcessManager(debrosDir, os.Stdout)
pm := development.NewProcessManager(oramaDir, os.Stdout)
if err := pm.StartAll(ctx); err != nil {
fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
os.Exit(1)
@ -108,19 +108,19 @@ func handleDevUp(args []string) {
fmt.Printf("🎉 Development environment is running!\n\n")
fmt.Printf("Key endpoints:\n")
fmt.Printf(" Gateway: http://localhost:6001\n")
fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
fmt.Printf(" Bootstrap2 IPFS: http://localhost:4511\n")
fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
fmt.Printf(" Node4 IPFS: http://localhost:4504\n")
fmt.Printf(" Node-1 IPFS: http://localhost:4501\n")
fmt.Printf(" Node-2 IPFS: http://localhost:4502\n")
fmt.Printf(" Node-3 IPFS: http://localhost:4503\n")
fmt.Printf(" Node-4 IPFS: http://localhost:4504\n")
fmt.Printf(" Node-5 IPFS: http://localhost:4505\n")
fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
fmt.Printf("Useful commands:\n")
fmt.Printf(" dbn dev status - Show status\n")
fmt.Printf(" dbn dev logs bootstrap - Bootstrap logs\n")
fmt.Printf(" dbn dev logs bootstrap2 - Bootstrap2 logs\n")
fmt.Printf(" dbn dev down - Stop all services\n\n")
fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
fmt.Printf(" orama dev status - Show status\n")
fmt.Printf(" orama dev logs node-1 - Node-1 logs\n")
fmt.Printf(" orama dev logs node-2 - Node-2 logs\n")
fmt.Printf(" orama dev down - Stop all services\n\n")
fmt.Printf("Logs directory: %s/logs\n\n", oramaDir)
}
func handleDevDown(args []string) {
@ -129,16 +129,16 @@ func handleDevDown(args []string) {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
pm := development.NewProcessManager(debrosDir, os.Stdout)
pm := development.NewProcessManager(oramaDir, os.Stdout)
ctx := context.Background()
if err := pm.StopAll(ctx); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", err)
os.Exit(1)
}
fmt.Printf("✅ All services have been stopped\n\n")
}
@ -148,9 +148,9 @@ func handleDevStatus(args []string) {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
pm := development.NewProcessManager(debrosDir, os.Stdout)
pm := development.NewProcessManager(oramaDir, os.Stdout)
ctx := context.Background()
pm.Status(ctx)
@ -159,7 +159,7 @@ func handleDevStatus(args []string) {
func handleDevLogs(args []string) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: dbn dev logs <component> [--follow]\n")
fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, bootstrap2, node2, node3, node4, gateway, ipfs-bootstrap, ipfs-bootstrap2, ipfs-node2, ipfs-node3, ipfs-node4, olric, anon\n")
fmt.Fprintf(os.Stderr, "\nComponents: node-1, node-2, node-3, node-4, node-5, gateway, ipfs-node-1, ipfs-node-2, ipfs-node-3, ipfs-node-4, ipfs-node-5, olric, anon\n")
os.Exit(1)
}
@ -171,9 +171,9 @@ func handleDevLogs(args []string) {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))
logPath := filepath.Join(oramaDir, "logs", fmt.Sprintf("%s.log", component))
if _, err := os.Stat(logPath); os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
os.Exit(1)

View File

@ -43,8 +43,8 @@ func showEnvHelp() {
fmt.Printf(" enable - Alias for 'switch' (e.g., 'devnet enable')\n\n")
fmt.Printf("Available Environments:\n")
fmt.Printf(" local - Local development (http://localhost:6001)\n")
fmt.Printf(" devnet - Development network (https://devnet.debros.network)\n")
fmt.Printf(" testnet - Test network (https://testnet.debros.network)\n\n")
fmt.Printf(" devnet - Development network (https://devnet.orama.network)\n")
fmt.Printf(" testnet - Test network (https://testnet.orama.network)\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" dbn env list\n")
fmt.Printf(" dbn env current\n")

View File

@ -28,18 +28,18 @@ var DefaultEnvironments = []Environment{
{
Name: "local",
GatewayURL: "http://localhost:6001",
Description: "Local development environment",
Description: "Local development environment (node-1)",
IsActive: true,
},
{
Name: "devnet",
GatewayURL: "https://devnet.debros.network",
GatewayURL: "https://devnet.orama.network",
Description: "Development network (testnet)",
IsActive: false,
},
{
Name: "testnet",
GatewayURL: "https://testnet.debros.network",
GatewayURL: "https://testnet.orama.network",
Description: "Test network (staging)",
IsActive: false,
},
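Switching the active environment (via the env subcommands above) therefore repoints getGatewayURL for every subsequent command: selecting devnet targets https://devnet.orama.network until the environment is switched again.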

File diff suppressed because it is too large

View File

@ -5,76 +5,167 @@ import (
)
// TestProdCommandFlagParsing verifies that prod command flags are parsed correctly
// Note: The installer no longer takes a --bootstrap flag; a node is treated as the first node when neither --peers nor --join is provided
// First node: has --vps-ip but no --peers or --join
// Joining node: has --vps-ip, --peers, and --cluster-secret
func TestProdCommandFlagParsing(t *testing.T) {
tests := []struct {
name string
args []string
expectBootstrap bool
expectVPSIP string
expectBootstrapJoin string
expectPeers string
name string
args []string
expectVPSIP string
expectDomain string
expectPeers string
expectJoin string
expectSecret string
expectBranch string
isFirstNode bool // first node = no peers and no join address
}{
{
name: "bootstrap node",
args: []string{"install", "--bootstrap"},
expectBootstrap: true,
name: "first node (creates new cluster)",
args: []string{"install", "--vps-ip", "10.0.0.1", "--domain", "node-1.example.com"},
expectVPSIP: "10.0.0.1",
expectDomain: "node-1.example.com",
isFirstNode: true,
},
{
name: "non-bootstrap with vps-ip",
args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "multiaddr1,multiaddr2"},
expectVPSIP: "10.0.0.2",
expectPeers: "multiaddr1,multiaddr2",
name: "joining node with peers",
args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "/ip4/10.0.0.1/tcp/4001/p2p/Qm123", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
expectVPSIP: "10.0.0.2",
expectPeers: "/ip4/10.0.0.1/tcp/4001/p2p/Qm123",
expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
isFirstNode: false,
},
{
name: "secondary bootstrap",
args: []string{"install", "--bootstrap", "--vps-ip", "10.0.0.3", "--bootstrap-join", "10.0.0.1:7001"},
expectBootstrap: true,
expectVPSIP: "10.0.0.3",
expectBootstrapJoin: "10.0.0.1:7001",
name: "joining node with join address",
args: []string{"install", "--vps-ip", "10.0.0.3", "--join", "10.0.0.1:7001", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
expectVPSIP: "10.0.0.3",
expectJoin: "10.0.0.1:7001",
expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
isFirstNode: false,
},
{
name: "with domain",
args: []string{"install", "--bootstrap", "--domain", "example.com"},
expectBootstrap: true,
name: "with nightly branch",
args: []string{"install", "--vps-ip", "10.0.0.4", "--branch", "nightly"},
expectVPSIP: "10.0.0.4",
expectBranch: "nightly",
isFirstNode: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Extract flags manually to verify parsing logic
isBootstrap := false
var vpsIP, peersStr, bootstrapJoin string
var vpsIP, domain, peersStr, joinAddr, clusterSecret, branch string
for i, arg := range tt.args {
switch arg {
case "--bootstrap":
isBootstrap = true
case "--peers":
if i+1 < len(tt.args) {
peersStr = tt.args[i+1]
}
case "--vps-ip":
if i+1 < len(tt.args) {
vpsIP = tt.args[i+1]
}
case "--bootstrap-join":
case "--domain":
if i+1 < len(tt.args) {
bootstrapJoin = tt.args[i+1]
domain = tt.args[i+1]
}
case "--peers":
if i+1 < len(tt.args) {
peersStr = tt.args[i+1]
}
case "--join":
if i+1 < len(tt.args) {
joinAddr = tt.args[i+1]
}
case "--cluster-secret":
if i+1 < len(tt.args) {
clusterSecret = tt.args[i+1]
}
case "--branch":
if i+1 < len(tt.args) {
branch = tt.args[i+1]
}
}
}
if isBootstrap != tt.expectBootstrap {
t.Errorf("expected bootstrap=%v, got %v", tt.expectBootstrap, isBootstrap)
}
// First node detection: no peers and no join address
isFirstNode := peersStr == "" && joinAddr == ""
if vpsIP != tt.expectVPSIP {
t.Errorf("expected vpsIP=%q, got %q", tt.expectVPSIP, vpsIP)
}
if domain != tt.expectDomain {
t.Errorf("expected domain=%q, got %q", tt.expectDomain, domain)
}
if peersStr != tt.expectPeers {
t.Errorf("expected peers=%q, got %q", tt.expectPeers, peersStr)
}
if bootstrapJoin != tt.expectBootstrapJoin {
t.Errorf("expected bootstrapJoin=%q, got %q", tt.expectBootstrapJoin, bootstrapJoin)
if joinAddr != tt.expectJoin {
t.Errorf("expected join=%q, got %q", tt.expectJoin, joinAddr)
}
if clusterSecret != tt.expectSecret {
t.Errorf("expected clusterSecret=%q, got %q", tt.expectSecret, clusterSecret)
}
if branch != tt.expectBranch {
t.Errorf("expected branch=%q, got %q", tt.expectBranch, branch)
}
if isFirstNode != tt.isFirstNode {
t.Errorf("expected isFirstNode=%v, got %v", tt.isFirstNode, isFirstNode)
}
})
}
}
// TestNormalizePeers tests the peer multiaddr normalization
func TestNormalizePeers(t *testing.T) {
tests := []struct {
name string
input string
expectCount int
expectError bool
}{
{
name: "empty string",
input: "",
expectCount: 0,
expectError: false,
},
{
name: "single peer",
input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj",
expectCount: 1,
expectError: false,
},
{
name: "multiple peers",
input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj,/ip4/10.0.0.2/tcp/4001/p2p/12D3KooWJzL4SHW3o7sZpzjfEPJzC6Ky7gKvJxY8vQVDR2jHc8F1",
expectCount: 2,
expectError: false,
},
{
name: "duplicate peers deduplicated",
input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj,/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj",
expectCount: 1,
expectError: false,
},
{
name: "invalid multiaddr",
input: "not-a-multiaddr",
expectCount: 0,
expectError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
peers, err := normalizePeers(tt.input)
if tt.expectError && err == nil {
t.Errorf("expected error but got none")
}
if !tt.expectError && err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(peers) != tt.expectCount {
t.Errorf("expected %d peers, got %d", tt.expectCount, len(peers))
}
})
}
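
normalizePeers itself does not appear in this diff; a minimal sketch consistent with the assertions above — comma-split, whitespace trim, multiaddr validation, and order-preserving deduplication — could look like this (hypothetical, not the committed implementation):

package cli

import (
	"fmt"
	"strings"

	"github.com/multiformats/go-multiaddr"
)

// normalizePeers splits a comma-separated peer list, validates each entry as a
// multiaddr, and deduplicates while preserving order. Empty input yields no
// peers and no error, matching the test table above.
func normalizePeers(input string) ([]string, error) {
	input = strings.TrimSpace(input)
	if input == "" {
		return nil, nil
	}
	seen := make(map[string]struct{})
	var peers []string
	for _, raw := range strings.Split(input, ",") {
		p := strings.TrimSpace(raw)
		if p == "" {
			continue
		}
		if _, err := multiaddr.NewMultiaddr(p); err != nil {
			return nil, fmt.Errorf("invalid peer multiaddr %q: %w", p, err)
		}
		if _, dup := seen[p]; dup {
			continue
		}
		seen[p] = struct{}{}
		peers = append(peers, p)
	}
	return peers, nil
}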

View File

@ -195,49 +195,49 @@ func (c *Client) Connect() error {
c.pubsub = &pubSubBridge{client: c, adapter: adapter}
c.logger.Info("Pubsub bridge created successfully")
c.logger.Info("Starting bootstrap peer connections...")
c.logger.Info("Starting peer connections...")
// Connect to bootstrap peers FIRST
// Connect to peers FIRST
ctx, cancel := context.WithTimeout(context.Background(), c.config.ConnectTimeout)
defer cancel()
bootstrapPeersConnected := 0
for _, bootstrapAddr := range c.config.BootstrapPeers {
c.logger.Info("Attempting to connect to bootstrap peer", zap.String("addr", bootstrapAddr))
if err := c.connectToBootstrap(ctx, bootstrapAddr); err != nil {
c.logger.Warn("Failed to connect to bootstrap peer",
zap.String("addr", bootstrapAddr),
peersConnected := 0
for _, peerAddr := range c.config.BootstrapPeers {
c.logger.Info("Attempting to connect to peer", zap.String("addr", peerAddr))
if err := c.connectToPeer(ctx, peerAddr); err != nil {
c.logger.Warn("Failed to connect to peer",
zap.String("addr", peerAddr),
zap.Error(err))
continue
}
bootstrapPeersConnected++
c.logger.Info("Successfully connected to bootstrap peer", zap.String("addr", bootstrapAddr))
peersConnected++
c.logger.Info("Successfully connected to peer", zap.String("addr", peerAddr))
}
if bootstrapPeersConnected == 0 {
c.logger.Warn("No bootstrap peers connected, continuing anyway")
if peersConnected == 0 {
c.logger.Warn("No peers connected, continuing anyway")
} else {
c.logger.Info("Bootstrap peer connections completed", zap.Int("connected_count", bootstrapPeersConnected))
c.logger.Info("Peer connections completed", zap.Int("connected_count", peersConnected))
}
c.logger.Info("Adding bootstrap peers to peerstore...")
c.logger.Info("Adding peers to peerstore...")
// Add bootstrap peers to peerstore so we can connect to them later
for _, bootstrapAddr := range c.config.BootstrapPeers {
if ma, err := multiaddr.NewMultiaddr(bootstrapAddr); err == nil {
// Add peers to peerstore so we can connect to them later
for _, peerAddr := range c.config.BootstrapPeers {
if ma, err := multiaddr.NewMultiaddr(peerAddr); err == nil {
if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
c.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
c.logger.Debug("Added bootstrap peer to peerstore",
c.logger.Debug("Added peer to peerstore",
zap.String("peer", peerInfo.ID.String()))
}
}
}
c.logger.Info("Bootstrap peers added to peerstore")
c.logger.Info("Peers added to peerstore")
c.logger.Info("Starting connection monitoring...")
// Client is a lightweight P2P participant - no discovery needed
// We only connect to known bootstrap peers and let nodes handle discovery
// We only connect to known peers and let nodes handle discovery
c.logger.Debug("Client configured as lightweight P2P participant (no discovery)")
// Start minimal connection monitoring

View File

@ -9,8 +9,8 @@ import (
"go.uber.org/zap"
)
// connectToBootstrap connects to a bootstrap peer
func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
// connectToPeer connects to a peer address
func (c *Client) connectToPeer(ctx context.Context, addr string) error {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return fmt.Errorf("invalid multiaddr: %w", err)
@ -20,14 +20,14 @@ func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
peerInfo, err := peer.AddrInfoFromP2pAddr(ma)
if err != nil {
// If there's no peer ID, we can't connect
c.logger.Warn("Bootstrap address missing peer ID, skipping",
c.logger.Warn("Peer address missing peer ID, skipping",
zap.String("addr", addr))
return nil
}
// Avoid dialing ourselves: if the bootstrap address resolves to our own peer ID, skip.
// Avoid dialing ourselves: if the peer address resolves to our own peer ID, skip.
if c.host != nil && peerInfo.ID == c.host.ID() {
c.logger.Debug("Skipping bootstrap address because it resolves to self",
c.logger.Debug("Skipping peer address because it resolves to self",
zap.String("addr", addr),
zap.String("peer_id", peerInfo.ID.String()))
return nil
@ -38,7 +38,7 @@ func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
return fmt.Errorf("failed to connect to peer: %w", err)
}
c.logger.Debug("Connected to bootstrap peer",
c.logger.Debug("Connected to peer",
zap.String("peer_id", peerInfo.ID.String()),
zap.String("addr", addr))

View File

@ -9,7 +9,7 @@ import (
"github.com/multiformats/go-multiaddr"
)
// DefaultBootstrapPeers returns the library's default bootstrap peer multiaddrs.
// DefaultBootstrapPeers returns the default peer multiaddrs.
// These can be overridden by environment variables or config.
func DefaultBootstrapPeers() []string {
// Check environment variable first
@ -48,7 +48,7 @@ func DefaultDatabaseEndpoints() []string {
}
}
// Try to derive from bootstrap peers if available
// Try to derive from configured peers if available
peers := DefaultBootstrapPeers()
if len(peers) > 0 {
endpoints := make([]string, 0, len(peers))

View File

@ -10,15 +10,15 @@ import (
func TestDefaultBootstrapPeersNonEmpty(t *testing.T) {
old := os.Getenv("DEBROS_BOOTSTRAP_PEERS")
t.Cleanup(func() { os.Setenv("DEBROS_BOOTSTRAP_PEERS", old) })
// Set a valid bootstrap peer
// Set a valid peer
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
_ = os.Setenv("DEBROS_BOOTSTRAP_PEERS", validPeer)
peers := DefaultBootstrapPeers()
if len(peers) == 0 {
t.Fatalf("expected non-empty default bootstrap peers")
t.Fatalf("expected non-empty default peers")
}
if peers[0] != validPeer {
t.Fatalf("expected bootstrap peer %s, got %s", validPeer, peers[0])
t.Fatalf("expected peer %s, got %s", validPeer, peers[0])
}
}

View File

@ -2,7 +2,9 @@ package client
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"sync"
"time"
@ -504,15 +506,100 @@ func (n *NetworkInfoImpl) GetStatus(ctx context.Context) (*NetworkStatus, error)
}
}
// Try to get IPFS peer info (optional - don't fail if unavailable)
ipfsInfo := queryIPFSPeerInfo()
// Try to get IPFS Cluster peer info (optional - don't fail if unavailable)
ipfsClusterInfo := queryIPFSClusterPeerInfo()
return &NetworkStatus{
NodeID: host.ID().String(),
PeerID: host.ID().String(),
Connected: true,
PeerCount: len(connectedPeers),
DatabaseSize: dbSize,
Uptime: time.Since(n.client.startTime),
IPFS: ipfsInfo,
IPFSCluster: ipfsClusterInfo,
}, nil
}
// queryIPFSPeerInfo queries the local IPFS API for peer information
// Returns nil if IPFS is not running or unavailable
func queryIPFSPeerInfo() *IPFSPeerInfo {
// IPFS API typically runs on port 4501 in our setup
client := &http.Client{Timeout: 2 * time.Second}
resp, err := client.Post("http://localhost:4501/api/v0/id", "", nil)
if err != nil {
return nil // IPFS not available
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil
}
var result struct {
ID string `json:"ID"`
Addresses []string `json:"Addresses"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil
}
// Filter addresses to only include public/routable ones
var swarmAddrs []string
for _, addr := range result.Addresses {
// Skip loopback and private addresses for external discovery
if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
swarmAddrs = append(swarmAddrs, addr)
}
}
return &IPFSPeerInfo{
PeerID: result.ID,
SwarmAddresses: swarmAddrs,
}
}
// queryIPFSClusterPeerInfo queries the local IPFS Cluster API for peer information
// Returns nil if IPFS Cluster is not running or unavailable
func queryIPFSClusterPeerInfo() *IPFSClusterPeerInfo {
// IPFS Cluster API typically runs on port 9094 in our setup
client := &http.Client{Timeout: 2 * time.Second}
resp, err := client.Get("http://localhost:9094/id")
if err != nil {
return nil // IPFS Cluster not available
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil
}
var result struct {
ID string `json:"id"`
Addresses []string `json:"addresses"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil
}
// Filter addresses to only include public/routable ones for cluster discovery
var clusterAddrs []string
for _, addr := range result.Addresses {
// Skip loopback addresses - only keep routable addresses
if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
clusterAddrs = append(clusterAddrs, addr)
}
}
return &IPFSClusterPeerInfo{
PeerID: result.ID,
Addresses: clusterAddrs,
}
}
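
Both probes are deliberately best-effort: any transport, status, or decode failure yields nil rather than an error. A minimal standalone check against a running dev node might look like the sketch below, assuming the dev-topology ports (4501 for the IPFS API, 9094 for the cluster REST API); note the JSON field casing differs between the two daemons, as mirrored in the decoders above.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "time"
)

func main() {
    client := &http.Client{Timeout: 2 * time.Second}

    // Kubo's API requires POST; /api/v0/id returns the node's identity.
    if resp, err := client.Post("http://localhost:4501/api/v0/id", "", nil); err == nil {
        defer resp.Body.Close()
        var id struct {
            ID string `json:"ID"`
        }
        if json.NewDecoder(resp.Body).Decode(&id) == nil {
            fmt.Println("ipfs peer:", id.ID)
        }
    }

    // The cluster REST API exposes GET /id with a lowercase "id" field.
    if resp, err := client.Get("http://localhost:9094/id"); err == nil {
        defer resp.Body.Close()
        var id struct {
            ID string `json:"id"`
        }
        if json.NewDecoder(resp.Body).Decode(&id) == nil {
            fmt.Println("cluster peer:", id.ID)
        }
    }
}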
// ConnectToPeer connects to a specific peer
func (n *NetworkInfoImpl) ConnectToPeer(ctx context.Context, peerAddr string) error {
if !n.client.isConnected() {

View File

@ -114,11 +114,26 @@ type PeerInfo struct {
// NetworkStatus contains overall network status
type NetworkStatus struct {
NodeID string `json:"node_id"`
Connected bool `json:"connected"`
PeerCount int `json:"peer_count"`
DatabaseSize int64 `json:"database_size"`
Uptime time.Duration `json:"uptime"`
NodeID string `json:"node_id"`
PeerID string `json:"peer_id"`
Connected bool `json:"connected"`
PeerCount int `json:"peer_count"`
DatabaseSize int64 `json:"database_size"`
Uptime time.Duration `json:"uptime"`
IPFS *IPFSPeerInfo `json:"ipfs,omitempty"`
IPFSCluster *IPFSClusterPeerInfo `json:"ipfs_cluster,omitempty"`
}
// IPFSPeerInfo contains IPFS peer information for discovery
type IPFSPeerInfo struct {
PeerID string `json:"peer_id"`
SwarmAddresses []string `json:"swarm_addresses"`
}
// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster discovery
type IPFSClusterPeerInfo struct {
PeerID string `json:"peer_id"` // Cluster peer ID (different from IPFS peer ID)
Addresses []string `json:"addresses"` // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
}
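
Because the two new fields are pointers tagged omitempty, the status JSON is unchanged for existing consumers whenever the probes return nil. A trimmed sketch of the wire shape (structs mirrored from the definitions above, fields elided for brevity):

package main

import (
    "encoding/json"
    "os"
    "time"
)

// Trimmed mirror of the client types above.
type IPFSPeerInfo struct {
    PeerID         string   `json:"peer_id"`
    SwarmAddresses []string `json:"swarm_addresses"`
}

type NetworkStatus struct {
    NodeID    string        `json:"node_id"`
    PeerID    string        `json:"peer_id"`
    Connected bool          `json:"connected"`
    PeerCount int           `json:"peer_count"`
    Uptime    time.Duration `json:"uptime"`
    IPFS      *IPFSPeerInfo `json:"ipfs,omitempty"`
}

func main() {
    // With IPFS unavailable the pointer stays nil, so the "ipfs" key is
    // omitted and pre-existing consumers of the status JSON are unaffected.
    s := NetworkStatus{NodeID: "12D3KooW...", PeerID: "12D3KooW...", Connected: true, PeerCount: 3}
    enc := json.NewEncoder(os.Stdout)
    enc.SetIndent("", "  ")
    _ = enc.Encode(s)
}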
// HealthStatus contains health check information
@ -158,7 +173,7 @@ type StorageStatus struct {
type ClientConfig struct {
AppName string `json:"app_name"`
DatabaseName string `json:"database_name"`
BootstrapPeers []string `json:"bootstrap_peers"`
BootstrapPeers []string `json:"peers"`
DatabaseEndpoints []string `json:"database_endpoints"`
GatewayURL string `json:"gateway_url"` // Gateway URL for HTTP API access (e.g., "http://localhost:6001")
ConnectTimeout time.Duration `json:"connect_timeout"`

View File

@ -8,20 +8,21 @@ import (
// Config represents the main configuration for a network node
type Config struct {
Node NodeConfig `yaml:"node"`
Database DatabaseConfig `yaml:"database"`
Discovery DiscoveryConfig `yaml:"discovery"`
Security SecurityConfig `yaml:"security"`
Logging LoggingConfig `yaml:"logging"`
Node NodeConfig `yaml:"node"`
Database DatabaseConfig `yaml:"database"`
Discovery DiscoveryConfig `yaml:"discovery"`
Security SecurityConfig `yaml:"security"`
Logging LoggingConfig `yaml:"logging"`
HTTPGateway HTTPGatewayConfig `yaml:"http_gateway"`
}
// NodeConfig contains node-specific configuration
type NodeConfig struct {
ID string `yaml:"id"` // Auto-generated if empty
Type string `yaml:"type"` // "bootstrap" or "node"
ListenAddresses []string `yaml:"listen_addresses"` // LibP2P listen addresses
DataDir string `yaml:"data_dir"` // Data directory
MaxConnections int `yaml:"max_connections"` // Maximum peer connections
Domain string `yaml:"domain"` // Domain for this node (e.g., node-1.orama.network)
}
// DatabaseConfig contains database-related configuration
@ -37,6 +38,13 @@ type DatabaseConfig struct {
RQLiteRaftPort int `yaml:"rqlite_raft_port"` // RQLite Raft consensus port
RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster
// RQLite node-to-node TLS encryption (for inter-node Raft communication)
// See: https://rqlite.io/docs/guides/security/#encrypting-node-to-node-communication
NodeCert string `yaml:"node_cert"` // Path to X.509 certificate for node-to-node communication
NodeKey string `yaml:"node_key"` // Path to X.509 private key for node-to-node communication
NodeCACert string `yaml:"node_ca_cert"` // Path to CA certificate (optional, uses system CA if not set)
NodeNoVerify bool `yaml:"node_no_verify"` // Skip certificate verification (for testing/self-signed certs)
// Dynamic discovery configuration (always enabled)
ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s
PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h
@ -75,9 +83,9 @@ type IPFSConfig struct {
// DiscoveryConfig contains peer discovery configuration
type DiscoveryConfig struct {
BootstrapPeers []string `yaml:"bootstrap_peers"` // Bootstrap peer addresses
BootstrapPeers []string `yaml:"bootstrap_peers"` // Peer addresses to connect to
DiscoveryInterval time.Duration `yaml:"discovery_interval"` // Discovery announcement interval
BootstrapPort int `yaml:"bootstrap_port"` // Default port for bootstrap nodes
BootstrapPort int `yaml:"bootstrap_port"` // Default port for peer discovery
HttpAdvAddress string `yaml:"http_adv_address"` // HTTP advertisement address
RaftAdvAddress string `yaml:"raft_adv_address"` // Raft advertisement
NodeNamespace string `yaml:"node_namespace"` // Namespace for node identifiers
@ -97,6 +105,56 @@ type LoggingConfig struct {
OutputFile string `yaml:"output_file"` // Empty for stdout
}
// HTTPGatewayConfig contains HTTP reverse proxy gateway configuration
type HTTPGatewayConfig struct {
Enabled bool `yaml:"enabled"` // Enable HTTP gateway
ListenAddr string `yaml:"listen_addr"` // Address to listen on (e.g., ":8080")
NodeName string `yaml:"node_name"` // Node name for routing
Routes map[string]RouteConfig `yaml:"routes"` // Service routes
HTTPS HTTPSConfig `yaml:"https"` // HTTPS/TLS configuration
SNI SNIConfig `yaml:"sni"` // SNI-based TCP routing configuration
// Full gateway configuration (for API, auth, pubsub)
ClientNamespace string `yaml:"client_namespace"` // Namespace for network client
RQLiteDSN string `yaml:"rqlite_dsn"` // RQLite database DSN
OlricServers []string `yaml:"olric_servers"` // List of Olric server addresses
OlricTimeout time.Duration `yaml:"olric_timeout"` // Timeout for Olric operations
IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"` // IPFS Cluster API URL
IPFSAPIURL string `yaml:"ipfs_api_url"` // IPFS API URL
IPFSTimeout time.Duration `yaml:"ipfs_timeout"` // Timeout for IPFS operations
}
// HTTPSConfig contains HTTPS/TLS configuration for the gateway
type HTTPSConfig struct {
Enabled bool `yaml:"enabled"` // Enable HTTPS (port 443)
Domain string `yaml:"domain"` // Primary domain (e.g., node-123.orama.network)
AutoCert bool `yaml:"auto_cert"` // Use Let's Encrypt for automatic certificate
UseSelfSigned bool `yaml:"use_self_signed"` // Use self-signed certificates (pre-generated)
CertFile string `yaml:"cert_file"` // Path to certificate file (if not using auto_cert)
KeyFile string `yaml:"key_file"` // Path to key file (if not using auto_cert)
CacheDir string `yaml:"cache_dir"` // Directory for Let's Encrypt certificate cache
HTTPPort int `yaml:"http_port"` // HTTP port for ACME challenge (default: 80)
HTTPSPort int `yaml:"https_port"` // HTTPS port (default: 443)
Email string `yaml:"email"` // Email for Let's Encrypt account
}
// SNIConfig contains SNI-based TCP routing configuration for port 7001
type SNIConfig struct {
Enabled bool `yaml:"enabled"` // Enable SNI-based TCP routing
ListenAddr string `yaml:"listen_addr"` // Address to listen on (e.g., ":7001")
Routes map[string]string `yaml:"routes"` // SNI hostname -> backend address mapping
CertFile string `yaml:"cert_file"` // Path to certificate file
KeyFile string `yaml:"key_file"` // Path to key file
}
// RouteConfig defines a single reverse proxy route
type RouteConfig struct {
PathPrefix string `yaml:"path_prefix"` // URL path prefix (e.g., "/rqlite/http")
BackendURL string `yaml:"backend_url"` // Backend service URL
Timeout time.Duration `yaml:"timeout"` // Request timeout
WebSocket bool `yaml:"websocket"` // Support WebSocket upgrades
}
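
A sketch of how these pieces compose, with illustrative route names and dev-topology backend ports (the real route table is built from templates elsewhere in this commit, not shown here):

package main

import (
    "fmt"
    "time"
)

// Trimmed copies of the config types above, enough to sketch a route table.
type RouteConfig struct {
    PathPrefix string
    BackendURL string
    Timeout    time.Duration
    WebSocket  bool
}

type HTTPGatewayConfig struct {
    Enabled    bool
    ListenAddr string
    Routes     map[string]RouteConfig
}

func main() {
    // A hypothetical per-node gateway: path prefixes fan out to the local
    // RQLite HTTP API and the IPFS Cluster REST API (dev-topology ports).
    cfg := HTTPGatewayConfig{
        Enabled:    true,
        ListenAddr: ":6001",
        Routes: map[string]RouteConfig{
            "rqlite":  {PathPrefix: "/rqlite/http", BackendURL: "http://localhost:5001", Timeout: 30 * time.Second},
            "cluster": {PathPrefix: "/cluster", BackendURL: "http://localhost:9094", Timeout: 60 * time.Second},
        },
    }
    for name, r := range cfg.Routes {
        fmt.Printf("%s: %s -> %s\n", name, r.PathPrefix, r.BackendURL)
    }
}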
// ClientConfig represents configuration for network clients
type ClientConfig struct {
AppName string `yaml:"app_name"`
@ -123,7 +181,6 @@ func (c *Config) ParseMultiaddrs() ([]multiaddr.Multiaddr, error) {
func DefaultConfig() *Config {
return &Config{
Node: NodeConfig{
Type: "node",
ListenAddresses: []string{
"/ip4/0.0.0.0/tcp/4001", // TCP only - compatible with Anyone proxy/SOCKS5
},
@ -140,7 +197,7 @@ func DefaultConfig() *Config {
// RQLite-specific configuration
RQLitePort: 5001,
RQLiteRaftPort: 7001,
RQLiteJoinAddress: "", // Empty for bootstrap node
RQLiteJoinAddress: "", // Empty for first node (creates cluster)
// Dynamic discovery (always enabled)
ClusterSyncInterval: 30 * time.Second,
@ -175,5 +232,18 @@ func DefaultConfig() *Config {
Level: "info",
Format: "console",
},
HTTPGateway: HTTPGatewayConfig{
Enabled: true,
ListenAddr: ":8080",
NodeName: "default",
Routes: make(map[string]RouteConfig),
ClientNamespace: "default",
RQLiteDSN: "http://localhost:5001",
OlricServers: []string{"localhost:3320"},
OlricTimeout: 10 * time.Second,
IPFSClusterAPIURL: "http://localhost:9094",
IPFSAPIURL:        "http://localhost:4501", // IPFS API port in this setup (5001 is RQLite's HTTP port)
IPFSTimeout: 60 * time.Second,
},
}
}
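
A hedged usage sketch: start from DefaultConfig and enable HTTPS for a public node. The import path is assumed by analogy with pkg/tlsutil elsewhere in this commit; every field touched exists in the structs above, and the email is a placeholder.

package main

import (
    "fmt"

    "github.com/DeBrosOfficial/network/pkg/config" // assumed path, by analogy with pkg/tlsutil
)

func main() {
    // Start from the defaults and turn on HTTPS for a public node; all
    // fields written here are defined in the structs in this file.
    cfg := config.DefaultConfig()
    cfg.Node.Domain = "node-1.orama.network"
    cfg.HTTPGateway.HTTPS.Enabled = true
    cfg.HTTPGateway.HTTPS.Domain = cfg.Node.Domain
    cfg.HTTPGateway.HTTPS.AutoCert = true
    cfg.HTTPGateway.HTTPS.Email = "ops@example.org" // hypothetical ACME account address

    if errs := cfg.Validate(); len(errs) > 0 {
        fmt.Println("config invalid:", errs)
    }
}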

View File

@ -6,13 +6,13 @@ import (
"path/filepath"
)
// ConfigDir returns the path to the DeBros config directory (~/.debros).
// ConfigDir returns the path to the DeBros config directory (~/.orama).
func ConfigDir() (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", fmt.Errorf("failed to determine home directory: %w", err)
}
return filepath.Join(home, ".debros"), nil
return filepath.Join(home, ".orama"), nil
}
// EnsureConfigDir creates the config directory if it does not exist.
@ -28,8 +28,8 @@ func EnsureConfigDir() (string, error) {
}
// DefaultPath returns the path to the config file for the given component name.
// component should be e.g., "node.yaml", "bootstrap.yaml", "gateway.yaml"
// It checks ~/.debros/data/, ~/.debros/configs/, and ~/.debros/ for backward compatibility.
// component should be e.g., "node.yaml", "gateway.yaml"
// It checks ~/.orama/data/, ~/.orama/configs/, and ~/.orama/ for backward compatibility.
// If component is already an absolute path, it returns it as-is.
func DefaultPath(component string) (string, error) {
// If component is already an absolute path, return it directly
@ -53,13 +53,13 @@ func DefaultPath(component string) (string, error) {
gatewayDefault = dataPath
}
// First check in ~/.debros/configs/ (production installer location)
// First check in ~/.orama/configs/ (production installer location)
configsPath := filepath.Join(dir, "configs", component)
if _, err := os.Stat(configsPath); err == nil {
return configsPath, nil
}
// Fallback to ~/.debros/ (legacy/development location)
// Fallback to ~/.orama/ (legacy/development location)
legacyPath := filepath.Join(dir, component)
if _, err := os.Stat(legacyPath); err == nil {
return legacyPath, nil

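A standalone sketch of the lookup order just described (the ~/.orama/data/ special case for gateway configs is omitted here):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// resolveConfig mirrors the search order above: prefer
// ~/.orama/configs/<name>, then fall back to ~/.orama/<name>.
func resolveConfig(dir, component string) (string, bool) {
    for _, p := range []string{
        filepath.Join(dir, "configs", component), // production installer location
        filepath.Join(dir, component),            // legacy/development location
    } {
        if _, err := os.Stat(p); err == nil {
            return p, true
        }
    }
    return "", false
}

func main() {
    home, _ := os.UserHomeDir()
    if p, ok := resolveConfig(filepath.Join(home, ".orama"), "node.yaml"); ok {
        fmt.Println("using", p)
    }
}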
View File

@ -15,7 +15,7 @@ import (
// ValidationError represents a single validation error with context.
type ValidationError struct {
Path string // e.g., "discovery.bootstrap_peers[0]"
Path string // e.g., "discovery.bootstrap_peers[0]" or "discovery.peers[0]"
Message string // e.g., "invalid multiaddr"
Hint string // e.g., "expected /ip{4,6}/.../tcp/<port>/p2p/<peerID>"
}
@ -61,14 +61,6 @@ func (c *Config) validateNode() []error {
})
}
// Validate type
if nc.Type != "bootstrap" && nc.Type != "node" {
errs = append(errs, ValidationError{
Path: "node.type",
Message: fmt.Sprintf("must be one of [bootstrap node]; got %q", nc.Type),
})
}
// Validate listen_addresses
if len(nc.ListenAddresses) == 0 {
errs = append(errs, ValidationError{
@ -218,33 +210,15 @@ func (c *Config) validateDatabase() []error {
})
}
// Validate rqlite_join_address context-dependently
if c.Node.Type == "node" {
if dc.RQLiteJoinAddress == "" {
// Validate rqlite_join_address format if provided (optional for all nodes)
// The first node in a cluster won't have a join address; subsequent nodes will
if dc.RQLiteJoinAddress != "" {
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "required for node type (non-bootstrap)",
Message: err.Error(),
Hint: "expected format: host:port",
})
} else {
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: err.Error(),
Hint: "expected format: host:port",
})
}
}
} else if c.Node.Type == "bootstrap" {
// Bootstrap nodes can optionally join another bootstrap's RQLite cluster
// This allows secondary bootstraps to synchronize with the primary
if dc.RQLiteJoinAddress != "" {
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: err.Error(),
Hint: "expected format: host:port",
})
}
}
}
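
For reference, a standalone stand-in for validateHostPort (its body is not part of this diff) that matches the validator's new semantics, including the rule that an empty join address is simply skipped:

package main

import (
    "fmt"
    "net"
    "strconv"
)

// checkHostPort is a sketch of the host:port check implied above: it accepts
// "host:port" with a port in 1..65535, matching the hint the validator emits.
func checkHostPort(s string) error {
    host, portStr, err := net.SplitHostPort(s)
    if err != nil {
        return fmt.Errorf("expected format host:port: %w", err)
    }
    if host == "" {
        return fmt.Errorf("missing host")
    }
    port, err := strconv.Atoi(portStr)
    if err != nil || port < 1 || port > 65535 {
        return fmt.Errorf("invalid port %q", portStr)
    }
    return nil
}

func main() {
    for _, s := range []string{"localhost:5001", "localhost", "localhost:99999", ""} {
        // An empty join address is never validated: the first node creates
        // the cluster, so only non-empty values are checked.
        if s == "" {
            fmt.Println(`"" -> skipped (first node)`)
            continue
        }
        fmt.Printf("%q -> %v\n", s, checkHostPort(s))
    }
}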
@ -297,7 +271,7 @@ func (c *Config) validateDiscovery() []error {
})
}
// Validate bootstrap_port
// Validate peer discovery port
if disc.BootstrapPort < 1 || disc.BootstrapPort > 65535 {
errs = append(errs, ValidationError{
Path: "discovery.bootstrap_port",
@ -305,17 +279,8 @@ func (c *Config) validateDiscovery() []error {
})
}
// Validate bootstrap_peers context-dependently
if c.Node.Type == "node" {
if len(disc.BootstrapPeers) == 0 {
errs = append(errs, ValidationError{
Path: "discovery.bootstrap_peers",
Message: "required for node type (must not be empty)",
})
}
}
// Validate each bootstrap peer multiaddr
// Validate peer addresses (optional - all nodes are unified peers now)
// Validate each peer multiaddr
seenPeers := make(map[string]bool)
for i, peer := range disc.BootstrapPeers {
path := fmt.Sprintf("discovery.bootstrap_peers[%d]", i)
@ -363,7 +328,7 @@ func (c *Config) validateDiscovery() []error {
if seenPeers[peer] {
errs = append(errs, ValidationError{
Path: path,
Message: "duplicate bootstrap peer",
Message: "duplicate peer",
})
}
seenPeers[peer] = true
@ -486,22 +451,6 @@ func (c *Config) validateLogging() []error {
func (c *Config) validateCrossFields() []error {
var errs []error
// If node.type is invalid, don't run cross-checks
if c.Node.Type != "bootstrap" && c.Node.Type != "node" {
return errs
}
// Cross-check rqlite_join_address vs node type
// Note: Bootstrap nodes can optionally join another bootstrap's cluster
if c.Node.Type == "node" && c.Database.RQLiteJoinAddress == "" {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "required for non-bootstrap node type",
})
}
return errs
}

View File

@ -5,12 +5,11 @@ import (
"time"
)
// validConfigForType returns a valid config for the given node type
func validConfigForType(nodeType string) *Config {
// validConfigForNode returns a valid config
func validConfigForNode() *Config {
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
cfg := &Config{
Node: NodeConfig{
Type: nodeType,
ID: "test-node-id",
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4001"},
DataDir: ".",
@ -25,6 +24,7 @@ func validConfigForType(nodeType string) *Config {
RQLitePort: 5001,
RQLiteRaftPort: 7001,
MinClusterSize: 1,
RQLiteJoinAddress: "", // Optional - first node creates cluster, others join
},
Discovery: DiscoveryConfig{
BootstrapPeers: []string{validPeer},
@ -40,51 +40,9 @@ func validConfigForType(nodeType string) *Config {
},
}
// Set rqlite_join_address based on node type
if nodeType == "node" {
cfg.Database.RQLiteJoinAddress = "localhost:5001"
// Node type requires bootstrap peers
cfg.Discovery.BootstrapPeers = []string{validPeer}
} else {
// Bootstrap type: empty join address and peers optional
cfg.Database.RQLiteJoinAddress = ""
cfg.Discovery.BootstrapPeers = []string{}
}
return cfg
}
func TestValidateNodeType(t *testing.T) {
tests := []struct {
name string
nodeType string
shouldError bool
}{
{"bootstrap", "bootstrap", false},
{"node", "node", false},
{"invalid", "invalid-type", true},
{"empty", "", true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("bootstrap") // Start with valid bootstrap
if tt.nodeType == "node" {
cfg = validConfigForType("node")
} else {
cfg.Node.Type = tt.nodeType
}
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
t.Errorf("expected error, got none")
}
if !tt.shouldError && len(errs) > 0 {
t.Errorf("unexpected errors: %v", errs)
}
})
}
}
func TestValidateListenAddresses(t *testing.T) {
tests := []struct {
name string
@ -102,7 +60,7 @@ func TestValidateListenAddresses(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Node.ListenAddresses = tt.addresses
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -130,7 +88,7 @@ func TestValidateReplicationFactor(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Database.ReplicationFactor = tt.replication
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -160,7 +118,7 @@ func TestValidateRQLitePorts(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Database.RQLitePort = tt.httpPort
cfg.Database.RQLiteRaftPort = tt.raftPort
errs := cfg.Validate()
@ -177,21 +135,18 @@ func TestValidateRQLitePorts(t *testing.T) {
func TestValidateRQLiteJoinAddress(t *testing.T) {
tests := []struct {
name string
nodeType string
joinAddr string
shouldError bool
}{
{"node with join", "node", "localhost:5001", false},
{"node without join", "node", "", true},
{"bootstrap with join", "bootstrap", "localhost:5001", false},
{"bootstrap without join", "bootstrap", "", false},
{"invalid join format", "node", "localhost", true},
{"invalid join port", "node", "localhost:99999", true},
{"node with join", "localhost:5001", false},
{"node without join", "", false}, // Join address is optional (first node creates cluster)
{"invalid join format", "localhost", true},
{"invalid join port", "localhost:99999", true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType(tt.nodeType)
cfg := validConfigForNode()
cfg.Database.RQLiteJoinAddress = tt.joinAddr
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -204,27 +159,24 @@ func TestValidateRQLiteJoinAddress(t *testing.T) {
}
}
func TestValidateBootstrapPeers(t *testing.T) {
func TestValidatePeerAddresses(t *testing.T) {
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
tests := []struct {
name string
nodeType string
peers []string
shouldError bool
}{
{"node with peer", "node", []string{validPeer}, false},
{"node without peer", "node", []string{}, true},
{"bootstrap with peer", "bootstrap", []string{validPeer}, false},
{"bootstrap without peer", "bootstrap", []string{}, false},
{"invalid multiaddr", "node", []string{"invalid"}, true},
{"missing p2p", "node", []string{"/ip4/127.0.0.1/tcp/4001"}, true},
{"duplicate peer", "node", []string{validPeer, validPeer}, true},
{"invalid port", "node", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true},
{"node with peer", []string{validPeer}, false},
{"node without peer", []string{}, false}, // All nodes are unified peers - bootstrap peers optional
{"invalid multiaddr", []string{"invalid"}, true},
{"missing p2p", []string{"/ip4/127.0.0.1/tcp/4001"}, true},
{"duplicate peer", []string{validPeer, validPeer}, true},
{"invalid port", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType(tt.nodeType)
cfg := validConfigForNode()
cfg.Discovery.BootstrapPeers = tt.peers
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -253,7 +205,7 @@ func TestValidateLoggingLevel(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Logging.Level = tt.level
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -280,7 +232,7 @@ func TestValidateLoggingFormat(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Logging.Format = tt.format
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -307,7 +259,7 @@ func TestValidateMaxConnections(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Node.MaxConnections = tt.maxConn
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -334,7 +286,7 @@ func TestValidateDiscoveryInterval(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Discovery.DiscoveryInterval = tt.interval
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -347,7 +299,7 @@ func TestValidateDiscoveryInterval(t *testing.T) {
}
}
func TestValidateBootstrapPort(t *testing.T) {
func TestValidatePeerDiscoveryPort(t *testing.T) {
tests := []struct {
name string
port int
@ -361,7 +313,7 @@ func TestValidateBootstrapPort(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := validConfigForType("node")
cfg := validConfigForNode()
cfg.Discovery.BootstrapPort = tt.port
errs := cfg.Validate()
if tt.shouldError && len(errs) == 0 {
@ -378,7 +330,6 @@ func TestValidateCompleteConfig(t *testing.T) {
// Test a complete valid config
validCfg := &Config{
Node: NodeConfig{
Type: "node",
ID: "node1",
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4002"},
DataDir: ".",

View File

@ -214,7 +214,7 @@ func (d *Manager) Stop() {
}
// discoverPeers discovers and connects to new peers using non-DHT strategies:
// - Peerstore entries (bootstrap peers added to peerstore by the caller)
// - Peerstore entries (peers added to peerstore by the caller)
// - Peer exchange: query currently connected peers' peerstore entries
func (d *Manager) discoverPeers(ctx context.Context, config Config) {
connectedPeers := d.host.Network().Peers()
@ -242,7 +242,7 @@ func (d *Manager) discoverPeers(ctx context.Context, config Config) {
}
// discoverViaPeerstore attempts to connect to peers found in the host's peerstore.
// This is useful for bootstrap peers that have been pre-populated into the peerstore.
// This is useful for peers that have been pre-populated into the peerstore.
func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int) int {
if maxConnections <= 0 {
return 0
@ -271,7 +271,7 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
}
// Filter peers to only include those with addresses on our port (4001)
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096)
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096/9098)
peerInfo := d.host.Peerstore().PeerInfo(pid)
hasValidPort := false
for _, addr := range peerInfo.Addrs {

View File

@ -9,7 +9,7 @@ type RQLiteNodeMetadata struct {
NodeID string `json:"node_id"` // RQLite node ID (from config)
RaftAddress string `json:"raft_address"` // Raft port address (e.g., "51.83.128.181:7001")
HTTPAddress string `json:"http_address"` // HTTP API address (e.g., "51.83.128.181:5001")
NodeType string `json:"node_type"` // "bootstrap" or "node"
NodeType string `json:"node_type"` // Node type identifier
RaftLogIndex uint64 `json:"raft_log_index"` // Current Raft log index (for data comparison)
LastSeen time.Time `json:"last_seen"` // Updated on every announcement
ClusterVersion string `json:"cluster_version"` // For compatibility checking

View File

@ -17,7 +17,8 @@ func TestPortChecker(t *testing.T) {
}
// Check that required port counts match expectations
expectedPortCount := 44 // Based on RequiredPorts
// 5 nodes × 9 ports per node + 4 shared ports = 49
expectedPortCount := 49 // Based on RequiredPorts
if len(checker.ports) != expectedPortCount {
t.Errorf("Expected %d ports, got %d", expectedPortCount, len(checker.ports))
}
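
The updated count is pure arithmetic over the new topology:

package main

import "fmt"

func main() {
    const (
        nodes        = 5 // node-1 .. node-5 in the dev topology
        portsPerNode = 9 // P2P, IPFS API/Swarm/Gateway, RQLite HTTP/Raft, Cluster API/P2P, unified gateway
        sharedPorts  = 4 // main gateway, Olric HTTP, Olric memberlist, Anon SOCKS
    )
    fmt.Println(nodes*portsPerNode + sharedPorts) // 49
}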

View File

@ -14,24 +14,24 @@ import (
// ConfigEnsurer handles all config file creation and validation
type ConfigEnsurer struct {
debrosDir string
oramaDir string
}
// NewConfigEnsurer creates a new config ensurer
func NewConfigEnsurer(debrosDir string) *ConfigEnsurer {
func NewConfigEnsurer(oramaDir string) *ConfigEnsurer {
return &ConfigEnsurer{
debrosDir: debrosDir,
oramaDir: oramaDir,
}
}
// EnsureAll ensures all necessary config files and secrets exist
func (ce *ConfigEnsurer) EnsureAll() error {
// Create directories
if err := os.MkdirAll(ce.debrosDir, 0755); err != nil {
return fmt.Errorf("failed to create .debros directory: %w", err)
if err := os.MkdirAll(ce.oramaDir, 0755); err != nil {
return fmt.Errorf("failed to create .orama directory: %w", err)
}
if err := os.MkdirAll(filepath.Join(ce.debrosDir, "logs"), 0755); err != nil {
if err := os.MkdirAll(filepath.Join(ce.oramaDir, "logs"), 0755); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
@ -43,27 +43,27 @@ func (ce *ConfigEnsurer) EnsureAll() error {
// Load topology
topology := DefaultTopology()
// Generate identities for all bootstrap nodes and collect multiaddrs
bootstrapAddrs := []string{}
for _, nodeSpec := range topology.GetBootstrapNodes() {
// Generate identities for first two nodes and collect their multiaddrs as peer addresses
// All nodes use these addresses for initial peer discovery
peerAddrs := []string{}
for i := 0; i < 2 && i < len(topology.Nodes); i++ {
nodeSpec := topology.Nodes[i]
addr, err := ce.ensureNodeIdentity(nodeSpec)
if err != nil {
return fmt.Errorf("failed to ensure identity for %s: %w", nodeSpec.Name, err)
}
bootstrapAddrs = append(bootstrapAddrs, addr)
peerAddrs = append(peerAddrs, addr)
}
// Ensure configs for all bootstrap and regular nodes
// Ensure configs for all nodes
for _, nodeSpec := range topology.Nodes {
if err := ce.ensureNodeConfig(nodeSpec, bootstrapAddrs); err != nil {
if err := ce.ensureNodeConfig(nodeSpec, peerAddrs); err != nil {
return fmt.Errorf("failed to ensure config for %s: %w", nodeSpec.Name, err)
}
}
// Ensure gateway config
if err := ce.ensureGateway(bootstrapAddrs); err != nil {
return fmt.Errorf("failed to ensure gateway: %w", err)
}
// Gateway configuration is now embedded in each node's config
// No separate gateway.yaml needed anymore
// Ensure Olric config
if err := ce.ensureOlric(); err != nil {
@ -75,7 +75,7 @@ func (ce *ConfigEnsurer) EnsureAll() error {
// ensureSharedSecrets creates cluster secret and swarm key if they don't exist
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
secretPath := filepath.Join(ce.debrosDir, "cluster-secret")
secretPath := filepath.Join(ce.oramaDir, "cluster-secret")
if _, err := os.Stat(secretPath); os.IsNotExist(err) {
secret := generateRandomHex(64) // 64 hex chars = 32 bytes
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
@ -84,7 +84,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
fmt.Printf("✓ Generated cluster secret\n")
}
swarmKeyPath := filepath.Join(ce.debrosDir, "swarm.key")
swarmKeyPath := filepath.Join(ce.oramaDir, "swarm.key")
if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
keyHex := strings.ToUpper(generateRandomHex(64))
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
@ -99,7 +99,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
identityPath := filepath.Join(nodeDir, "identity.key")
// Create identity if missing
@ -133,105 +133,56 @@ func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
}
// ensureNodeConfig creates or updates a node configuration
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, bootstrapAddrs []string) error {
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
configPath := filepath.Join(ce.debrosDir, nodeSpec.ConfigFilename)
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, peerAddrs []string) error {
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
configPath := filepath.Join(ce.oramaDir, nodeSpec.ConfigFilename)
if err := os.MkdirAll(nodeDir, 0755); err != nil {
return fmt.Errorf("failed to create node directory: %w", err)
}
if nodeSpec.Role == "bootstrap" {
// Generate bootstrap config
data := templates.BootstrapConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
BootstrapPeers: bootstrapAddrs,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
}
config, err := templates.RenderBootstrapConfig(data)
if err != nil {
return fmt.Errorf("failed to render bootstrap config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write bootstrap config: %w", err)
}
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
} else {
// Generate regular node config
data := templates.NodeConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
BootstrapPeers: bootstrapAddrs,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
}
config, err := templates.RenderNodeConfig(data)
if err != nil {
return fmt.Errorf("failed to render node config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write node config: %w", err)
}
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
// Generate node config (all nodes are unified)
data := templates.NodeConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
BootstrapPeers: peerAddrs,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
UnifiedGatewayPort: nodeSpec.UnifiedGatewayPort,
}
return nil
}
// ensureGateway creates gateway config
func (ce *ConfigEnsurer) ensureGateway(bootstrapAddrs []string) error {
configPath := filepath.Join(ce.debrosDir, "gateway.yaml")
// Get first bootstrap's cluster API port for default
topology := DefaultTopology()
firstBootstrap := topology.GetBootstrapNodes()[0]
data := templates.GatewayConfigData{
ListenPort: topology.GatewayPort,
BootstrapPeers: bootstrapAddrs,
OlricServers: []string{fmt.Sprintf("127.0.0.1:%d", topology.OlricHTTPPort)},
ClusterAPIPort: firstBootstrap.ClusterAPIPort,
IPFSAPIPort: firstBootstrap.IPFSAPIPort,
}
config, err := templates.RenderGatewayConfig(data)
config, err := templates.RenderNodeConfig(data)
if err != nil {
return fmt.Errorf("failed to render gateway config: %w", err)
return fmt.Errorf("failed to render node config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write gateway config: %w", err)
return fmt.Errorf("failed to write node config: %w", err)
}
fmt.Printf("✓ Generated gateway.yaml\n")
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
return nil
}
// Gateway configuration is now embedded in each node's config
// ensureGateway is no longer needed - each node runs its own embedded gateway
// ensureOlric creates Olric config
func (ce *ConfigEnsurer) ensureOlric() error {
configPath := filepath.Join(ce.debrosDir, "olric-config.yaml")
configPath := filepath.Join(ce.oramaDir, "olric-config.yaml")
topology := DefaultTopology()
data := templates.OlricConfigData{
BindAddr: "127.0.0.1",
HTTPPort: topology.OlricHTTPPort,
MemberlistPort: topology.OlricMemberPort,
ServerBindAddr: "127.0.0.1",
HTTPPort: topology.OlricHTTPPort,
MemberlistBindAddr: "127.0.0.1", // localhost for development
MemberlistPort: topology.OlricMemberPort,
MemberlistEnvironment: "local", // development environment
}
config, err := templates.RenderOlricConfig(data)

View File

@ -9,6 +9,8 @@ import (
"os/exec"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
)
// HealthCheckResult represents the result of a health check
@ -79,7 +81,7 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
result := HealthCheckResult{Name: fmt.Sprintf("RQLite-%s", name)}
urlStr := fmt.Sprintf("http://localhost:%d/status", httpPort)
client := &http.Client{Timeout: 2 * time.Second}
client := tlsutil.NewHTTPClient(2 * time.Second)
resp, err := client.Get(urlStr)
if err != nil {
result.Details = fmt.Sprintf("connection failed: %v", err)
@ -164,43 +166,42 @@ func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResu
// HealthCheckWithRetry performs a health check with retry logic
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
fmt.Fprintf(pm.logWriter, "\n⚕️ Validating cluster health...\n")
fmt.Fprintf(pm.logWriter, "⚕️ Validating cluster health...")
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
spinnerFrames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
spinnerIndex := 0
for attempt := 1; attempt <= retries; attempt++ {
// Perform all checks
ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)
// Log results
if attempt == 1 || attempt == retries || (attempt%3 == 0) {
fmt.Fprintf(pm.logWriter, " Attempt %d/%d:\n", attempt, retries)
pm.logHealthCheckResult(pm.logWriter, " ", ipfsResult)
pm.logHealthCheckResult(pm.logWriter, " ", rqliteResult)
pm.logHealthCheckResult(pm.logWriter, " ", libp2pResult)
}
// All checks must pass
if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
fmt.Fprintf(pm.logWriter, "\n✓ All health checks passed!\n")
fmt.Fprintf(pm.logWriter, "\r✓ Cluster health validated\n")
return true
}
// Show spinner progress
fmt.Fprintf(pm.logWriter, "\r%s Validating cluster health... (%d/%d)", spinnerFrames[spinnerIndex%len(spinnerFrames)], attempt, retries)
spinnerIndex++
if attempt < retries {
select {
case <-time.After(retryInterval):
continue
case <-deadlineCtx.Done():
fmt.Fprintf(pm.logWriter, "\n❌ Health check timeout reached\n")
fmt.Fprintf(pm.logWriter, "\r❌ Health check timeout reached\n")
return false
}
}
}
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed after %d attempts\n", retries)
fmt.Fprintf(pm.logWriter, "\r❌ Health checks failed - services not ready\n")
return false
}

View File

@ -15,11 +15,13 @@ import (
"strings"
"sync"
"time"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
)
// ProcessManager manages all dev environment processes
type ProcessManager struct {
debrosDir string
oramaDir string
pidsDir string
processes map[string]*ManagedProcess
mutex sync.Mutex
@ -35,12 +37,12 @@ type ManagedProcess struct {
}
// NewProcessManager creates a new process manager
func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
pidsDir := filepath.Join(debrosDir, ".pids")
func NewProcessManager(oramaDir string, logWriter io.Writer) *ProcessManager {
pidsDir := filepath.Join(oramaDir, ".pids")
os.MkdirAll(pidsDir, 0755)
return &ProcessManager{
debrosDir: debrosDir,
oramaDir: oramaDir,
pidsDir: pidsDir,
processes: make(map[string]*ManagedProcess),
logWriter: logWriter,
@ -49,7 +51,8 @@ func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
// StartAll starts all development services
func (pm *ProcessManager) StartAll(ctx context.Context) error {
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n\n")
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n")
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
topology := DefaultTopology()
@ -66,7 +69,7 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
{"Olric", pm.startOlric},
{"Anon", pm.startAnon},
{"Nodes (Network)", pm.startNodes},
{"Gateway", pm.startGateway},
// Gateway is now per-node (embedded in each node) - no separate main gateway needed
}
for _, svc := range services {
@ -76,6 +79,8 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
}
}
fmt.Fprintf(pm.logWriter, "\n")
// Run health checks with retries before declaring success
const (
healthCheckRetries = 20
@ -84,15 +89,45 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
)
if !pm.HealthCheckWithRetry(ctx, ipfsNodes, healthCheckRetries, healthCheckInterval, healthCheckTimeout) {
fmt.Fprintf(pm.logWriter, "\n❌ Development environment failed health checks - stopping all services\n")
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed - stopping all services\n")
pm.StopAll(ctx)
return fmt.Errorf("cluster health checks failed - services stopped")
}
fmt.Fprintf(pm.logWriter, "\n✅ Development environment started!\n\n")
// Print success and key endpoints
pm.printStartupSummary(topology)
return nil
}
// printStartupSummary prints the final startup summary with key endpoints
func (pm *ProcessManager) printStartupSummary(topology *Topology) {
fmt.Fprintf(pm.logWriter, "\n✅ Development environment ready!\n")
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
fmt.Fprintf(pm.logWriter, "📡 Access your nodes via unified gateway ports:\n\n")
for _, node := range topology.Nodes {
fmt.Fprintf(pm.logWriter, " %s:\n", node.Name)
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/health\n", node.UnifiedGatewayPort)
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/rqlite/http/db/execute\n", node.UnifiedGatewayPort)
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/cluster/health\n\n", node.UnifiedGatewayPort)
}
fmt.Fprintf(pm.logWriter, "🌐 Main Gateway:\n")
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/v1/status\n\n", topology.GatewayPort)
fmt.Fprintf(pm.logWriter, "📊 Other Services:\n")
fmt.Fprintf(pm.logWriter, " Olric: http://localhost:%d\n", topology.OlricHTTPPort)
fmt.Fprintf(pm.logWriter, " Anon SOCKS: 127.0.0.1:%d\n\n", topology.AnonSOCKSPort)
fmt.Fprintf(pm.logWriter, "📝 Useful Commands:\n")
fmt.Fprintf(pm.logWriter, " ./bin/orama dev status - Check service status\n")
fmt.Fprintf(pm.logWriter, " ./bin/orama dev logs node-1 - View logs\n")
fmt.Fprintf(pm.logWriter, " ./bin/orama dev down - Stop all services\n\n")
fmt.Fprintf(pm.logWriter, "📂 Logs: %s/logs\n", pm.oramaDir)
fmt.Fprintf(pm.logWriter, "⚙️ Config: %s\n\n", pm.oramaDir)
}
// StopAll stops all running processes
func (pm *ProcessManager) StopAll(ctx context.Context) error {
fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n\n")
@ -204,10 +239,10 @@ func (pm *ProcessManager) Status(ctx context.Context) {
fmt.Fprintf(pm.logWriter, " %-25s %s (%s)\n", svc.name, status, portStr)
}
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.debrosDir)
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node2.yaml", "node3.yaml", "node4.yaml", "gateway.yaml", "olric-config.yaml"}
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.oramaDir)
configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml", "olric-config.yaml"}
for _, f := range configFiles {
path := filepath.Join(pm.debrosDir, f)
path := filepath.Join(pm.oramaDir, f)
if _, err := os.Stat(path); err == nil {
fmt.Fprintf(pm.logWriter, " ✓ %s\n", f)
} else {
@ -215,7 +250,7 @@ func (pm *ProcessManager) Status(ctx context.Context) {
}
}
fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.debrosDir)
fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.oramaDir)
}
// Helper functions for starting individual services
@ -226,7 +261,7 @@ func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
for _, nodeSpec := range topology.Nodes {
nodes = append(nodes, ipfsNodeInfo{
name: nodeSpec.Name,
ipfsPath: filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs/repo"),
ipfsPath: filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs/repo"),
apiPort: nodeSpec.IPFSAPIPort,
swarmPort: nodeSpec.IPFSSwarmPort,
gatewayPort: nodeSpec.IPFSGatewayPort,
@ -236,11 +271,11 @@ func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
return nodes
}
// startNodes starts all network nodes (bootstraps and regular)
// startNodes starts all network nodes
func (pm *ProcessManager) startNodes(ctx context.Context) error {
topology := DefaultTopology()
for _, nodeSpec := range topology.Nodes {
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
if err := pm.startNode(nodeSpec.Name, nodeSpec.ConfigFilename, logPath); err != nil {
return fmt.Errorf("failed to start %s: %w", nodeSpec.Name, err)
}
@ -448,7 +483,7 @@ func (pm *ProcessManager) waitIPFSReady(ctx context.Context, node ipfsNodeInfo)
// ipfsHTTPCall makes an HTTP call to IPFS API
func (pm *ProcessManager) ipfsHTTPCall(ctx context.Context, urlStr string, method string) error {
client := &http.Client{Timeout: 5 * time.Second}
client := tlsutil.NewHTTPClient(5 * time.Second)
req, err := http.NewRequestWithContext(ctx, method, urlStr, nil)
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
@ -485,7 +520,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
}
// Copy swarm key
swarmKeyPath := filepath.Join(pm.debrosDir, "swarm.key")
swarmKeyPath := filepath.Join(pm.oramaDir, "swarm.key")
if data, err := os.ReadFile(swarmKeyPath); err == nil {
os.WriteFile(filepath.Join(nodes[i].ipfsPath, "swarm.key"), data, 0600)
}
@ -505,7 +540,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
// Phase 2: Start all IPFS daemons
for i := range nodes {
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-%s.pid", nodes[i].name))
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
cmd := exec.CommandContext(ctx, "ipfs", "daemon", "--enable-pubsub-experiment", "--repo-dir="+nodes[i].ipfsPath)
logFile, _ := os.Create(logPath)
@ -556,7 +591,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
ipfsPort int
}{
nodeSpec.Name,
filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs-cluster"),
filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs-cluster"),
nodeSpec.ClusterAPIPort,
nodeSpec.ClusterPort,
nodeSpec.IPFSAPIPort,
@ -573,7 +608,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
}
// Read cluster secret to ensure all nodes use the same PSK
secretPath := filepath.Join(pm.debrosDir, "cluster-secret")
secretPath := filepath.Join(pm.oramaDir, "cluster-secret")
clusterSecret, err := os.ReadFile(secretPath)
if err != nil {
return fmt.Errorf("failed to read cluster secret: %w", err)
@ -622,7 +657,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
// Start bootstrap cluster service
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
cmd = exec.CommandContext(ctx, "ipfs-cluster-service", "daemon")
cmd.Env = append(os.Environ(), fmt.Sprintf("IPFS_CLUSTER_PATH=%s", node.clusterPath))
@ -696,7 +731,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
// Start follower cluster service with bootstrap flag
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
args := []string{"daemon"}
if bootstrapMultiaddr != "" {
@ -943,8 +978,8 @@ func (pm *ProcessManager) ensureIPFSClusterPorts(clusterPath string, restAPIPort
func (pm *ProcessManager) startOlric(ctx context.Context) error {
pidPath := filepath.Join(pm.pidsDir, "olric.pid")
logPath := filepath.Join(pm.debrosDir, "logs", "olric.log")
configPath := filepath.Join(pm.debrosDir, "olric-config.yaml")
logPath := filepath.Join(pm.oramaDir, "logs", "olric.log")
configPath := filepath.Join(pm.oramaDir, "olric-config.yaml")
cmd := exec.CommandContext(ctx, "olric-server")
cmd.Env = append(os.Environ(), fmt.Sprintf("OLRIC_SERVER_CONFIG=%s", configPath))
@ -969,7 +1004,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
}
pidPath := filepath.Join(pm.pidsDir, "anon.pid")
logPath := filepath.Join(pm.debrosDir, "logs", "anon.log")
logPath := filepath.Join(pm.oramaDir, "logs", "anon.log")
cmd := exec.CommandContext(ctx, "npx", "anyone-client")
logFile, _ := os.Create(logPath)
@ -989,7 +1024,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
cmd := exec.Command("./bin/node", "--config", configFile)
cmd := exec.Command("./bin/orama-node", "--config", configFile)
logFile, _ := os.Create(logPath)
cmd.Stdout = logFile
cmd.Stderr = logFile
@ -1007,7 +1042,7 @@ func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
func (pm *ProcessManager) startGateway(ctx context.Context) error {
pidPath := filepath.Join(pm.pidsDir, "gateway.pid")
logPath := filepath.Join(pm.debrosDir, "logs", "gateway.log")
logPath := filepath.Join(pm.oramaDir, "logs", "gateway.log")
cmd := exec.Command("./bin/gateway", "--config", "gateway.yaml")
logFile, _ := os.Create(logPath)

View File

@ -4,10 +4,9 @@ import "fmt"
// NodeSpec defines configuration for a single dev environment node
type NodeSpec struct {
Name string // bootstrap, bootstrap2, node2, node3, node4
Role string // "bootstrap" or "node"
ConfigFilename string // bootstrap.yaml, bootstrap2.yaml, node2.yaml, etc.
DataDir string // relative path from .debros root
Name string // node-1, node-2, node-3, node-4, node-5
ConfigFilename string // node-1.yaml, node-2.yaml, etc.
DataDir string // relative path from .orama root
P2PPort int // LibP2P listen port
IPFSAPIPort int // IPFS API port
IPFSSwarmPort int // IPFS Swarm port
@ -16,8 +15,9 @@ type NodeSpec struct {
RQLiteRaftPort int // RQLite Raft consensus port
ClusterAPIPort int // IPFS Cluster REST API port
ClusterPort int // IPFS Cluster P2P port
RQLiteJoinTarget string // which bootstrap RQLite port to join (leave empty for bootstraps that lead)
ClusterJoinTarget string // which bootstrap cluster to join (leave empty for bootstrap that leads)
UnifiedGatewayPort int // Unified gateway port (proxies all services)
RQLiteJoinTarget string // which node's RQLite Raft port to join (empty for first node)
ClusterJoinTarget string // which node's cluster to join (empty for first node)
}
// Topology defines the complete development environment topology
@ -33,88 +33,88 @@ type Topology struct {
func DefaultTopology() *Topology {
return &Topology{
Nodes: []NodeSpec{
{
Name: "bootstrap",
Role: "bootstrap",
ConfigFilename: "bootstrap.yaml",
DataDir: "bootstrap",
P2PPort: 4001,
IPFSAPIPort: 4501,
IPFSSwarmPort: 4101,
IPFSGatewayPort: 7501,
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
ClusterPort: 9096,
RQLiteJoinTarget: "",
ClusterJoinTarget: "",
},
{
Name: "bootstrap2",
Role: "bootstrap",
ConfigFilename: "bootstrap2.yaml",
DataDir: "bootstrap2",
P2PPort: 4011,
IPFSAPIPort: 4511,
IPFSSwarmPort: 4111,
IPFSGatewayPort: 7511,
RQLiteHTTPPort: 5011,
RQLiteRaftPort: 7011,
ClusterAPIPort: 9104,
ClusterPort: 9106,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node2",
Role: "node",
ConfigFilename: "node2.yaml",
DataDir: "node2",
P2PPort: 4002,
IPFSAPIPort: 4502,
IPFSSwarmPort: 4102,
IPFSGatewayPort: 7502,
RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002,
ClusterAPIPort: 9114,
ClusterPort: 9116,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node3",
Role: "node",
ConfigFilename: "node3.yaml",
DataDir: "node3",
P2PPort: 4003,
IPFSAPIPort: 4503,
IPFSSwarmPort: 4103,
IPFSGatewayPort: 7503,
RQLiteHTTPPort: 5003,
RQLiteRaftPort: 7003,
ClusterAPIPort: 9124,
ClusterPort: 9126,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node4",
Role: "node",
ConfigFilename: "node4.yaml",
DataDir: "node4",
P2PPort: 4004,
IPFSAPIPort: 4504,
IPFSSwarmPort: 4104,
IPFSGatewayPort: 7504,
RQLiteHTTPPort: 5004,
RQLiteRaftPort: 7004,
ClusterAPIPort: 9134,
ClusterPort: 9136,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node-1",
ConfigFilename: "node-1.yaml",
DataDir: "node-1",
P2PPort: 4001,
IPFSAPIPort: 4501,
IPFSSwarmPort: 4101,
IPFSGatewayPort: 7501,
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
ClusterPort: 9096,
UnifiedGatewayPort: 6001,
RQLiteJoinTarget: "", // First node - creates cluster
ClusterJoinTarget: "",
},
GatewayPort: 6001,
{
Name: "node-2",
ConfigFilename: "node-2.yaml",
DataDir: "node-2",
P2PPort: 4011,
IPFSAPIPort: 4511,
IPFSSwarmPort: 4111,
IPFSGatewayPort: 7511,
RQLiteHTTPPort: 5011,
RQLiteRaftPort: 7011,
ClusterAPIPort: 9104,
ClusterPort: 9106,
UnifiedGatewayPort: 6002,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node-3",
ConfigFilename: "node-3.yaml",
DataDir: "node-3",
P2PPort: 4002,
IPFSAPIPort: 4502,
IPFSSwarmPort: 4102,
IPFSGatewayPort: 7502,
RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002,
ClusterAPIPort: 9114,
ClusterPort: 9116,
UnifiedGatewayPort: 6003,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node-4",
ConfigFilename: "node-4.yaml",
DataDir: "node-4",
P2PPort: 4003,
IPFSAPIPort: 4503,
IPFSSwarmPort: 4103,
IPFSGatewayPort: 7503,
RQLiteHTTPPort: 5003,
RQLiteRaftPort: 7003,
ClusterAPIPort: 9124,
ClusterPort: 9126,
UnifiedGatewayPort: 6004,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node-5",
ConfigFilename: "node-5.yaml",
DataDir: "node-5",
P2PPort: 4004,
IPFSAPIPort: 4504,
IPFSSwarmPort: 4104,
IPFSGatewayPort: 7504,
RQLiteHTTPPort: 5004,
RQLiteRaftPort: 7004,
ClusterAPIPort: 9134,
ClusterPort: 9136,
UnifiedGatewayPort: 6005,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
},
GatewayPort: 6000, // Main gateway on 6000 (nodes use 6001-6005)
OlricHTTPPort: 3320,
OlricMemberPort: 3322,
AnonSOCKSPort: 9050,
@ -136,6 +136,7 @@ func (t *Topology) AllPorts() []int {
node.RQLiteRaftPort,
node.ClusterAPIPort,
node.ClusterPort,
node.UnifiedGatewayPort,
)
}
@ -163,6 +164,7 @@ func (t *Topology) PortMap() map[int]string {
portMap[node.RQLiteRaftPort] = fmt.Sprintf("%s RQLite Raft", node.Name)
portMap[node.ClusterAPIPort] = fmt.Sprintf("%s IPFS Cluster API", node.Name)
portMap[node.ClusterPort] = fmt.Sprintf("%s IPFS Cluster P2P", node.Name)
portMap[node.UnifiedGatewayPort] = fmt.Sprintf("%s Unified Gateway", node.Name)
}
portMap[t.GatewayPort] = "Gateway"
@ -173,26 +175,20 @@ func (t *Topology) PortMap() map[int]string {
return portMap
}
// GetBootstrapNodes returns only the bootstrap nodes
func (t *Topology) GetBootstrapNodes() []NodeSpec {
var bootstraps []NodeSpec
for _, node := range t.Nodes {
if node.Role == "bootstrap" {
bootstraps = append(bootstraps, node)
}
// GetFirstNode returns the first node (the one that creates the cluster)
func (t *Topology) GetFirstNode() *NodeSpec {
if len(t.Nodes) > 0 {
return &t.Nodes[0]
}
return bootstraps
return nil
}
// GetRegularNodes returns only the regular (non-bootstrap) nodes
func (t *Topology) GetRegularNodes() []NodeSpec {
var regulars []NodeSpec
for _, node := range t.Nodes {
if node.Role == "node" {
regulars = append(regulars, node)
}
// GetJoiningNodes returns all nodes except the first one (they join the cluster)
func (t *Topology) GetJoiningNodes() []NodeSpec {
if len(t.Nodes) > 1 {
return t.Nodes[1:]
}
return regulars
return nil
}
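
A self-contained sketch of the intended call pattern for the two accessors (types mirrored from this file, trimmed to the relevant fields):

package main

import "fmt"

// Minimal mirror of NodeSpec/Topology above, just to show the call pattern.
type NodeSpec struct {
    Name             string
    RQLiteRaftPort   int
    RQLiteJoinTarget string
}

type Topology struct{ Nodes []NodeSpec }

func (t *Topology) GetFirstNode() *NodeSpec {
    if len(t.Nodes) > 0 {
        return &t.Nodes[0]
    }
    return nil
}

func (t *Topology) GetJoiningNodes() []NodeSpec {
    if len(t.Nodes) > 1 {
        return t.Nodes[1:]
    }
    return nil
}

func main() {
    t := &Topology{Nodes: []NodeSpec{
        {Name: "node-1", RQLiteRaftPort: 7001},
        {Name: "node-2", RQLiteJoinTarget: "localhost:7001"},
    }}
    first := t.GetFirstNode()
    fmt.Printf("%s creates the cluster (raft :%d)\n", first.Name, first.RQLiteRaftPort)
    for _, n := range t.GetJoiningNodes() {
        fmt.Printf("%s joins via %s\n", n.Name, n.RQLiteJoinTarget)
    }
}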
// GetNodeByName returns a node by its name, or nil if not found

View File

@ -20,17 +20,17 @@ import (
// ConfigGenerator manages generation of node, gateway, and service configs
type ConfigGenerator struct {
debrosDir string
oramaDir string
}
// NewConfigGenerator creates a new config generator
func NewConfigGenerator(debrosDir string) *ConfigGenerator {
func NewConfigGenerator(oramaDir string) *ConfigGenerator {
return &ConfigGenerator{
debrosDir: debrosDir,
oramaDir: oramaDir,
}
}
// extractIPFromMultiaddr extracts the IP address from a bootstrap peer multiaddr
// extractIPFromMultiaddr extracts the IP address from a peer multiaddr
// Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols
// Returns the IP address as a string, or empty string if extraction/resolution fails
func extractIPFromMultiaddr(multiaddrStr string) string {
@ -76,12 +76,12 @@ func extractIPFromMultiaddr(multiaddrStr string) string {
return ""
}
// inferBootstrapIP extracts the IP address from bootstrap peer multiaddrs
// Iterates through all bootstrap peers to find a valid IP (supports DNS resolution)
// inferPeerIP extracts the IP address from peer multiaddrs
// Iterates through all peers to find a valid IP (supports DNS resolution)
// Falls back to vpsIP if provided, otherwise returns empty string
func inferBootstrapIP(bootstrapPeers []string, vpsIP string) string {
// Try to extract IP from each bootstrap peer (in order)
for _, peer := range bootstrapPeers {
func inferPeerIP(peers []string, vpsIP string) string {
// Try to extract IP from each peer (in order)
for _, peer := range peers {
if ip := extractIPFromMultiaddr(peer); ip != "" {
return ip
}
@ -93,116 +93,125 @@ func inferBootstrapIP(bootstrapPeers []string, vpsIP string) string {
return ""
}
// GenerateNodeConfig generates node.yaml configuration
func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers []string, vpsIP string, bootstrapJoin string) (string, error) {
var nodeID string
if isBootstrap {
nodeID = "bootstrap"
} else {
nodeID = "node"
// GenerateNodeConfig generates node.yaml configuration (unified architecture)
func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP string, joinAddress string, domain string, enableHTTPS bool) (string, error) {
// Generate node ID from domain or use default
nodeID := "node"
if domain != "" {
// Extract node identifier from domain (e.g., "node-123" from "node-123.orama.network")
parts := strings.Split(domain, ".")
if len(parts) > 0 {
nodeID = parts[0]
}
}
// Determine advertise addresses
// For bootstrap: use vpsIP if provided, otherwise localhost
// For regular nodes: infer from bootstrap peers or use vpsIP
// Determine advertise addresses - use vpsIP if provided
// When HTTPS is enabled, RQLite uses native TLS on port 7002 (not SNI gateway)
// This avoids conflicts between SNI gateway TLS termination and RQLite's native TLS
var httpAdvAddr, raftAdvAddr string
if isBootstrap {
if vpsIP != "" {
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
if vpsIP != "" {
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
if enableHTTPS {
// Use direct IP:7002 for Raft - RQLite handles TLS natively via -node-cert
// This bypasses the SNI gateway which would cause TLS termination conflicts
raftAdvAddr = net.JoinHostPort(vpsIP, "7002")
} else {
httpAdvAddr = "localhost:5001"
raftAdvAddr = "localhost:7001"
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
}
} else {
// Regular node: infer from bootstrap peers or use vpsIP
bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
if bootstrapIP != "" {
// Use the bootstrap IP for advertise addresses (this node should be reachable at same network)
// If vpsIP is provided, use it; otherwise use bootstrap IP
if vpsIP != "" {
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
} else {
httpAdvAddr = net.JoinHostPort(bootstrapIP, "5001")
raftAdvAddr = net.JoinHostPort(bootstrapIP, "7001")
}
} else {
// Fallback to localhost if nothing can be inferred
httpAdvAddr = "localhost:5001"
raftAdvAddr = "localhost:7001"
}
// Fallback to localhost if no vpsIP
httpAdvAddr = "localhost:5001"
raftAdvAddr = "localhost:7001"
}
if isBootstrap {
// Bootstrap node - populate peer list and optional join address
data := templates.BootstrapConfigData{
NodeID: nodeID,
P2PPort: 4001,
DataDir: filepath.Join(cg.debrosDir, "data", "bootstrap"),
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
BootstrapPeers: bootstrapPeers,
RQLiteJoinAddress: bootstrapJoin,
HTTPAdvAddress: httpAdvAddr,
RaftAdvAddress: raftAdvAddr,
}
return templates.RenderBootstrapConfig(data)
// Determine RQLite join address
// When HTTPS is enabled, use port 7002 (direct RQLite TLS) instead of 7001 (SNI gateway)
joinPort := "7001"
if enableHTTPS {
joinPort = "7002"
}
// Regular node - infer join address from bootstrap peers
// MUST extract from bootstrap_peers - no fallback to vpsIP (would cause self-join)
var rqliteJoinAddr string
bootstrapIP := inferBootstrapIP(bootstrapPeers, "")
if bootstrapIP == "" {
// Try to extract from first bootstrap peer directly as fallback
if len(bootstrapPeers) > 0 {
if extractedIP := extractIPFromMultiaddr(bootstrapPeers[0]); extractedIP != "" {
bootstrapIP = extractedIP
if joinAddress != "" {
// Use explicitly provided join address
// If it contains :7001 and HTTPS is enabled, update to :7002
if enableHTTPS && strings.Contains(joinAddress, ":7001") {
rqliteJoinAddr = strings.Replace(joinAddress, ":7001", ":7002", 1)
} else {
rqliteJoinAddr = joinAddress
}
} else if len(peerAddresses) > 0 {
// Infer join address from peers
peerIP := inferPeerIP(peerAddresses, "")
if peerIP != "" {
rqliteJoinAddr = net.JoinHostPort(peerIP, joinPort)
// Validate that join address doesn't match this node's own raft address (would cause self-join)
if rqliteJoinAddr == raftAdvAddr {
rqliteJoinAddr = "" // Clear it - this is the first node
}
}
}
// If no join address and no peers, this is the first node - it will create the cluster
// If still no IP, fail - we cannot join without a valid bootstrap address
if bootstrapIP == "" {
return "", fmt.Errorf("cannot determine RQLite join address: failed to extract IP from bootstrap peers %v (required for non-bootstrap nodes)", bootstrapPeers)
}
// TLS/ACME configuration
tlsCacheDir := ""
httpPort := 80
httpsPort := 443
if enableHTTPS {
tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
}
rqliteJoinAddr = net.JoinHostPort(bootstrapIP, "7001")
// Validate that join address doesn't match this node's own raft address (would cause self-join)
if rqliteJoinAddr == raftAdvAddr {
return "", fmt.Errorf("invalid configuration: rqlite_join_address (%s) cannot match raft_adv_address (%s) - node cannot join itself", rqliteJoinAddr, raftAdvAddr)
// Unified data directory (all nodes equal)
// When HTTPS/SNI is enabled, use internal port 7002 for RQLite Raft (SNI gateway listens on 7001)
raftInternalPort := 7001
if enableHTTPS {
raftInternalPort = 7002 // Internal port when SNI is enabled
}
data := templates.NodeConfigData{
NodeID: nodeID,
P2PPort: 4001,
DataDir: filepath.Join(cg.debrosDir, "data", "node"),
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
RQLiteJoinAddress: rqliteJoinAddr,
BootstrapPeers: bootstrapPeers,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
HTTPAdvAddress: httpAdvAddr,
RaftAdvAddress: raftAdvAddr,
NodeID: nodeID,
P2PPort: 4001,
DataDir: filepath.Join(cg.oramaDir, "data"),
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001, // External SNI port
RQLiteRaftInternalPort: raftInternalPort, // Internal RQLite binding port
RQLiteJoinAddress: rqliteJoinAddr,
BootstrapPeers: peerAddresses,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
HTTPAdvAddress: httpAdvAddr,
RaftAdvAddress: raftAdvAddr,
UnifiedGatewayPort: 6001,
Domain: domain,
EnableHTTPS: enableHTTPS,
TLSCacheDir: tlsCacheDir,
HTTPPort: httpPort,
HTTPSPort: httpsPort,
}
// When HTTPS is enabled, configure RQLite node-to-node TLS encryption
// RQLite handles TLS natively on port 7002, bypassing the SNI gateway
// This avoids TLS termination conflicts between SNI gateway and RQLite
if enableHTTPS && domain != "" {
data.NodeCert = filepath.Join(tlsCacheDir, domain+".crt")
data.NodeKey = filepath.Join(tlsCacheDir, domain+".key")
// Skip verification since nodes may have different domain certificates
data.NodeNoVerify = true
}
return templates.RenderNodeConfig(data)
}
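// Hypothetical invocation sketch (path, addresses, and domain are examples):
// with enableHTTPS set, the node ID is derived from the domain, the Raft
// advertise address uses port 7002, and RQLite's native TLS bypasses the SNI
// gateway as described above.
//
//	cg := NewConfigGenerator("/home/debros/.orama")
//	cfg, err := cg.GenerateNodeConfig(
//		[]string{"/ip4/203.0.113.10/tcp/4001/p2p/12D3KooWExample"}, // existing peer
//		"203.0.113.20",         // this node's public IP
//		"",                     // join address inferred from peers -> 203.0.113.10:7002
//		"node-2.orama.network", // node ID becomes "node-2"
//		true,                   // enableHTTPS
//	)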
// GenerateGatewayConfig generates gateway.yaml configuration
func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
func (cg *ConfigGenerator) GenerateGatewayConfig(peerAddresses []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
tlsCacheDir := ""
if enableHTTPS {
tlsCacheDir = filepath.Join(cg.debrosDir, "tls-cache")
tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
}
data := templates.GatewayConfigData{
ListenPort: 6001,
BootstrapPeers: bootstrapPeers,
BootstrapPeers: peerAddresses,
OlricServers: olricServers,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
@ -215,26 +224,26 @@ func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enable
}
// GenerateOlricConfig generates Olric configuration
func (cg *ConfigGenerator) GenerateOlricConfig(bindAddr string, httpPort, memberlistPort int) (string, error) {
func (cg *ConfigGenerator) GenerateOlricConfig(serverBindAddr string, httpPort int, memberlistBindAddr string, memberlistPort int, memberlistEnv string) (string, error) {
data := templates.OlricConfigData{
BindAddr: bindAddr,
HTTPPort: httpPort,
MemberlistPort: memberlistPort,
ServerBindAddr: serverBindAddr,
HTTPPort: httpPort,
MemberlistBindAddr: memberlistBindAddr,
MemberlistPort: memberlistPort,
MemberlistEnvironment: memberlistEnv,
}
return templates.RenderOlricConfig(data)
}
// SecretGenerator manages generation of shared secrets and keys
type SecretGenerator struct {
debrosDir string
clusterSecretOverride string
oramaDir string
}
// NewSecretGenerator creates a new secret generator
func NewSecretGenerator(debrosDir string, clusterSecretOverride string) *SecretGenerator {
func NewSecretGenerator(oramaDir string) *SecretGenerator {
return &SecretGenerator{
debrosDir: debrosDir,
clusterSecretOverride: clusterSecretOverride,
oramaDir: oramaDir,
}
}
@ -255,37 +264,16 @@ func ValidateClusterSecret(secret string) error {
// EnsureClusterSecret gets or generates the IPFS Cluster secret
func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
secretPath := filepath.Join(sg.debrosDir, "secrets", "cluster-secret")
secretPath := filepath.Join(sg.oramaDir, "secrets", "cluster-secret")
secretDir := filepath.Dir(secretPath)
// Ensure secrets directory exists
if err := os.MkdirAll(secretDir, 0755); err != nil {
// Ensure secrets directory exists with restricted permissions (0700)
if err := os.MkdirAll(secretDir, 0700); err != nil {
return "", fmt.Errorf("failed to create secrets directory: %w", err)
}
// Use override if provided
if sg.clusterSecretOverride != "" {
secret := strings.TrimSpace(sg.clusterSecretOverride)
if err := ValidateClusterSecret(secret); err != nil {
return "", err
}
needsWrite := true
if data, err := os.ReadFile(secretPath); err == nil {
if strings.TrimSpace(string(data)) == secret {
needsWrite = false
}
}
if needsWrite {
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
return "", fmt.Errorf("failed to save cluster secret override: %w", err)
}
}
if err := ensureSecretFilePermissions(secretPath); err != nil {
return "", err
}
return secret, nil
// Ensure directory permissions are correct even if it already existed
if err := os.Chmod(secretDir, 0700); err != nil {
return "", fmt.Errorf("failed to set secrets directory permissions: %w", err)
}
// Try to read existing secret
@ -341,13 +329,17 @@ func ensureSecretFilePermissions(secretPath string) error {
// EnsureSwarmKey gets or generates the IPFS private swarm key
func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
swarmKeyPath := filepath.Join(sg.debrosDir, "secrets", "swarm.key")
swarmKeyPath := filepath.Join(sg.oramaDir, "secrets", "swarm.key")
secretDir := filepath.Dir(swarmKeyPath)
// Ensure secrets directory exists
if err := os.MkdirAll(secretDir, 0755); err != nil {
// Ensure secrets directory exists with restricted permissions (0700)
if err := os.MkdirAll(secretDir, 0700); err != nil {
return nil, fmt.Errorf("failed to create secrets directory: %w", err)
}
// Ensure directory permissions are correct even if it already existed
if err := os.Chmod(secretDir, 0700); err != nil {
return nil, fmt.Errorf("failed to set secrets directory permissions: %w", err)
}
// Try to read existing key
if data, err := os.ReadFile(swarmKeyPath); err == nil {
@ -373,9 +365,10 @@ func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
return []byte(content), nil
}
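// For context, the swarm key generated here follows the standard Kubo
// private-network PSK format: a fixed two-line header followed by 32 random
// bytes, hex-encoded. A minimal generation sketch:
//
//	key := make([]byte, 32)
//	if _, err := rand.Read(key); err != nil { // crypto/rand
//		return nil, err
//	}
//	content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", hex.EncodeToString(key))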
// EnsureNodeIdentity gets or generates the node's LibP2P identity
func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error) {
keyDir := filepath.Join(sg.debrosDir, "data", nodeType)
// EnsureNodeIdentity gets or generates the node's LibP2P identity (unified - no bootstrap/node distinction)
func (sg *SecretGenerator) EnsureNodeIdentity() (peer.ID, error) {
// Unified data directory (no bootstrap/node distinction)
keyDir := filepath.Join(sg.oramaDir, "data")
keyPath := filepath.Join(keyDir, "identity.key")
// Ensure data directory exists
@ -419,9 +412,9 @@ func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
var configDir string
// gateway.yaml goes to data/ directory, other configs go to configs/
if filename == "gateway.yaml" {
configDir = filepath.Join(sg.debrosDir, "data")
configDir = filepath.Join(sg.oramaDir, "data")
} else {
configDir = filepath.Join(sg.debrosDir, "configs")
configDir = filepath.Join(sg.oramaDir, "configs")
}
if err := os.MkdirAll(configDir, 0755); err != nil {

View File

@ -3,6 +3,7 @@ package production
import (
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
@ -12,11 +13,11 @@ import (
// BinaryInstaller handles downloading and installing external binaries
type BinaryInstaller struct {
arch string
logWriter interface{} // io.Writer
logWriter io.Writer
}
// NewBinaryInstaller creates a new binary installer
func NewBinaryInstaller(arch string, logWriter interface{}) *BinaryInstaller {
func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller {
return &BinaryInstaller{
arch: arch,
logWriter: logWriter,
@ -26,11 +27,11 @@ func NewBinaryInstaller(arch string, logWriter interface{}) *BinaryInstaller {
// InstallRQLite downloads and installs RQLite
func (bi *BinaryInstaller) InstallRQLite() error {
if _, err := exec.LookPath("rqlited"); err == nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ RQLite already installed\n")
fmt.Fprintf(bi.logWriter, " ✓ RQLite already installed\n")
return nil
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing RQLite...\n")
fmt.Fprintf(bi.logWriter, " Installing RQLite...\n")
version := "8.43.0"
tarball := fmt.Sprintf("rqlite-v%s-linux-%s.tar.gz", version, bi.arch)
@ -53,12 +54,14 @@ func (bi *BinaryInstaller) InstallRQLite() error {
if err := exec.Command("cp", dir+"/rqlited", "/usr/local/bin/").Run(); err != nil {
return fmt.Errorf("failed to copy rqlited binary: %w", err)
}
exec.Command("chmod", "+x", "/usr/local/bin/rqlited").Run()
if err := exec.Command("chmod", "+x", "/usr/local/bin/rqlited").Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chmod rqlited: %v\n", err)
}
// Ensure PATH includes /usr/local/bin
os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/bin")
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ RQLite installed\n")
fmt.Fprintf(bi.logWriter, " ✓ RQLite installed\n")
return nil
}
@ -66,11 +69,11 @@ func (bi *BinaryInstaller) InstallRQLite() error {
// Follows official steps from https://docs.ipfs.tech/install/command-line/
func (bi *BinaryInstaller) InstallIPFS() error {
if _, err := exec.LookPath("ipfs"); err == nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS already installed\n")
fmt.Fprintf(bi.logWriter, " ✓ IPFS already installed\n")
return nil
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing IPFS (Kubo)...\n")
fmt.Fprintf(bi.logWriter, " Installing IPFS (Kubo)...\n")
// Follow official installation steps in order
kuboVersion := "v0.38.2"
@ -81,7 +84,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
kuboDir := filepath.Join(tmpDir, "kubo")
// Step 1: Download the Linux binary from dist.ipfs.tech
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 1: Downloading Kubo v%s...\n", kuboVersion)
fmt.Fprintf(bi.logWriter, " Step 1: Downloading Kubo v%s...\n", kuboVersion)
cmd := exec.Command("wget", "-q", url, "-O", tarPath)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to download kubo from %s: %w", url, err)
@ -93,7 +96,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
}
// Step 2: Unzip the file
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 2: Extracting Kubo archive...\n")
fmt.Fprintf(bi.logWriter, " Step 2: Extracting Kubo archive...\n")
cmd = exec.Command("tar", "-xzf", tarPath, "-C", tmpDir)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to extract kubo tarball: %w", err)
@ -105,7 +108,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
}
// Step 3: Move into the kubo folder (cd kubo)
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 3: Running installation script...\n")
fmt.Fprintf(bi.logWriter, " Step 3: Running installation script...\n")
// Step 4: Run the installation script (sudo bash install.sh)
installScript := filepath.Join(kuboDir, "install.sh")
@ -120,7 +123,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
}
// Step 5: Test that Kubo has installed correctly
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 5: Verifying installation...\n")
fmt.Fprintf(bi.logWriter, " Step 5: Verifying installation...\n")
cmd = exec.Command("ipfs", "--version")
output, err := cmd.CombinedOutput()
if err != nil {
@ -141,24 +144,24 @@ func (bi *BinaryInstaller) InstallIPFS() error {
return fmt.Errorf("ipfs binary not found after installation in %v", ipfsLocations)
}
} else {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " %s", string(output))
fmt.Fprintf(bi.logWriter, " %s", string(output))
}
// Ensure PATH is updated for current process
os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/bin")
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS installed successfully\n")
fmt.Fprintf(bi.logWriter, " ✓ IPFS installed successfully\n")
return nil
}
// InstallIPFSCluster downloads and installs IPFS Cluster Service
func (bi *BinaryInstaller) InstallIPFSCluster() error {
if _, err := exec.LookPath("ipfs-cluster-service"); err == nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS Cluster already installed\n")
fmt.Fprintf(bi.logWriter, " ✓ IPFS Cluster already installed\n")
return nil
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing IPFS Cluster Service...\n")
fmt.Fprintf(bi.logWriter, " Installing IPFS Cluster Service...\n")
// Check if Go is available
if _, err := exec.LookPath("go"); err != nil {
@ -171,18 +174,18 @@ func (bi *BinaryInstaller) InstallIPFSCluster() error {
return fmt.Errorf("failed to install IPFS Cluster: %w", err)
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS Cluster installed\n")
fmt.Fprintf(bi.logWriter, " ✓ IPFS Cluster installed\n")
return nil
}
// InstallOlric downloads and installs Olric server
func (bi *BinaryInstaller) InstallOlric() error {
if _, err := exec.LookPath("olric-server"); err == nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Olric already installed\n")
fmt.Fprintf(bi.logWriter, " ✓ Olric already installed\n")
return nil
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing Olric...\n")
fmt.Fprintf(bi.logWriter, " Installing Olric...\n")
// Check if Go is available
if _, err := exec.LookPath("go"); err != nil {
@ -195,20 +198,20 @@ func (bi *BinaryInstaller) InstallOlric() error {
return fmt.Errorf("failed to install Olric: %w", err)
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Olric installed\n")
fmt.Fprintf(bi.logWriter, " ✓ Olric installed\n")
return nil
}
// InstallGo downloads and installs Go toolchain
func (bi *BinaryInstaller) InstallGo() error {
if _, err := exec.LookPath("go"); err == nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Go already installed\n")
fmt.Fprintf(bi.logWriter, " ✓ Go already installed\n")
return nil
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing Go...\n")
fmt.Fprintf(bi.logWriter, " Installing Go...\n")
goTarball := fmt.Sprintf("go1.21.6.linux-%s.tar.gz", bi.arch)
goTarball := fmt.Sprintf("go1.22.5.linux-%s.tar.gz", bi.arch)
goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball)
// Download
@ -232,7 +235,7 @@ func (bi *BinaryInstaller) InstallGo() error {
return fmt.Errorf("go installed but not found in PATH after installation")
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Go installed\n")
fmt.Fprintf(bi.logWriter, " ✓ Go installed\n")
return nil
}
@ -275,46 +278,50 @@ func (bi *BinaryInstaller) ResolveBinaryPath(binary string, extraPaths ...string
}
// InstallDeBrosBinaries clones and builds DeBros binaries
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome string, skipRepoUpdate bool) error {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building DeBros binaries...\n")
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, oramaHome string, skipRepoUpdate bool) error {
fmt.Fprintf(bi.logWriter, " Building DeBros binaries...\n")
srcDir := filepath.Join(debrosHome, "src")
binDir := filepath.Join(debrosHome, "bin")
srcDir := filepath.Join(oramaHome, "src")
binDir := filepath.Join(oramaHome, "bin")
// Ensure directories exist
os.MkdirAll(srcDir, 0755)
os.MkdirAll(binDir, 0755)
if err := os.MkdirAll(srcDir, 0755); err != nil {
return fmt.Errorf("failed to create source directory %s: %w", srcDir, err)
}
if err := os.MkdirAll(binDir, 0755); err != nil {
return fmt.Errorf("failed to create bin directory %s: %w", binDir, err)
}
// Check if source directory has content (either git repo or pre-existing source)
hasSourceContent := false
if entries, err := os.ReadDir(srcDir); err == nil && len(entries) > 0 {
hasSourceContent = true
}
// Check if git repository is already initialized
repoInitialized := false
isGitRepo := false
if _, err := os.Stat(filepath.Join(srcDir, ".git")); err == nil {
repoInitialized = true
isGitRepo = true
}
// Handle repository update/clone based on skipRepoUpdate flag
if skipRepoUpdate {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Skipping repo clone/pull (--no-pull flag)\n")
if !repoInitialized {
return fmt.Errorf("cannot skip pull: repository not found at %s", srcDir)
fmt.Fprintf(bi.logWriter, " Skipping repo clone/pull (--no-pull flag)\n")
if !hasSourceContent {
return fmt.Errorf("cannot skip pull: source directory is empty at %s (need to populate it first)", srcDir)
}
// Verify srcDir exists and has content
if entries, err := os.ReadDir(srcDir); err != nil {
return fmt.Errorf("failed to read source directory %s: %w", srcDir, err)
} else if len(entries) == 0 {
return fmt.Errorf("source directory %s is empty", srcDir)
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Using existing repository at %s (skipping git operations)\n", srcDir)
fmt.Fprintf(bi.logWriter, " Using existing source at %s (skipping git operations)\n", srcDir)
// Skip to build step - don't execute any git commands
} else {
// Clone repository if not present, otherwise update it
if !repoInitialized {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Cloning repository...\n")
if !isGitRepo {
fmt.Fprintf(bi.logWriter, " Cloning repository...\n")
cmd := exec.Command("git", "clone", "--branch", branch, "--depth", "1", "https://github.com/DeBrosOfficial/network.git", srcDir)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to clone repository: %w", err)
}
} else {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating repository to latest changes...\n")
fmt.Fprintf(bi.logWriter, " Updating repository to latest changes...\n")
if output, err := exec.Command("git", "-C", srcDir, "fetch", "origin", branch).CombinedOutput(); err != nil {
return fmt.Errorf("failed to fetch repository updates: %v\n%s", err, string(output))
}
@ -328,16 +335,16 @@ func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome strin
}
// Build binaries
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building binaries...\n")
fmt.Fprintf(bi.logWriter, " Building binaries...\n")
cmd := exec.Command("make", "build")
cmd.Dir = srcDir
cmd.Env = append(os.Environ(), "HOME="+debrosHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
cmd.Env = append(os.Environ(), "HOME="+oramaHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to build: %v\n%s", err, string(output))
}
// Copy binaries
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Copying binaries...\n")
fmt.Fprintf(bi.logWriter, " Copying binaries...\n")
srcBinDir := filepath.Join(srcDir, "bin")
// Check if source bin directory exists
@ -374,21 +381,36 @@ func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome strin
}
}
exec.Command("chmod", "-R", "755", binDir).Run()
exec.Command("chown", "-R", "debros:debros", binDir).Run()
if err := exec.Command("chmod", "-R", "755", binDir).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chmod bin directory: %v\n", err)
}
if err := exec.Command("chown", "-R", "debros:debros", binDir).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown bin directory: %v\n", err)
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ DeBros binaries installed\n")
// Grant CAP_NET_BIND_SERVICE to orama-node to allow binding to ports 80/443 without root
nodeBinary := filepath.Join(binDir, "orama-node")
if _, err := os.Stat(nodeBinary); err == nil {
if err := exec.Command("setcap", "cap_net_bind_service=+ep", nodeBinary).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to setcap on orama-node: %v\n", err)
fmt.Fprintf(bi.logWriter, " ⚠️ Gateway may not be able to bind to port 80/443\n")
} else {
fmt.Fprintf(bi.logWriter, " ✓ Set CAP_NET_BIND_SERVICE on orama-node\n")
}
}
fmt.Fprintf(bi.logWriter, " ✓ DeBros binaries installed\n")
return nil
}
// InstallSystemDependencies installs system-level dependencies via apt
func (bi *BinaryInstaller) InstallSystemDependencies() error {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing system dependencies...\n")
fmt.Fprintf(bi.logWriter, " Installing system dependencies...\n")
// Update package list
cmd := exec.Command("apt-get", "update")
if err := cmd.Run(); err != nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Warning: apt update failed\n")
fmt.Fprintf(bi.logWriter, " Warning: apt update failed\n")
}
// Install dependencies including Node.js for anyone-client
@ -397,19 +419,32 @@ func (bi *BinaryInstaller) InstallSystemDependencies() error {
return fmt.Errorf("failed to install dependencies: %w", err)
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ System dependencies installed\n")
fmt.Fprintf(bi.logWriter, " ✓ System dependencies installed\n")
return nil
}
// InitializeIPFSRepo initializes an IPFS repository for a node
func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int) error {
// IPFSPeerInfo holds IPFS peer information for configuring Peering.Peers
type IPFSPeerInfo struct {
PeerID string
Addrs []string
}
// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster peer discovery
type IPFSClusterPeerInfo struct {
PeerID string // Cluster peer ID (different from IPFS peer ID)
Addrs []string // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
}
// InitializeIPFSRepo initializes an IPFS repository for a node (unified - no bootstrap/node distinction)
// If ipfsPeer is provided, configures Peering.Peers for peer discovery in private networks
func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int, ipfsPeer *IPFSPeerInfo) error {
configPath := filepath.Join(ipfsRepoPath, "config")
repoExists := false
if _, err := os.Stat(configPath); err == nil {
repoExists = true
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS repo for %s already exists, ensuring configuration...\n", nodeType)
fmt.Fprintf(bi.logWriter, " IPFS repo already exists, ensuring configuration...\n")
} else {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS repo for %s...\n", nodeType)
fmt.Fprintf(bi.logWriter, " Initializing IPFS repo...\n")
}
if err := os.MkdirAll(ipfsRepoPath, 0755); err != nil {
@ -442,7 +477,7 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swa
// Configure IPFS addresses (API, Gateway, Swarm) by modifying the config file directly
// This ensures the ports are set correctly and avoids conflicts with RQLite on port 5001
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Configuring IPFS addresses (API: %d, Gateway: %d, Swarm: %d)...\n", apiPort, gatewayPort, swarmPort)
fmt.Fprintf(bi.logWriter, " Configuring IPFS addresses (API: %d, Gateway: %d, Swarm: %d)...\n", apiPort, gatewayPort, swarmPort)
if err := bi.configureIPFSAddresses(ipfsRepoPath, apiPort, gatewayPort, swarmPort); err != nil {
return fmt.Errorf("failed to configure IPFS addresses: %w", err)
}
@ -451,7 +486,7 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swa
// This is critical - IPFS will fail to start if AutoConf is enabled on a private network
// We do this even for existing repos to fix repos initialized before this fix was applied
if swarmKeyExists {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Disabling AutoConf for private swarm...\n")
fmt.Fprintf(bi.logWriter, " Disabling AutoConf for private swarm...\n")
cmd := exec.Command(ipfsBinary, "config", "--json", "AutoConf.Enabled", "false")
cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
if output, err := cmd.CombinedOutput(); err != nil {
@ -460,7 +495,7 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swa
// Clear AutoConf placeholders from config to prevent Kubo startup errors
// When AutoConf is disabled, 'auto' placeholders must be replaced with explicit values or empty
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Clearing AutoConf placeholders from IPFS config...\n")
fmt.Fprintf(bi.logWriter, " Clearing AutoConf placeholders from IPFS config...\n")
type configCommand struct {
desc string
@ -476,17 +511,27 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swa
}
for _, step := range cleanup {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " %s...\n", step.desc)
fmt.Fprintf(bi.logWriter, " %s...\n", step.desc)
cmd := exec.Command(ipfsBinary, step.args...)
cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed while %s: %v\n%s", step.desc, err, string(output))
}
}
// Configure Peering.Peers if we have peer info (for private network discovery)
if ipfsPeer != nil && ipfsPeer.PeerID != "" && len(ipfsPeer.Addrs) > 0 {
fmt.Fprintf(bi.logWriter, " Configuring Peering.Peers for private network discovery...\n")
if err := bi.configureIPFSPeering(ipfsRepoPath, ipfsPeer); err != nil {
return fmt.Errorf("failed to configure IPFS peering: %w", err)
}
}
}
// Fix ownership
exec.Command("chown", "-R", "debros:debros", ipfsRepoPath).Run()
// Fix ownership (best-effort, don't fail if it doesn't work)
if err := exec.Command("chown", "-R", "debros:debros", ipfsRepoPath).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown IPFS repo: %v\n", err)
}
return nil
}
@ -506,20 +551,29 @@ func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort,
return fmt.Errorf("failed to parse IPFS config: %w", err)
}
// Set Addresses
config["Addresses"] = map[string]interface{}{
"API": []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", apiPort),
},
"Gateway": []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", gatewayPort),
},
"Swarm": []string{
// Get existing Addresses section or create new one
// This preserves any existing settings like Announce, AppendAnnounce, NoAnnounce
addresses, ok := config["Addresses"].(map[string]interface{})
if !ok {
addresses = make(map[string]interface{})
}
// Update specific address fields while preserving others
// Bind API and Gateway to localhost only for security
// Swarm binds to all interfaces for peer connections
addresses["API"] = []string{
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort),
}
addresses["Gateway"] = []string{
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gatewayPort),
}
addresses["Swarm"] = []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
fmt.Sprintf("/ip6/::/tcp/%d", swarmPort),
},
}
config["Addresses"] = addresses
// Write config back
updatedData, err := json.MarshalIndent(config, "", " ")
if err != nil {
@ -533,26 +587,75 @@ func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort,
return nil
}
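// With the Phase2c defaults (API 4501, Gateway 8080, Swarm 4101) the Addresses
// section of the IPFS config ends up roughly like this - API and Gateway bound
// to localhost only, Swarm on all interfaces:
//
//	"Addresses": {
//	  "API": ["/ip4/127.0.0.1/tcp/4501"],
//	  "Gateway": ["/ip4/127.0.0.1/tcp/8080"],
//	  "Swarm": ["/ip4/0.0.0.0/tcp/4101", "/ip6/::/tcp/4101"]
//	}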
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration
// configureIPFSPeering configures Peering.Peers in the IPFS config for private network discovery
// This allows nodes in a private swarm to find each other even without bootstrap peers
func (bi *BinaryInstaller) configureIPFSPeering(ipfsRepoPath string, peer *IPFSPeerInfo) error {
configPath := filepath.Join(ipfsRepoPath, "config")
// Read existing config
data, err := os.ReadFile(configPath)
if err != nil {
return fmt.Errorf("failed to read IPFS config: %w", err)
}
var config map[string]interface{}
if err := json.Unmarshal(data, &config); err != nil {
return fmt.Errorf("failed to parse IPFS config: %w", err)
}
// Get existing Peering section or create new one
peering, ok := config["Peering"].(map[string]interface{})
if !ok {
peering = make(map[string]interface{})
}
// Create peer entry
peerEntry := map[string]interface{}{
"ID": peer.PeerID,
"Addrs": peer.Addrs,
}
// Set Peering.Peers
peering["Peers"] = []interface{}{peerEntry}
config["Peering"] = peering
fmt.Fprintf(bi.logWriter, " Adding peer: %s (%d addresses)\n", peer.PeerID, len(peer.Addrs))
// Write config back
updatedData, err := json.MarshalIndent(config, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal IPFS config: %w", err)
}
if err := os.WriteFile(configPath, updatedData, 0600); err != nil {
return fmt.Errorf("failed to write IPFS config: %w", err)
}
return nil
}
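// The resulting Peering section of the IPFS config looks roughly like this
// (peer ID and address are illustrative):
//
//	"Peering": {
//	  "Peers": [
//	    {"ID": "12D3KooWExamplePeer", "Addrs": ["/ip4/203.0.113.10/tcp/4101"]}
//	  ]
//	}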
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration (unified - no bootstrap/node distinction)
// This runs `ipfs-cluster-service init` to create the service.json configuration file.
// For existing installations, it ensures the cluster secret is up to date.
// bootstrapClusterPeers should be in format: ["/ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>"]
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret string, ipfsAPIPort int, bootstrapClusterPeers []string) error {
// clusterPeers should be in format: ["/ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>"]
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(clusterPath, clusterSecret string, ipfsAPIPort int, clusterPeers []string) error {
serviceJSONPath := filepath.Join(clusterPath, "service.json")
configExists := false
if _, err := os.Stat(serviceJSONPath); err == nil {
configExists = true
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS Cluster config for %s already exists, ensuring it's up to date...\n", nodeType)
fmt.Fprintf(bi.logWriter, " IPFS Cluster config already exists, ensuring it's up to date...\n")
} else {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Preparing IPFS Cluster path for %s...\n", nodeType)
fmt.Fprintf(bi.logWriter, " Preparing IPFS Cluster path...\n")
}
if err := os.MkdirAll(clusterPath, 0755); err != nil {
return fmt.Errorf("failed to create IPFS Cluster directory: %w", err)
}
// Fix ownership before running init
exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
// Fix ownership before running init (best-effort)
if err := exec.Command("chown", "-R", "debros:debros", clusterPath).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown cluster path before init: %v\n", err)
}
// Resolve ipfs-cluster-service binary path
clusterBinary, err := bi.ResolveBinaryPath("ipfs-cluster-service", "/usr/local/bin/ipfs-cluster-service", "/usr/bin/ipfs-cluster-service")
@ -564,7 +667,7 @@ func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, cl
if !configExists {
// Initialize cluster config with ipfs-cluster-service init
// This creates the service.json file with all required sections
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS Cluster config...\n")
fmt.Fprintf(bi.logWriter, " Initializing IPFS Cluster config...\n")
cmd := exec.Command(clusterBinary, "init", "--force")
cmd.Env = append(os.Environ(), "IPFS_CLUSTER_PATH="+clusterPath)
// Pass CLUSTER_SECRET to init so it writes the correct secret to service.json directly
@ -580,8 +683,8 @@ func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, cl
// This ensures existing installations get the secret and port synchronized
// We do this AFTER init to ensure our secret takes precedence
if clusterSecret != "" {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating cluster secret, IPFS port, and peer addresses...\n")
if err := bi.updateClusterConfig(clusterPath, clusterSecret, ipfsAPIPort, bootstrapClusterPeers); err != nil {
fmt.Fprintf(bi.logWriter, " Updating cluster secret, IPFS port, and peer addresses...\n")
if err := bi.updateClusterConfig(clusterPath, clusterSecret, ipfsAPIPort, clusterPeers); err != nil {
return fmt.Errorf("failed to update cluster config: %w", err)
}
@ -589,11 +692,13 @@ func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, cl
if err := bi.verifyClusterSecret(clusterPath, clusterSecret); err != nil {
return fmt.Errorf("cluster secret verification failed: %w", err)
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Cluster secret verified\n")
fmt.Fprintf(bi.logWriter, " ✓ Cluster secret verified\n")
}
// Fix ownership again after updates
exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
// Fix ownership again after updates (best-effort)
if err := exec.Command("chown", "-R", "debros:debros", clusterPath).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown cluster path after updates: %v\n", err)
}
return nil
}
@ -614,9 +719,12 @@ func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsA
return fmt.Errorf("failed to parse service.json: %w", err)
}
// Update cluster secret and peer addresses
// Update cluster secret, listen_multiaddress, and peer addresses
if cluster, ok := config["cluster"].(map[string]interface{}); ok {
cluster["secret"] = secret
// Set consistent listen_multiaddress - port 9098 for cluster LibP2P communication
// This MUST match the port used in GetClusterPeerMultiaddr() and peer_addresses
cluster["listen_multiaddress"] = []interface{}{"/ip4/0.0.0.0/tcp/9098"}
// Configure peer addresses for cluster discovery
// This allows nodes to find and connect to each other
if len(bootstrapClusterPeers) > 0 {
@ -624,7 +732,8 @@ func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsA
}
} else {
clusterConfig := map[string]interface{}{
"secret": secret,
"secret": secret,
"listen_multiaddress": []interface{}{"/ip4/0.0.0.0/tcp/9098"},
}
if len(bootstrapClusterPeers) > 0 {
clusterConfig["peer_addresses"] = bootstrapClusterPeers
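// After the update, the cluster section of service.json looks roughly like
// this (secret and peer ID are illustrative); the 9098 listen port must stay
// in sync with GetClusterPeerMultiaddr and the generated peer_addresses:
//
//	"cluster": {
//	  "secret": "<64 hex characters>",
//	  "listen_multiaddress": ["/ip4/0.0.0.0/tcp/9098"],
//	  "peer_addresses": ["/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWClusterPeer"]
//	}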
@ -717,26 +826,68 @@ func (bi *BinaryInstaller) GetClusterPeerMultiaddr(clusterPath string, nodeIP st
}
// InitializeRQLiteDataDir initializes RQLite data directory
func (bi *BinaryInstaller) InitializeRQLiteDataDir(nodeType, dataDir string) error {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing RQLite data dir for %s...\n", nodeType)
func (bi *BinaryInstaller) InitializeRQLiteDataDir(dataDir string) error {
fmt.Fprintf(bi.logWriter, " Initializing RQLite data dir...\n")
if err := os.MkdirAll(dataDir, 0755); err != nil {
return fmt.Errorf("failed to create RQLite data directory: %w", err)
}
exec.Command("chown", "-R", "debros:debros", dataDir).Run()
if err := exec.Command("chown", "-R", "debros:debros", dataDir).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown RQLite data dir: %v\n", err)
}
return nil
}
// InstallAnyoneClient installs the anyone-client npm package globally
func (bi *BinaryInstaller) InstallAnyoneClient() error {
// Check if anyone-client is already available via npx (more reliable for scoped packages)
if cmd := exec.Command("npx", "--yes", "@anyone-protocol/anyone-client", "--version"); cmd.Run() == nil {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ anyone-client already installed\n")
// Note: the CLI binary is "anyone-client", not the full scoped package name
if cmd := exec.Command("npx", "anyone-client", "--help"); cmd.Run() == nil {
fmt.Fprintf(bi.logWriter, " ✓ anyone-client already installed\n")
return nil
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing anyone-client...\n")
fmt.Fprintf(bi.logWriter, " Installing anyone-client...\n")
// Initialize NPM cache structure to ensure all directories exist
// This prevents "mkdir" errors when NPM tries to create nested cache directories
fmt.Fprintf(bi.logWriter, " Initializing NPM cache...\n")
// Create nested cache directories with proper permissions
debrosHome := "/home/debros"
npmCacheDirs := []string{
filepath.Join(debrosHome, ".npm"),
filepath.Join(debrosHome, ".npm", "_cacache"),
filepath.Join(debrosHome, ".npm", "_cacache", "tmp"),
filepath.Join(debrosHome, ".npm", "_logs"),
}
for _, dir := range npmCacheDirs {
if err := os.MkdirAll(dir, 0700); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Failed to create %s: %v\n", dir, err)
continue
}
// Fix ownership to debros user (sequential to avoid race conditions)
if err := exec.Command("chown", "debros:debros", dir).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown %s: %v\n", dir, err)
}
if err := exec.Command("chmod", "700", dir).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chmod %s: %v\n", dir, err)
}
}
// Recursively fix ownership of entire .npm directory to ensure all nested files are owned by debros
if err := exec.Command("chown", "-R", "debros:debros", filepath.Join(debrosHome, ".npm")).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown .npm directory: %v\n", err)
}
// Run npm cache verify as debros user with proper environment
cacheInitCmd := exec.Command("sudo", "-u", "debros", "npm", "cache", "verify", "--silent")
cacheInitCmd.Env = append(os.Environ(), "HOME="+debrosHome)
if err := cacheInitCmd.Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ NPM cache verify warning: %v (continuing anyway)\n", err)
}
// Install anyone-client globally via npm (using scoped package name)
cmd := exec.Command("npm", "install", "-g", "@anyone-protocol/anyone-client")
@ -744,8 +895,18 @@ func (bi *BinaryInstaller) InstallAnyoneClient() error {
return fmt.Errorf("failed to install anyone-client: %w\n%s", err, string(output))
}
// Verify installation - try npx first (most reliable for scoped packages)
verifyCmd := exec.Command("npx", "--yes", "@anyone-protocol/anyone-client", "--version")
// Create terms-agreement file to bypass interactive prompt when running as a service
termsFile := filepath.Join(debrosHome, "terms-agreement")
if err := os.WriteFile(termsFile, []byte("agreed"), 0644); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to create terms-agreement: %v\n", err)
} else {
if err := exec.Command("chown", "debros:debros", termsFile).Run(); err != nil {
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown terms-agreement: %v\n", err)
}
}
// Verify installation - try npx with the correct CLI name (anyone-client, not full scoped package name)
verifyCmd := exec.Command("npx", "anyone-client", "--help")
if err := verifyCmd.Run(); err != nil {
// Fallback: check if binary exists in common locations
possiblePaths := []string{
@ -775,6 +936,6 @@ func (bi *BinaryInstaller) InstallAnyoneClient() error {
}
}
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ anyone-client installed\n")
fmt.Fprintf(bi.logWriter, " ✓ anyone-client installed\n")
return nil
}

View File

@ -3,45 +3,44 @@ package production
import (
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// ProductionSetup orchestrates the entire production deployment
type ProductionSetup struct {
osInfo *OSInfo
arch string
debrosHome string
debrosDir string
logWriter io.Writer
forceReconfigure bool
skipOptionalDeps bool
skipResourceChecks bool
clusterSecretOverride string
privChecker *PrivilegeChecker
osDetector *OSDetector
archDetector *ArchitectureDetector
resourceChecker *ResourceChecker
portChecker *PortChecker
fsProvisioner *FilesystemProvisioner
userProvisioner *UserProvisioner
stateDetector *StateDetector
configGenerator *ConfigGenerator
secretGenerator *SecretGenerator
serviceGenerator *SystemdServiceGenerator
serviceController *SystemdController
binaryInstaller *BinaryInstaller
branch string
skipRepoUpdate bool
NodePeerID string // Captured during Phase3 for later display
osInfo *OSInfo
arch string
oramaHome string
oramaDir string
logWriter io.Writer
forceReconfigure bool
skipOptionalDeps bool
skipResourceChecks bool
privChecker *PrivilegeChecker
osDetector *OSDetector
archDetector *ArchitectureDetector
resourceChecker *ResourceChecker
portChecker *PortChecker
fsProvisioner *FilesystemProvisioner
userProvisioner *UserProvisioner
stateDetector *StateDetector
configGenerator *ConfigGenerator
secretGenerator *SecretGenerator
serviceGenerator *SystemdServiceGenerator
serviceController *SystemdController
binaryInstaller *BinaryInstaller
branch string
skipRepoUpdate bool
NodePeerID string // Captured during Phase3 for later display
}
// ReadBranchPreference reads the stored branch preference from disk
func ReadBranchPreference(debrosDir string) string {
branchFile := filepath.Join(debrosDir, ".branch")
func ReadBranchPreference(oramaDir string) string {
branchFile := filepath.Join(oramaDir, ".branch")
data, err := os.ReadFile(branchFile)
if err != nil {
return "main" // Default to main if file doesn't exist
@ -54,9 +53,9 @@ func ReadBranchPreference(debrosDir string) string {
}
// SaveBranchPreference saves the branch preference to disk
func SaveBranchPreference(debrosDir, branch string) error {
branchFile := filepath.Join(debrosDir, ".branch")
if err := os.MkdirAll(debrosDir, 0755); err != nil {
func SaveBranchPreference(oramaDir, branch string) error {
branchFile := filepath.Join(oramaDir, ".branch")
if err := os.MkdirAll(oramaDir, 0755); err != nil {
return fmt.Errorf("failed to create debros directory: %w", err)
}
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
@ -67,39 +66,37 @@ func SaveBranchPreference(debrosDir, branch string) error {
}
// NewProductionSetup creates a new production setup orchestrator
func NewProductionSetup(debrosHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool, clusterSecretOverride string) *ProductionSetup {
debrosDir := debrosHome + "/.debros"
func NewProductionSetup(oramaHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool) *ProductionSetup {
oramaDir := filepath.Join(oramaHome, ".orama")
arch, _ := (&ArchitectureDetector{}).Detect()
normalizedSecret := strings.TrimSpace(strings.ToLower(clusterSecretOverride))
// If branch is empty, try to read from stored preference, otherwise default to main
if branch == "" {
branch = ReadBranchPreference(debrosDir)
branch = ReadBranchPreference(oramaDir)
}
return &ProductionSetup{
debrosHome: debrosHome,
debrosDir: debrosDir,
logWriter: logWriter,
forceReconfigure: forceReconfigure,
arch: arch,
branch: branch,
skipRepoUpdate: skipRepoUpdate,
skipResourceChecks: skipResourceChecks,
clusterSecretOverride: normalizedSecret,
privChecker: &PrivilegeChecker{},
osDetector: &OSDetector{},
archDetector: &ArchitectureDetector{},
resourceChecker: NewResourceChecker(),
portChecker: NewPortChecker(),
fsProvisioner: NewFilesystemProvisioner(debrosHome),
userProvisioner: NewUserProvisioner("debros", debrosHome, "/bin/bash"),
stateDetector: NewStateDetector(debrosDir),
configGenerator: NewConfigGenerator(debrosDir),
secretGenerator: NewSecretGenerator(debrosDir, normalizedSecret),
serviceGenerator: NewSystemdServiceGenerator(debrosHome, debrosDir),
serviceController: NewSystemdController(),
binaryInstaller: NewBinaryInstaller(arch, logWriter),
oramaHome: oramaHome,
oramaDir: oramaDir,
logWriter: logWriter,
forceReconfigure: forceReconfigure,
arch: arch,
branch: branch,
skipRepoUpdate: skipRepoUpdate,
skipResourceChecks: skipResourceChecks,
privChecker: &PrivilegeChecker{},
osDetector: &OSDetector{},
archDetector: &ArchitectureDetector{},
resourceChecker: NewResourceChecker(),
portChecker: NewPortChecker(),
fsProvisioner: NewFilesystemProvisioner(oramaHome),
userProvisioner: NewUserProvisioner("debros", oramaHome, "/bin/bash"),
stateDetector: NewStateDetector(oramaDir),
configGenerator: NewConfigGenerator(oramaDir),
secretGenerator: NewSecretGenerator(oramaDir),
serviceGenerator: NewSystemdServiceGenerator(oramaHome, oramaDir),
serviceController: NewSystemdController(),
binaryInstaller: NewBinaryInstaller(arch, logWriter),
}
}
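// Hypothetical end-to-end sketch of the phase ordering (arguments are
// illustrative, error handling elided):
//
//	ps := NewProductionSetup("/home/debros", os.Stdout, false, "main", false, false)
//	peers := []string{"/ip4/203.0.113.10/tcp/4001/p2p/12D3KooWExample"}
//	_ = ps.Phase1CheckPrerequisites()
//	_ = ps.Phase2ProvisionEnvironment()
//	_ = ps.Phase2bInstallBinaries()
//	_ = ps.Phase2cInitializeServices(peers, "203.0.113.20", nil, nil) // nil peer info on the first node
//	_ = ps.Phase3GenerateSecrets()
//	_ = ps.Phase4GenerateConfigs(peers, "203.0.113.20", true, "node-1.orama.network", "")
//	_ = ps.Phase5CreateSystemdServices(true)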
@ -168,7 +165,7 @@ func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
if ps.skipResourceChecks {
ps.logf(" ⚠️ Skipping system resource checks (disk, RAM, CPU) due to --ignore-resource-checks flag")
} else {
if err := ps.resourceChecker.CheckDiskSpace(ps.debrosHome); err != nil {
if err := ps.resourceChecker.CheckDiskSpace(ps.oramaHome); err != nil {
ps.logf(" ❌ %v", err)
return err
}
@ -214,8 +211,8 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
}
}
// Create directory structure (base directories only - node-specific dirs created in Phase2c)
if err := ps.fsProvisioner.EnsureDirectoryStructure(""); err != nil {
// Create directory structure (unified structure)
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
return fmt.Errorf("failed to create directory structure: %w", err)
}
ps.logf(" ✓ Directory structure created")
@ -266,7 +263,7 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
}
// Install DeBros binaries
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.debrosHome, ps.skipRepoUpdate); err != nil {
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.oramaHome, ps.skipRepoUpdate); err != nil {
return fmt.Errorf("failed to install DeBros binaries: %w", err)
}
@ -275,21 +272,23 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
}
// Phase2cInitializeServices initializes service repositories and configurations
func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapPeers []string, vpsIP string) error {
// ipfsPeer can be nil for the first node, or contain peer info for joining nodes
// ipfsClusterPeer can be nil for the first node, or contain IPFS Cluster peer info for joining nodes
func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vpsIP string, ipfsPeer *IPFSPeerInfo, ipfsClusterPeer *IPFSClusterPeerInfo) error {
ps.logf("Phase 2c: Initializing services...")
// Ensure node-specific directories exist
if err := ps.fsProvisioner.EnsureDirectoryStructure(nodeType); err != nil {
return fmt.Errorf("failed to create node-specific directories: %w", err)
// Ensure directories exist (unified structure)
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
return fmt.Errorf("failed to create directories: %w", err)
}
// Build paths with nodeType awareness to match systemd unit definitions
dataDir := filepath.Join(ps.debrosDir, "data", nodeType)
// Build paths - unified data directory (all nodes equal)
dataDir := filepath.Join(ps.oramaDir, "data")
// Initialize IPFS repo with correct path structure
// Use port 4501 for API (to avoid conflict with RQLite on 5001), 8080 for gateway (standard), 4101 for swarm (to avoid conflict with LibP2P on 4001)
ipfsRepoPath := filepath.Join(dataDir, "ipfs", "repo")
if err := ps.binaryInstaller.InitializeIPFSRepo(nodeType, ipfsRepoPath, filepath.Join(ps.debrosDir, "secrets", "swarm.key"), 4501, 8080, 4101); err != nil {
if err := ps.binaryInstaller.InitializeIPFSRepo(ipfsRepoPath, filepath.Join(ps.oramaDir, "secrets", "swarm.key"), 4501, 8080, 4101, ipfsPeer); err != nil {
return fmt.Errorf("failed to initialize IPFS repo: %w", err)
}
@ -300,39 +299,38 @@ func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapP
return fmt.Errorf("failed to get cluster secret: %w", err)
}
// Get bootstrap cluster peer addresses for non-bootstrap nodes
var bootstrapClusterPeers []string
if nodeType != "bootstrap" && len(bootstrapPeers) > 0 {
// Try to read bootstrap cluster peer ID and construct multiaddress
bootstrapClusterPath := filepath.Join(ps.debrosDir, "data", "bootstrap", "ipfs-cluster")
// Infer bootstrap IP from bootstrap peers
bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
if bootstrapIP != "" {
// Check if bootstrap cluster identity exists
if _, err := os.Stat(filepath.Join(bootstrapClusterPath, "identity.json")); err == nil {
// Bootstrap cluster is initialized, get its multiaddress
if clusterMultiaddr, err := ps.binaryInstaller.GetClusterPeerMultiaddr(bootstrapClusterPath, bootstrapIP); err == nil {
bootstrapClusterPeers = []string{clusterMultiaddr}
ps.logf(" Configured IPFS Cluster to connect to bootstrap: %s", clusterMultiaddr)
} else {
ps.logf(" ⚠️ Could not read bootstrap cluster peer ID: %v", err)
ps.logf(" ⚠️ IPFS Cluster will rely on mDNS discovery (may not work across internet)")
// Get cluster peer addresses from IPFS Cluster peer info if available
var clusterPeers []string
if ipfsClusterPeer != nil && ipfsClusterPeer.PeerID != "" {
// Construct cluster peer multiaddress using the discovered peer ID
// Format: /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>
peerIP := inferPeerIP(peerAddresses, vpsIP)
if peerIP != "" {
// Construct the bootstrap multiaddress for IPFS Cluster
// Note: IPFS Cluster listens on port 9098 for cluster communication
clusterBootstrapAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, ipfsClusterPeer.PeerID)
clusterPeers = []string{clusterBootstrapAddr}
ps.logf(" IPFS Cluster will connect to peer: %s", clusterBootstrapAddr)
} else if len(ipfsClusterPeer.Addrs) > 0 {
// Fallback: use the addresses from discovery (if they include peer ID)
for _, addr := range ipfsClusterPeer.Addrs {
if strings.Contains(addr, ipfsClusterPeer.PeerID) {
clusterPeers = append(clusterPeers, addr)
}
} else {
ps.logf(" Bootstrap cluster not yet initialized, peer_addresses will be empty")
ps.logf(" IPFS Cluster will rely on mDNS discovery (may not work across internet)")
}
if len(clusterPeers) > 0 {
ps.logf(" IPFS Cluster will connect to discovered peers: %v", clusterPeers)
}
}
}
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret, 4501, bootstrapClusterPeers); err != nil {
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(clusterPath, clusterSecret, 4501, clusterPeers); err != nil {
return fmt.Errorf("failed to initialize IPFS Cluster: %w", err)
}
// Initialize RQLite data directory
rqliteDataDir := filepath.Join(dataDir, "rqlite")
if err := ps.binaryInstaller.InitializeRQLiteDataDir(nodeType, rqliteDataDir); err != nil {
if err := ps.binaryInstaller.InitializeRQLiteDataDir(rqliteDataDir); err != nil {
ps.logf(" ⚠️ RQLite initialization warning: %v", err)
}
@ -347,7 +345,7 @@ func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapP
}
// Phase3GenerateSecrets generates shared secrets and keys
func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
func (ps *ProductionSetup) Phase3GenerateSecrets() error {
ps.logf("Phase 3: Generating secrets...")
// Cluster secret
@ -362,13 +360,8 @@ func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
}
ps.logf(" ✓ IPFS swarm key ensured")
// Node identity
nodeType := "node"
if isBootstrap {
nodeType = "bootstrap"
}
peerID, err := ps.secretGenerator.EnsureNodeIdentity(nodeType)
// Node identity (unified architecture)
peerID, err := ps.secretGenerator.EnsureNodeIdentity()
if err != nil {
return fmt.Errorf("failed to ensure node identity: %w", err)
}
@ -380,7 +373,7 @@ func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
}
// Phase4GenerateConfigs generates node, gateway, and service configs
func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string, bootstrapJoin string) error {
func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP string, enableHTTPS bool, domain string, joinAddress string) error {
if ps.IsUpdate() {
ps.logf("Phase 4: Updating configurations...")
ps.logf(" (Existing configs will be updated to latest format)")
@ -388,77 +381,38 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeer
ps.logf("Phase 4: Generating configurations...")
}
// Node config
nodeConfig, err := ps.configGenerator.GenerateNodeConfig(isBootstrap, bootstrapPeers, vpsIP, bootstrapJoin)
// Node config (unified architecture)
nodeConfig, err := ps.configGenerator.GenerateNodeConfig(peerAddresses, vpsIP, joinAddress, domain, enableHTTPS)
if err != nil {
return fmt.Errorf("failed to generate node config: %w", err)
}
var configFile string
if isBootstrap {
configFile = "bootstrap.yaml"
} else {
configFile = "node.yaml"
}
configFile := "node.yaml"
if err := ps.secretGenerator.SaveConfig(configFile, nodeConfig); err != nil {
return fmt.Errorf("failed to save node config: %w", err)
}
ps.logf(" ✓ Node config generated: %s", configFile)
// Determine Olric servers for gateway config
// Olric will bind to 0.0.0.0 (all interfaces) but gateway needs specific addresses
var olricServers []string
// Gateway configuration is now embedded in each node's config
// No separate gateway.yaml needed - each node runs its own embedded gateway
if isBootstrap {
// Bootstrap node: gateway should connect to vpsIP if provided, otherwise localhost
if vpsIP != "" {
olricServers = []string{net.JoinHostPort(vpsIP, "3320")}
} else {
olricServers = []string{"127.0.0.1:3320"}
}
} else {
// Non-bootstrap node: include bootstrap server and local server
olricServers = []string{"127.0.0.1:3320"} // Default to localhost for single-node
if len(bootstrapPeers) > 0 {
// Try to infer Olric servers from bootstrap peers
bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
if bootstrapIP != "" {
// Add bootstrap Olric server (use net.JoinHostPort for IPv6 support)
olricServers = []string{net.JoinHostPort(bootstrapIP, "3320")}
// Add local Olric server too
if vpsIP != "" {
olricServers = append(olricServers, net.JoinHostPort(vpsIP, "3320"))
} else {
olricServers = append(olricServers, "127.0.0.1:3320")
}
}
}
}
gatewayConfig, err := ps.configGenerator.GenerateGatewayConfig(bootstrapPeers, enableHTTPS, domain, olricServers)
if err != nil {
return fmt.Errorf("failed to generate gateway config: %w", err)
}
if err := ps.secretGenerator.SaveConfig("gateway.yaml", gatewayConfig); err != nil {
return fmt.Errorf("failed to save gateway config: %w", err)
}
ps.logf(" ✓ Gateway config generated")
// Olric config - bind to vpsIP if provided, otherwise all interfaces
// Gateway will connect using the specific address from olricServers list above
olricBindAddr := vpsIP
if olricBindAddr == "" {
olricBindAddr = "0.0.0.0"
}
olricConfig, err := ps.configGenerator.GenerateOlricConfig(olricBindAddr, 3320, 3322)
// Olric config:
// - HTTP API binds to localhost for security (accessed via gateway)
// - Memberlist binds to 0.0.0.0 for cluster communication across nodes
// - Environment "lan" for production multi-node clustering
olricConfig, err := ps.configGenerator.GenerateOlricConfig(
"127.0.0.1", // HTTP API on localhost
3320,
"0.0.0.0", // Memberlist on all interfaces for clustering
3322,
"lan", // Production environment
)
if err != nil {
return fmt.Errorf("failed to generate olric config: %w", err)
}
// Create olric config directory
olricConfigDir := ps.debrosDir + "/configs/olric"
olricConfigDir := ps.oramaDir + "/configs/olric"
if err := os.MkdirAll(olricConfigDir, 0755); err != nil {
return fmt.Errorf("failed to create olric config directory: %w", err)
}
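For reference, these positional arguments line up with the OlricConfigData template parameters declared later in this diff; the generator presumably fills something like the following (the package name is an assumption, the field names and values match the template test below):
data := templates.OlricConfigData{
	ServerBindAddr:        "127.0.0.1", // HTTP API, localhost only
	HTTPPort:              3320,
	MemberlistBindAddr:    "0.0.0.0", // cluster gossip across nodes
	MemberlistPort:        3322,
	MemberlistEnvironment: "lan",
}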
@ -474,7 +428,8 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeer
}
// Phase5CreateSystemdServices creates and enables systemd units
func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP string) error {
// enableHTTPS determines the RQLite Raft port (7002 when SNI is enabled, 7001 otherwise)
func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
ps.logf("Phase 5: Creating systemd services...")
// Validate all required binaries are available before creating services
@ -486,30 +441,26 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
if err != nil {
return fmt.Errorf("ipfs-cluster-service binary not available: %w", err)
}
// Note: rqlited binary is not needed as a separate service - node manages RQLite internally
olricBinary, err := ps.binaryInstaller.ResolveBinaryPath("olric-server", "/usr/local/bin/olric-server", "/usr/bin/olric-server")
if err != nil {
return fmt.Errorf("olric-server binary not available: %w", err)
}
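ResolveBinaryPath's implementation is not part of this diff; a plausible minimal sketch of such a helper (an assumption, not the actual code — imports os, os/exec, fmt):
func resolveBinaryPath(name string, candidates ...string) (string, error) {
	// Prefer the explicit candidate paths, in order.
	for _, c := range candidates {
		if info, err := os.Stat(c); err == nil && info.Mode().IsRegular() {
			return c, nil
		}
	}
	// Fall back to a $PATH lookup.
	if p, err := exec.LookPath(name); err == nil {
		return p, nil
	}
	return "", fmt.Errorf("%s not found in %v or $PATH", name, candidates)
}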
// IPFS service
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(nodeType, ipfsBinary)
unitName := fmt.Sprintf("debros-ipfs-%s.service", nodeType)
if err := ps.serviceController.WriteServiceUnit(unitName, ipfsUnit); err != nil {
// IPFS service (unified - no bootstrap/node distinction)
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(ipfsBinary)
if err := ps.serviceController.WriteServiceUnit("debros-ipfs.service", ipfsUnit); err != nil {
return fmt.Errorf("failed to write IPFS service: %w", err)
}
ps.logf(" ✓ IPFS service created: %s", unitName)
ps.logf(" ✓ IPFS service created: debros-ipfs.service")
// IPFS Cluster service
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(nodeType, clusterBinary)
clusterUnitName := fmt.Sprintf("debros-ipfs-cluster-%s.service", nodeType)
if err := ps.serviceController.WriteServiceUnit(clusterUnitName, clusterUnit); err != nil {
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(clusterBinary)
if err := ps.serviceController.WriteServiceUnit("debros-ipfs-cluster.service", clusterUnit); err != nil {
return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
}
ps.logf(" ✓ IPFS Cluster service created: %s", clusterUnitName)
ps.logf(" ✓ IPFS Cluster service created: debros-ipfs-cluster.service")
// Note: RQLite is managed internally by the node process, not as a separate systemd service
ps.logf(" RQLite will be managed by the node process")
// RQLite is managed internally by each node - no separate systemd service needed
// Olric service
olricUnit := ps.serviceGenerator.GenerateOlricService(olricBinary)
@ -518,20 +469,12 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
}
ps.logf(" ✓ Olric service created")
// Node service
nodeUnit := ps.serviceGenerator.GenerateNodeService(nodeType)
nodeUnitName := fmt.Sprintf("debros-node-%s.service", nodeType)
if err := ps.serviceController.WriteServiceUnit(nodeUnitName, nodeUnit); err != nil {
// Node service (unified - includes embedded gateway)
nodeUnit := ps.serviceGenerator.GenerateNodeService()
if err := ps.serviceController.WriteServiceUnit("debros-node.service", nodeUnit); err != nil {
return fmt.Errorf("failed to write Node service: %w", err)
}
ps.logf(" ✓ Node service created: %s", nodeUnitName)
// Gateway service (optional, only on specific nodes)
gatewayUnit := ps.serviceGenerator.GenerateGatewayService(nodeType)
if err := ps.serviceController.WriteServiceUnit("debros-gateway.service", gatewayUnit); err != nil {
return fmt.Errorf("failed to write Gateway service: %w", err)
}
ps.logf(" ✓ Gateway service created")
ps.logf(" ✓ Node service created: debros-node.service (with embedded gateway)")
// Anyone Client service (SOCKS5 proxy)
anyoneUnit := ps.serviceGenerator.GenerateAnyoneClientService()
@ -546,8 +489,10 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
}
ps.logf(" ✓ Systemd daemon reloaded")
// Enable services (RQLite is managed by node, not as separate service)
services := []string{unitName, clusterUnitName, "debros-olric.service", nodeUnitName, "debros-gateway.service", "debros-anyone-client.service"}
// Enable services (unified names - no bootstrap/node distinction)
// Note: debros-gateway.service is no longer needed - each node has an embedded gateway
// Note: debros-rqlite.service is NOT created - RQLite is managed by each node internally
services := []string{"debros-ipfs.service", "debros-ipfs-cluster.service", "debros-olric.service", "debros-node.service", "debros-anyone-client.service"}
for _, svc := range services {
if err := ps.serviceController.EnableService(svc); err != nil {
ps.logf(" ⚠️ Failed to enable %s: %v", svc, err)
@ -559,8 +504,8 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
// Start services in dependency order
ps.logf(" Starting services...")
// Start infrastructure first (IPFS, Olric, Anyone Client) - RQLite is managed by node
infraServices := []string{unitName, "debros-olric.service"}
// Start infrastructure first (IPFS, Olric, Anyone Client) - RQLite is managed internally by each node
infraServices := []string{"debros-ipfs.service", "debros-olric.service"}
// Check if port 9050 is already in use (e.g., another anyone-client or similar service)
if ps.portChecker.IsPortInUse(9050) {
@ -579,23 +524,20 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
}
// Wait a moment for infrastructure to stabilize
exec.Command("sleep", "2").Run()
time.Sleep(2 * time.Second)
// Start IPFS Cluster
if err := ps.serviceController.StartService(clusterUnitName); err != nil {
ps.logf(" ⚠️ Failed to start %s: %v", clusterUnitName, err)
if err := ps.serviceController.StartService("debros-ipfs-cluster.service"); err != nil {
ps.logf(" ⚠️ Failed to start debros-ipfs-cluster.service: %v", err)
} else {
ps.logf(" - %s started", clusterUnitName)
ps.logf(" - debros-ipfs-cluster.service started")
}
// Start application services
appServices := []string{nodeUnitName, "debros-gateway.service"}
for _, svc := range appServices {
if err := ps.serviceController.StartService(svc); err != nil {
ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
} else {
ps.logf(" - %s started", svc)
}
// Start node service (gateway is embedded in node, no separate service needed)
if err := ps.serviceController.StartService("debros-node.service"); err != nil {
ps.logf(" ⚠️ Failed to start debros-node.service: %v", err)
} else {
ps.logf(" - debros-node.service started (with embedded gateway)")
}
ps.logf(" ✓ All services started")
@ -609,19 +551,18 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
ps.logf(strings.Repeat("=", 70))
ps.logf("\nNode Peer ID: %s", peerID)
ps.logf("\nService Management:")
ps.logf(" systemctl status debros-ipfs-bootstrap")
ps.logf(" journalctl -u debros-node-bootstrap -f")
ps.logf(" tail -f %s/logs/node-bootstrap.log", ps.debrosDir)
ps.logf(" systemctl status debros-ipfs")
ps.logf(" journalctl -u debros-node -f")
ps.logf(" tail -f %s/logs/node.log", ps.oramaDir)
ps.logf("\nLog Files:")
ps.logf(" %s/logs/ipfs-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/ipfs-cluster-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/rqlite-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/olric.log", ps.debrosDir)
ps.logf(" %s/logs/node-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/gateway.log", ps.debrosDir)
ps.logf(" %s/logs/anyone-client.log", ps.debrosDir)
ps.logf(" %s/logs/ipfs.log", ps.oramaDir)
ps.logf(" %s/logs/ipfs-cluster.log", ps.oramaDir)
ps.logf(" %s/logs/olric.log", ps.oramaDir)
ps.logf(" %s/logs/node.log", ps.oramaDir)
ps.logf(" %s/logs/gateway.log", ps.oramaDir)
ps.logf(" %s/logs/anyone-client.log", ps.oramaDir)
ps.logf("\nStart All Services:")
ps.logf(" systemctl start debros-ipfs-bootstrap debros-ipfs-cluster-bootstrap debros-olric debros-anyone-client debros-node-bootstrap debros-gateway")
ps.logf(" systemctl start debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-client debros-node")
ps.logf("\nVerify Installation:")
ps.logf(" curl http://localhost:6001/health")
ps.logf(" curl http://localhost:5001/status")


@ -10,42 +10,36 @@ import (
// FilesystemProvisioner manages directory creation and permissions
type FilesystemProvisioner struct {
debrosHome string
debrosDir string
oramaHome string
oramaDir string
logWriter interface{} // Can be io.Writer for logging
}
// NewFilesystemProvisioner creates a new provisioner
func NewFilesystemProvisioner(debrosHome string) *FilesystemProvisioner {
func NewFilesystemProvisioner(oramaHome string) *FilesystemProvisioner {
return &FilesystemProvisioner{
debrosHome: debrosHome,
debrosDir: filepath.Join(debrosHome, ".debros"),
oramaHome: oramaHome,
oramaDir: filepath.Join(oramaHome, ".orama"),
}
}
// EnsureDirectoryStructure creates all required directories
// nodeType can be "bootstrap", "node", or "" (empty string means create base directories only)
func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error {
// Base directories that are always needed
// EnsureDirectoryStructure creates all required directories (unified structure)
func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
// All directories needed for unified node structure
dirs := []string{
fp.debrosDir,
filepath.Join(fp.debrosDir, "configs"),
filepath.Join(fp.debrosDir, "secrets"),
filepath.Join(fp.debrosDir, "data"),
filepath.Join(fp.debrosDir, "logs"),
filepath.Join(fp.debrosDir, "tls-cache"),
filepath.Join(fp.debrosDir, "backups"),
filepath.Join(fp.debrosHome, "bin"),
filepath.Join(fp.debrosHome, "src"),
}
// Only create directories for the requested node type
if nodeType == "bootstrap" || nodeType == "node" {
dirs = append(dirs,
filepath.Join(fp.debrosDir, "data", nodeType, "ipfs", "repo"),
filepath.Join(fp.debrosDir, "data", nodeType, "ipfs-cluster"),
filepath.Join(fp.debrosDir, "data", nodeType, "rqlite"),
)
fp.oramaDir,
filepath.Join(fp.oramaDir, "configs"),
filepath.Join(fp.oramaDir, "secrets"),
filepath.Join(fp.oramaDir, "data"),
filepath.Join(fp.oramaDir, "data", "ipfs", "repo"),
filepath.Join(fp.oramaDir, "data", "ipfs-cluster"),
filepath.Join(fp.oramaDir, "data", "rqlite"),
filepath.Join(fp.oramaDir, "logs"),
filepath.Join(fp.oramaDir, "tls-cache"),
filepath.Join(fp.oramaDir, "backups"),
filepath.Join(fp.oramaHome, "bin"),
filepath.Join(fp.oramaHome, "src"),
filepath.Join(fp.oramaHome, ".npm"),
}
for _, dir := range dirs {
@ -54,27 +48,24 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error
}
}
// Remove any stray cluster-secret file from root .orama directory
// The correct location is .orama/secrets/cluster-secret
strayClusterSecret := filepath.Join(fp.oramaDir, "cluster-secret")
if _, err := os.Stat(strayClusterSecret); err == nil {
if err := os.Remove(strayClusterSecret); err != nil {
return fmt.Errorf("failed to remove stray cluster-secret file: %w", err)
}
}
// Create log files with correct permissions so systemd can write to them
// Only create logs for the specific nodeType being installed
logsDir := filepath.Join(fp.debrosDir, "logs")
logsDir := filepath.Join(fp.oramaDir, "logs")
logFiles := []string{
"olric.log",
"gateway.log",
}
// Add node-type-specific log files only if nodeType is specified
if nodeType == "bootstrap" {
logFiles = append(logFiles,
"ipfs-bootstrap.log",
"ipfs-cluster-bootstrap.log",
"node-bootstrap.log",
)
} else if nodeType == "node" {
logFiles = append(logFiles,
"ipfs-node.log",
"ipfs-cluster-node.log",
"node-node.log",
)
"ipfs.log",
"ipfs-cluster.log",
"node.log",
"anyone-client.log",
}
for _, logFile := range logFiles {
@ -90,27 +81,34 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error
return nil
}
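For orientation, the layout this creates under the service user's home directory:
~/.orama/
  configs/        (node.yaml, olric/config.yaml)
  secrets/        (cluster-secret, swarm.key, node identity)
  data/ipfs/repo/
  data/ipfs-cluster/
  data/rqlite/
  logs/           (ipfs.log, ipfs-cluster.log, node.log, olric.log, gateway.log, anyone-client.log)
  tls-cache/
  backups/
~/bin/  ~/src/  ~/.npm/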
// FixOwnership changes ownership of .debros directory to debros user
// FixOwnership changes ownership of .orama directory to debros user
func (fp *FilesystemProvisioner) FixOwnership() error {
// Fix entire .debros directory recursively (includes all data, configs, logs, etc.)
cmd := exec.Command("chown", "-R", "debros:debros", fp.debrosDir)
// Fix entire .orama directory recursively (includes all data, configs, logs, etc.)
cmd := exec.Command("chown", "-R", "debros:debros", fp.oramaDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.debrosDir, err, string(output))
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaDir, err, string(output))
}
// Also fix home directory ownership
cmd = exec.Command("chown", "debros:debros", fp.debrosHome)
cmd = exec.Command("chown", "debros:debros", fp.oramaHome)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.debrosHome, err, string(output))
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaHome, err, string(output))
}
// Fix bin directory
binDir := filepath.Join(fp.debrosHome, "bin")
binDir := filepath.Join(fp.oramaHome, "bin")
cmd = exec.Command("chown", "-R", "debros:debros", binDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", binDir, err, string(output))
}
// Fix npm cache directory
npmDir := filepath.Join(fp.oramaHome, ".npm")
cmd = exec.Command("chown", "-R", "debros:debros", npmDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", npmDir, err, string(output))
}
return nil
}
@ -186,20 +184,20 @@ func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
// StateDetector checks for existing production state
type StateDetector struct {
debrosDir string
oramaDir string
}
// NewStateDetector creates a state detector
func NewStateDetector(debrosDir string) *StateDetector {
func NewStateDetector(oramaDir string) *StateDetector {
return &StateDetector{
debrosDir: debrosDir,
oramaDir: oramaDir,
}
}
// IsConfigured checks if basic configs exist
func (sd *StateDetector) IsConfigured() bool {
nodeConfig := filepath.Join(sd.debrosDir, "configs", "node.yaml")
gatewayConfig := filepath.Join(sd.debrosDir, "configs", "gateway.yaml")
nodeConfig := filepath.Join(sd.oramaDir, "configs", "node.yaml")
gatewayConfig := filepath.Join(sd.oramaDir, "configs", "gateway.yaml")
_, err1 := os.Stat(nodeConfig)
_, err2 := os.Stat(gatewayConfig)
return err1 == nil || err2 == nil
@ -207,24 +205,36 @@ func (sd *StateDetector) IsConfigured() bool {
// HasSecrets checks if cluster secret and swarm key exist
func (sd *StateDetector) HasSecrets() bool {
clusterSecret := filepath.Join(sd.debrosDir, "secrets", "cluster-secret")
swarmKey := filepath.Join(sd.debrosDir, "secrets", "swarm.key")
clusterSecret := filepath.Join(sd.oramaDir, "secrets", "cluster-secret")
swarmKey := filepath.Join(sd.oramaDir, "secrets", "swarm.key")
_, err1 := os.Stat(clusterSecret)
_, err2 := os.Stat(swarmKey)
return err1 == nil && err2 == nil
}
// HasIPFSData checks if IPFS repo is initialized
// HasIPFSData checks if IPFS repo is initialized (unified path)
func (sd *StateDetector) HasIPFSData() bool {
ipfsRepoPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "ipfs", "repo", "config")
_, err := os.Stat(ipfsRepoPath)
// Check unified path first
ipfsRepoPath := filepath.Join(sd.oramaDir, "data", "ipfs", "repo", "config")
if _, err := os.Stat(ipfsRepoPath); err == nil {
return true
}
// Fallback: check legacy bootstrap path for migration
legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "ipfs", "repo", "config")
_, err := os.Stat(legacyPath)
return err == nil
}
// HasRQLiteData checks if RQLite data exists
// HasRQLiteData checks if RQLite data exists (unified path)
func (sd *StateDetector) HasRQLiteData() bool {
rqliteDataPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "rqlite")
info, err := os.Stat(rqliteDataPath)
// Check unified path first
rqliteDataPath := filepath.Join(sd.oramaDir, "data", "rqlite")
if info, err := os.Stat(rqliteDataPath); err == nil && info.IsDir() {
return true
}
// Fallback: check legacy bootstrap path for migration
legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "rqlite")
info, err := os.Stat(legacyPath)
return err == nil && info.IsDir()
}
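Together these checks let the installer distinguish a fresh install from an update of an older bootstrap-style layout; a hypothetical wiring (IsUpdate's actual logic is not in this excerpt):
sd := NewStateDetector(filepath.Join(home, ".orama"))
isUpdate := sd.IsConfigured() || sd.HasSecrets() || sd.HasIPFSData() || sd.HasRQLiteData()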


@ -10,31 +10,25 @@ import (
// SystemdServiceGenerator generates systemd unit files
type SystemdServiceGenerator struct {
debrosHome string
debrosDir string
oramaHome string
oramaDir string
}
// NewSystemdServiceGenerator creates a new service generator
func NewSystemdServiceGenerator(debrosHome, debrosDir string) *SystemdServiceGenerator {
func NewSystemdServiceGenerator(oramaHome, oramaDir string) *SystemdServiceGenerator {
return &SystemdServiceGenerator{
debrosHome: debrosHome,
debrosDir: debrosDir,
oramaHome: oramaHome,
oramaDir: oramaDir,
}
}
// GenerateIPFSService generates the IPFS daemon systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSService(nodeType string, ipfsBinary string) string {
var ipfsRepoPath string
if nodeType == "bootstrap" {
ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs", "repo")
} else {
ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs", "repo")
}
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodeType))
func (ssg *SystemdServiceGenerator) GenerateIPFSService(ipfsBinary string) string {
ipfsRepoPath := filepath.Join(ssg.oramaDir, "data", "ipfs", "repo")
logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs.log")
return fmt.Sprintf(`[Unit]
Description=IPFS Daemon (%[1]s)
Description=IPFS Daemon
After=network-online.target
Wants=network-online.target
@ -42,83 +36,95 @@ Wants=network-online.target
Type=simple
User=debros
Group=debros
Environment=HOME=%[2]s
Environment=IPFS_PATH=%[3]s
ExecStartPre=/bin/bash -c 'if [ -f %[4]s/secrets/swarm.key ] && [ ! -f %[3]s/swarm.key ]; then cp %[4]s/secrets/swarm.key %[3]s/swarm.key && chmod 600 %[3]s/swarm.key; fi'
ExecStart=%[6]s daemon --enable-pubsub-experiment --repo-dir=%[3]s
Environment=HOME=%[1]s
Environment=IPFS_PATH=%[2]s
ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi'
ExecStart=%[5]s daemon --enable-pubsub-experiment --repo-dir=%[2]s
Restart=always
RestartSec=5
StandardOutput=file:%[5]s
StandardError=file:%[5]s
SyslogIdentifier=ipfs-%[1]s
StandardOutput=append:%[4]s
StandardError=append:%[4]s
SyslogIdentifier=debros-ipfs
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[4]s
ProtectHome=read-only
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[3]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, ipfsRepoPath, ssg.debrosDir, logFile, ipfsBinary)
`, ssg.oramaHome, ipfsRepoPath, ssg.oramaDir, logFile, ipfsBinary)
}
// GenerateIPFSClusterService generates the IPFS Cluster systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(nodeType string, clusterBinary string) string {
var clusterPath string
if nodeType == "bootstrap" {
clusterPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs-cluster")
} else {
clusterPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs-cluster")
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(clusterBinary string) string {
clusterPath := filepath.Join(ssg.oramaDir, "data", "ipfs-cluster")
logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs-cluster.log")
// Read the cluster secret from its file and pass it via the CLUSTER_SECRET environment variable, which ipfs-cluster-service honors at startup
clusterSecretPath := filepath.Join(ssg.oramaDir, "secrets", "cluster-secret")
clusterSecret := ""
if data, err := os.ReadFile(clusterSecretPath); err == nil {
clusterSecret = strings.TrimSpace(string(data))
}
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", nodeType))
return fmt.Sprintf(`[Unit]
Description=IPFS Cluster Service (%[1]s)
After=debros-ipfs-%[1]s.service
Wants=debros-ipfs-%[1]s.service
Requires=debros-ipfs-%[1]s.service
Description=IPFS Cluster Service
After=debros-ipfs.service
Wants=debros-ipfs.service
Requires=debros-ipfs.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%[2]s
Environment=HOME=%[2]s
Environment=IPFS_CLUSTER_PATH=%[3]s
ExecStart=%[6]s daemon
WorkingDirectory=%[1]s
Environment=HOME=%[1]s
Environment=IPFS_CLUSTER_PATH=%[2]s
Environment=CLUSTER_SECRET=%[5]s
ExecStartPre=/bin/bash -c 'mkdir -p %[2]s && chmod 700 %[2]s'
ExecStart=%[4]s daemon
Restart=always
RestartSec=5
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=ipfs-cluster-%[1]s
StandardOutput=append:%[3]s
StandardError=append:%[3]s
SyslogIdentifier=debros-ipfs-cluster
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[5]s
ProtectHome=read-only
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[1]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, clusterPath, logFile, ssg.debrosDir, clusterBinary)
`, ssg.oramaHome, clusterPath, logFile, clusterBinary, clusterSecret)
}
// GenerateRQLiteService generates the RQLite systemd unit
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
var dataDir string
if nodeType == "bootstrap" {
dataDir = filepath.Join(ssg.debrosDir, "data", "bootstrap", "rqlite")
} else {
dataDir = filepath.Join(ssg.debrosDir, "data", "node", "rqlite")
}
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
dataDir := filepath.Join(ssg.oramaDir, "data", "rqlite")
logFile := filepath.Join(ssg.oramaDir, "logs", "rqlite.log")
// Use public IP for advertise if provided, otherwise default to localhost
if advertiseIP == "" {
advertiseIP = "127.0.0.1"
}
// Bind RQLite to localhost only - external access via SNI gateway
args := fmt.Sprintf(
`-http-addr 0.0.0.0:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 0.0.0.0:%d`,
`-http-addr 127.0.0.1:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 127.0.0.1:%d`,
httpPort, advertiseIP, httpPort, advertiseIP, raftPort, raftPort,
)
@ -128,10 +134,8 @@ func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, rqlit
args += fmt.Sprintf(` %s`, dataDir)
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", nodeType))
return fmt.Sprintf(`[Unit]
Description=RQLite Database (%[1]s)
Description=RQLite Database
After=network-online.target
Wants=network-online.target
@ -139,28 +143,34 @@ Wants=network-online.target
Type=simple
User=debros
Group=debros
Environment=HOME=%[2]s
ExecStart=%[6]s %[3]s
Environment=HOME=%[1]s
ExecStart=%[5]s %[2]s
Restart=always
RestartSec=5
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=rqlite-%[1]s
StandardOutput=append:%[3]s
StandardError=append:%[3]s
SyslogIdentifier=debros-rqlite
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[5]s
ProtectHome=read-only
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, args, logFile, ssg.debrosDir, rqliteBinary)
`, ssg.oramaHome, args, logFile, dataDir, rqliteBinary)
}
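For concreteness, with httpPort 5001, raftPort 7001, joinAddr "10.0.0.1:7001" and advertiseIP "10.0.0.2", the rendered ExecStart comes out roughly as follows (the join flags come from the branch elided in this hunk; see the unit tests further down):
ExecStart=/usr/local/bin/rqlited -http-addr 127.0.0.1:5001 -http-adv-addr 10.0.0.2:5001 -raft-adv-addr 10.0.0.2:7001 -raft-addr 127.0.0.1:7001 -join 10.0.0.1:7001 -join-attempts 30 -join-interval 10s /home/debros/.orama/data/rqlite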
// GenerateOlricService generates the Olric systemd unit
func (ssg *SystemdServiceGenerator) GenerateOlricService(olricBinary string) string {
olricConfigPath := filepath.Join(ssg.debrosDir, "configs", "olric", "config.yaml")
logFile := filepath.Join(ssg.debrosDir, "logs", "olric.log")
olricConfigPath := filepath.Join(ssg.oramaDir, "configs", "olric", "config.yaml")
logFile := filepath.Join(ssg.oramaDir, "logs", "olric.log")
return fmt.Sprintf(`[Unit]
Description=Olric Cache Server
@ -176,99 +186,113 @@ Environment=OLRIC_SERVER_CONFIG=%[2]s
ExecStart=%[5]s
Restart=always
RestartSec=5
StandardOutput=file:%[3]s
StandardError=file:%[3]s
StandardOutput=append:%[3]s
StandardError=append:%[3]s
SyslogIdentifier=olric
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=read-only
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, ssg.debrosHome, olricConfigPath, logFile, ssg.debrosDir, olricBinary)
`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary)
}
// GenerateNodeService generates the DeBros Node systemd unit
func (ssg *SystemdServiceGenerator) GenerateNodeService(nodeType string) string {
var configFile string
if nodeType == "bootstrap" {
configFile = "bootstrap.yaml"
} else {
configFile = "node.yaml"
}
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("node-%s.log", nodeType))
func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
configFile := "node.yaml"
logFile := filepath.Join(ssg.oramaDir, "logs", "node.log")
// Note: systemd does not expand substitution variables in StandardOutput/StandardError paths,
// so the absolute log path is rendered into the unit file directly.
return fmt.Sprintf(`[Unit]
Description=DeBros Network Node (%s)
After=debros-ipfs-cluster-%s.service
Wants=debros-ipfs-cluster-%s.service
Requires=debros-ipfs-cluster-%s.service
Description=DeBros Network Node
After=debros-ipfs-cluster.service debros-olric.service
Wants=debros-ipfs-cluster.service debros-olric.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
ExecStart=%s/bin/node --config %s/configs/%s
WorkingDirectory=%[1]s
Environment=HOME=%[1]s
ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s
Restart=always
RestartSec=5
StandardOutput=file:%s
StandardError=file:%s
SyslogIdentifier=debros-node-%s
StandardOutput=append:%[4]s
StandardError=append:%[4]s
SyslogIdentifier=debros-node
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s
ProtectHome=read-only
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[2]s
[Install]
WantedBy=multi-user.target
`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, configFile, logFile, logFile, nodeType, ssg.debrosDir)
`, ssg.oramaHome, ssg.oramaDir, configFile, logFile)
}
// GenerateGatewayService generates the DeBros Gateway systemd unit
func (ssg *SystemdServiceGenerator) GenerateGatewayService(nodeType string) string {
nodeService := fmt.Sprintf("debros-node-%s.service", nodeType)
olricService := "debros-olric.service"
logFile := filepath.Join(ssg.debrosDir, "logs", "gateway.log")
func (ssg *SystemdServiceGenerator) GenerateGatewayService() string {
logFile := filepath.Join(ssg.oramaDir, "logs", "gateway.log")
return fmt.Sprintf(`[Unit]
Description=DeBros Gateway
After=%s %s
Wants=%s %s
After=debros-node.service debros-olric.service
Wants=debros-node.service debros-olric.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
ExecStart=%s/bin/gateway --config %s/data/gateway.yaml
WorkingDirectory=%[1]s
Environment=HOME=%[1]s
ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=file:%s
StandardError=file:%s
StandardOutput=append:%[3]s
StandardError=append:%[3]s
SyslogIdentifier=debros-gateway
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
# Note: NoNewPrivileges is omitted because it conflicts with AmbientCapabilities
# The service needs CAP_NET_BIND_SERVICE to bind to privileged ports (80, 443)
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s
ProtectHome=read-only
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[2]s
[Install]
WantedBy=multi-user.target
`, nodeService, olricService, nodeService, olricService, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, logFile, logFile, ssg.debrosDir)
`, ssg.oramaHome, ssg.oramaDir, logFile)
}
// GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit
func (ssg *SystemdServiceGenerator) GenerateAnyoneClientService() string {
logFile := filepath.Join(ssg.debrosDir, "logs", "anyone-client.log")
logFile := filepath.Join(ssg.oramaDir, "logs", "anyone-client.log")
return fmt.Sprintf(`[Unit]
Description=Anyone Client SOCKS5 Proxy
@ -281,21 +305,28 @@ User=debros
Group=debros
Environment=HOME=%[1]s
Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin
ExecStart=/usr/bin/npx --yes @anyone-protocol/anyone-client
WorkingDirectory=%[1]s
ExecStart=/usr/bin/npx anyone-client
Restart=always
RestartSec=5
StandardOutput=file:%[2]s
StandardError=file:%[2]s
StandardOutput=append:%[2]s
StandardError=append:%[2]s
SyslogIdentifier=anyone-client
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=no
ProtectKernelTunables=yes
ProtectKernelModules=yes
ProtectControlGroups=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
ReadWritePaths=%[3]s
[Install]
WantedBy=multi-user.target
`, ssg.debrosHome, logFile, ssg.debrosDir)
`, ssg.oramaHome, logFile, ssg.oramaDir)
}
// SystemdController manages systemd service operations


@ -9,23 +9,20 @@ import (
func TestGenerateRQLiteService(t *testing.T) {
tests := []struct {
name string
nodeType string
joinAddr string
advertiseIP string
expectJoinInUnit bool
expectAdvertiseIP string
}{
{
name: "bootstrap with localhost advertise",
nodeType: "bootstrap",
name: "first node with localhost advertise",
joinAddr: "",
advertiseIP: "",
expectJoinInUnit: false,
expectAdvertiseIP: "127.0.0.1",
},
{
name: "bootstrap with public IP advertise",
nodeType: "bootstrap",
name: "first node with public IP advertise",
joinAddr: "",
advertiseIP: "10.0.0.1",
expectJoinInUnit: false,
@ -33,7 +30,6 @@ func TestGenerateRQLiteService(t *testing.T) {
},
{
name: "node joining cluster",
nodeType: "node",
joinAddr: "10.0.0.1:7001",
advertiseIP: "10.0.0.2",
expectJoinInUnit: true,
@ -41,7 +37,6 @@ func TestGenerateRQLiteService(t *testing.T) {
},
{
name: "node with localhost (should still include join)",
nodeType: "node",
joinAddr: "localhost:7001",
advertiseIP: "127.0.0.1",
expectJoinInUnit: true,
@ -52,11 +47,11 @@ func TestGenerateRQLiteService(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ssg := &SystemdServiceGenerator{
debrosHome: "/home/debros",
debrosDir: "/home/debros/.debros",
oramaHome: "/home/debros",
oramaDir: "/home/debros/.orama",
}
unit := ssg.GenerateRQLiteService(tt.nodeType, "/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
// Check advertise IP is present
expectedAdvertise := tt.expectAdvertiseIP + ":5001"
@ -86,21 +81,21 @@ func TestGenerateRQLiteService(t *testing.T) {
// TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments
func TestGenerateRQLiteServiceArgs(t *testing.T) {
ssg := &SystemdServiceGenerator{
debrosHome: "/home/debros",
debrosDir: "/home/debros/.debros",
oramaHome: "/home/debros",
oramaDir: "/home/debros/.orama",
}
unit := ssg.GenerateRQLiteService("node", "/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
// Verify essential flags are present
if !strings.Contains(unit, "-http-addr 0.0.0.0:5001") {
t.Error("missing -http-addr 0.0.0.0:5001")
// Verify essential flags are present (localhost binding for security)
if !strings.Contains(unit, "-http-addr 127.0.0.1:5001") {
t.Error("missing -http-addr 127.0.0.1:5001")
}
if !strings.Contains(unit, "-http-adv-addr 10.0.0.2:5001") {
t.Error("missing -http-adv-addr 10.0.0.2:5001")
}
if !strings.Contains(unit, "-raft-addr 0.0.0.0:7001") {
t.Error("missing -raft-addr 0.0.0.0:7001")
if !strings.Contains(unit, "-raft-addr 127.0.0.1:7001") {
t.Error("missing -raft-addr 127.0.0.1:7001")
}
if !strings.Contains(unit, "-raft-adv-addr 10.0.0.2:7001") {
t.Error("missing -raft-adv-addr 10.0.0.2:7001")


@ -1,43 +0,0 @@
node:
id: "{{.NodeID}}"
type: "bootstrap"
listen_addresses:
- "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
data_dir: "{{.DataDir}}"
max_connections: 50
database:
data_dir: "{{.DataDir}}/rqlite"
replication_factor: 3
shard_count: 16
max_database_size: 1073741824
backup_interval: "24h"
rqlite_port: {{.RQLiteHTTPPort}}
rqlite_raft_port: {{.RQLiteRaftPort}}
rqlite_join_address: "{{.RQLiteJoinAddress}}"
cluster_sync_interval: "30s"
peer_inactivity_limit: "24h"
min_cluster_size: 3
ipfs:
cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
api_url: "http://localhost:{{.IPFSAPIPort}}"
timeout: "60s"
replication_factor: 3
enable_encryption: true
discovery:
bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
discovery_interval: "15s"
bootstrap_port: {{.P2PPort}}
http_adv_address: "{{.HTTPAdvAddress}}"
raft_adv_address: "{{.RaftAdvAddress}}"
node_namespace: "default"
security:
enable_tls: false
logging:
level: "info"
format: "console"


@ -1,10 +1,10 @@
node:
id: "{{.NodeID}}"
type: "node"
listen_addresses:
- "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
data_dir: "{{.DataDir}}"
max_connections: 50
domain: "{{.Domain}}"
database:
data_dir: "{{.DataDir}}/rqlite"
@ -13,11 +13,16 @@ database:
max_database_size: 1073741824
backup_interval: "24h"
rqlite_port: {{.RQLiteHTTPPort}}
rqlite_raft_port: {{.RQLiteRaftPort}}
rqlite_raft_port: {{.RQLiteRaftInternalPort}}
rqlite_join_address: "{{.RQLiteJoinAddress}}"
cluster_sync_interval: "30s"
{{if .NodeCert}}# Node-to-node TLS encryption for Raft communication (direct RQLite TLS on port 7002)
node_cert: "{{.NodeCert}}"
node_key: "{{.NodeKey}}"
{{if .NodeCACert}}node_ca_cert: "{{.NodeCACert}}"
{{end}}{{if .NodeNoVerify}}node_no_verify: true
{{end}}{{end}}cluster_sync_interval: "30s"
peer_inactivity_limit: "24h"
min_cluster_size: 3
min_cluster_size: 1
ipfs:
cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
api_url: "http://localhost:{{.IPFSAPIPort}}"
@ -42,3 +47,42 @@ logging:
level: "info"
format: "console"
http_gateway:
enabled: true
listen_addr: "{{if .EnableHTTPS}}:{{.HTTPSPort}}{{else}}:{{.UnifiedGatewayPort}}{{end}}"
node_name: "{{.NodeID}}"
{{if .EnableHTTPS}}https:
enabled: true
domain: "{{.Domain}}"
auto_cert: true
cache_dir: "{{.TLSCacheDir}}"
http_port: {{.HTTPPort}}
https_port: {{.HTTPSPort}}
email: "admin@{{.Domain}}"
{{end}}
{{if .EnableHTTPS}}sni:
enabled: true
listen_addr: ":{{.RQLiteRaftPort}}"
cert_file: "{{.TLSCacheDir}}/{{.Domain}}.crt"
key_file: "{{.TLSCacheDir}}/{{.Domain}}.key"
routes:
# Note: Raft traffic bypasses SNI gateway - RQLite uses native TLS on port 7002
ipfs.{{.Domain}}: "localhost:4101"
ipfs-cluster.{{.Domain}}: "localhost:9098"
olric.{{.Domain}}: "localhost:3322"
{{end}}
# Full gateway configuration (for API, auth, pubsub, and internal service routing)
client_namespace: "default"
rqlite_dsn: "http://localhost:{{.RQLiteHTTPPort}}"
olric_servers:
- "127.0.0.1:3320"
olric_timeout: "10s"
ipfs_cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
ipfs_api_url: "http://localhost:{{.IPFSAPIPort}}"
ipfs_timeout: "60s"
# Routes for internal service reverse proxy (kept for backwards compatibility but not used by full gateway)
routes: {}


@ -1,8 +1,8 @@
server:
bindAddr: "{{.BindAddr}}"
bindAddr: "{{.ServerBindAddr}}"
bindPort: {{ .HTTPPort }}
memberlist:
environment: local
bindAddr: "{{.BindAddr}}"
environment: {{ .MemberlistEnvironment }}
bindAddr: "{{.MemberlistBindAddr}}"
bindPort: {{ .MemberlistPort }}


@ -11,34 +11,33 @@ import (
//go:embed *.yaml *.service
var templatesFS embed.FS
// BootstrapConfigData holds parameters for bootstrap.yaml rendering
type BootstrapConfigData struct {
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
ClusterAPIPort int
IPFSAPIPort int // Default: 4501
BootstrapPeers []string // List of bootstrap peer multiaddrs
RQLiteJoinAddress string // Optional: join address for secondary bootstraps
HTTPAdvAddress string // Advertised HTTP address (IP:port)
RaftAdvAddress string // Advertised Raft address (IP:port)
}
// NodeConfigData holds parameters for node.yaml rendering
// NodeConfigData holds parameters for node.yaml rendering (unified - no bootstrap/node distinction)
type NodeConfigData struct {
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
RQLiteJoinAddress string
BootstrapPeers []string
ClusterAPIPort int
IPFSAPIPort int // Default: 4501+
HTTPAdvAddress string // Advertised HTTP address (IP:port)
RaftAdvAddress string // Advertised Raft address (IP:port)
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int // External Raft port for advertisement (7001 for SNI)
RQLiteRaftInternalPort int // Internal Raft port for local binding (7002 when SNI enabled)
RQLiteJoinAddress string // Optional: join address for joining existing cluster
BootstrapPeers []string // List of peer multiaddrs to connect to
ClusterAPIPort int
IPFSAPIPort int // Default: 4501
HTTPAdvAddress string // Advertised HTTP address (IP:port)
RaftAdvAddress string // Advertised Raft address (IP:port or domain:port for SNI)
UnifiedGatewayPort int // Unified gateway port for all node services
Domain string // Domain for this node (e.g., node-123.orama.network)
EnableHTTPS bool // Enable HTTPS/TLS with ACME
TLSCacheDir string // Directory for ACME certificate cache
HTTPPort int // HTTP port for ACME challenges (usually 80)
HTTPSPort int // HTTPS port (usually 443)
// Node-to-node TLS encryption for RQLite Raft communication
// Required when using SNI gateway for Raft traffic routing
NodeCert string // Path to X.509 certificate for node-to-node communication
NodeKey string // Path to X.509 private key for node-to-node communication
NodeCACert string // Path to CA certificate (optional)
NodeNoVerify bool // Skip certificate verification (for self-signed certs)
}
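A sketch of how this struct might be populated for an HTTPS-enabled node (values are illustrative, drawn from the ports and defaults used elsewhere in this diff, not canonical):
data := NodeConfigData{
	NodeID:                 "node-1",
	P2PPort:                4001,
	DataDir:                "/home/debros/.orama/data",
	RQLiteHTTPPort:         5001,
	RQLiteRaftPort:         7001, // advertised; the SNI gateway listens here
	RQLiteRaftInternalPort: 7002, // local bind when SNI is enabled
	ClusterAPIPort:         9094,
	IPFSAPIPort:            4501,
	UnifiedGatewayPort:     6001,
	Domain:                 "node-123.orama.network",
	EnableHTTPS:            true,
	TLSCacheDir:            "/home/debros/.orama/tls-cache",
	HTTPPort:               80,
	HTTPSPort:              443,
}
cfg, err := RenderNodeConfig(data)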
// GatewayConfigData holds parameters for gateway.yaml rendering
@ -56,63 +55,46 @@ type GatewayConfigData struct {
// OlricConfigData holds parameters for olric.yaml rendering
type OlricConfigData struct {
BindAddr string
HTTPPort int
MemberlistPort int
ServerBindAddr string // HTTP API bind address (127.0.0.1 for security)
HTTPPort int
MemberlistBindAddr string // Memberlist bind address (0.0.0.0 for clustering)
MemberlistPort int
MemberlistEnvironment string // "local", "lan", or "wan"
}
// SystemdIPFSData holds parameters for systemd IPFS service rendering
type SystemdIPFSData struct {
NodeType string
HomeDir string
IPFSRepoPath string
SecretsDir string
DebrosDir string
OramaDir string
}
// SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering
type SystemdIPFSClusterData struct {
NodeType string
HomeDir string
ClusterPath string
DebrosDir string
}
// SystemdRQLiteData holds parameters for systemd RQLite service rendering
type SystemdRQLiteData struct {
NodeType string
HomeDir string
HTTPPort int
RaftPort int
DataDir string
JoinAddr string
DebrosDir string
OramaDir string
}
// SystemdOlricData holds parameters for systemd Olric service rendering
type SystemdOlricData struct {
HomeDir string
ConfigPath string
DebrosDir string
OramaDir string
}
// SystemdNodeData holds parameters for systemd Node service rendering
type SystemdNodeData struct {
NodeType string
HomeDir string
ConfigFile string
DebrosDir string
OramaDir string
}
// SystemdGatewayData holds parameters for systemd Gateway service rendering
type SystemdGatewayData struct {
HomeDir string
DebrosDir string
}
// RenderBootstrapConfig renders the bootstrap config template with the given data
func RenderBootstrapConfig(data BootstrapConfigData) (string, error) {
return renderTemplate("bootstrap.yaml", data)
OramaDir string
}
// RenderNodeConfig renders the node config template with the given data
@ -140,11 +122,6 @@ func RenderIPFSClusterService(data SystemdIPFSClusterData) (string, error) {
return renderTemplate("systemd_ipfs_cluster.service", data)
}
// RenderRQLiteService renders the RQLite systemd service template
func RenderRQLiteService(data SystemdRQLiteData) (string, error) {
return renderTemplate("systemd_rqlite.service", data)
}
// RenderOlricService renders the Olric systemd service template
func RenderOlricService(data SystemdOlricData) (string, error) {
return renderTemplate("systemd_olric.service", data)


@ -5,46 +5,12 @@ import (
"testing"
)
func TestRenderBootstrapConfig(t *testing.T) {
data := BootstrapConfigData{
NodeID: "bootstrap",
P2PPort: 4001,
DataDir: "/home/debros/.debros/bootstrap",
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
IPFSAPIPort: 5001,
}
result, err := RenderBootstrapConfig(data)
if err != nil {
t.Fatalf("RenderBootstrapConfig failed: %v", err)
}
// Check for required fields
checks := []string{
"id: \"bootstrap\"",
"type: \"bootstrap\"",
"tcp/4001",
"rqlite_port: 5001",
"rqlite_raft_port: 7001",
"cluster_api_url: \"http://localhost:9094\"",
"api_url: \"http://localhost:5001\"",
}
for _, check := range checks {
if !strings.Contains(result, check) {
t.Errorf("Bootstrap config missing: %s", check)
}
}
}
func TestRenderNodeConfig(t *testing.T) {
bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
data := NodeConfigData{
NodeID: "node2",
P2PPort: 4002,
DataDir: "/home/debros/.debros/node2",
DataDir: "/home/debros/.orama/node2",
RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002,
RQLiteJoinAddress: "localhost:5001",
@ -61,10 +27,8 @@ func TestRenderNodeConfig(t *testing.T) {
// Check for required fields
checks := []string{
"id: \"node2\"",
"type: \"node\"",
"tcp/4002",
"rqlite_port: 5002",
"rqlite_raft_port: 7002",
"rqlite_join_address: \"localhost:5001\"",
bootstrapMultiaddr,
"cluster_api_url: \"http://localhost:9104\"",
@ -110,9 +74,11 @@ func TestRenderGatewayConfig(t *testing.T) {
func TestRenderOlricConfig(t *testing.T) {
data := OlricConfigData{
BindAddr: "127.0.0.1",
HTTPPort: 3320,
MemberlistPort: 3322,
ServerBindAddr: "127.0.0.1",
HTTPPort: 3320,
MemberlistBindAddr: "0.0.0.0",
MemberlistPort: 3322,
MemberlistEnvironment: "lan",
}
result, err := RenderOlricConfig(data)
@ -126,6 +92,7 @@ func TestRenderOlricConfig(t *testing.T) {
"bindPort: 3320",
"memberlist",
"bindPort: 3322",
"environment: lan",
}
for _, check := range checks {


@ -1,7 +1,7 @@
[Unit]
Description=DeBros Gateway
After=debros-node-node.service
Wants=debros-node-node.service
After=debros-node.service
Wants=debros-node.service
[Service]
Type=simple
@ -9,7 +9,7 @@ User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/gateway --config {{.DebrosDir}}/data/gateway.yaml
ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=journal
@ -22,7 +22,7 @@ CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -20,7 +20,7 @@ SyslogIdentifier=ipfs-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -21,7 +21,7 @@ SyslogIdentifier=ipfs-cluster-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -10,7 +10,7 @@ User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/node --config {{.DebrosDir}}/configs/{{.ConfigFile}}
ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
Restart=always
RestartSec=5
StandardOutput=journal
@ -20,7 +20,7 @@ SyslogIdentifier=debros-node-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -19,7 +19,7 @@ SyslogIdentifier=olric
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -1,25 +0,0 @@
[Unit]
Description=RQLite Database ({{.NodeType}})
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:{{.HTTPPort}} -http-adv-addr 127.0.0.1:{{.HTTPPort}} -raft-adv-addr 127.0.0.1:{{.RaftPort}} -raft-addr 0.0.0.0:{{.RaftPort}}{{if .JoinAddr}} -join {{.JoinAddr}} -join-attempts 30 -join-interval 10s{{end}} {{.DataDir}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target


@ -37,6 +37,7 @@ type Config struct {
ListenAddr string
ClientNamespace string
BootstrapPeers []string
NodePeerID string // The node's actual peer ID from its identity file
// Optional DSN for rqlite database/sql driver, e.g. "http://localhost:4001"
// If empty, defaults to "http://localhost:4001".
@ -45,7 +46,7 @@ type Config struct {
// HTTPS configuration
EnableHTTPS bool // Enable HTTPS with ACME (Let's Encrypt)
DomainName string // Domain name for HTTPS certificate
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.debros/tls-cache)
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.orama/tls-cache)
// Olric cache configuration
OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"]
@ -60,12 +61,13 @@ type Config struct {
}
type Gateway struct {
logger *logging.ColoredLogger
cfg *Config
client client.NetworkClient
startedAt time.Time
signingKey *rsa.PrivateKey
keyID string
logger *logging.ColoredLogger
cfg *Config
client client.NetworkClient
nodePeerID string // The node's actual peer ID from its identity file (overrides client's peer ID)
startedAt time.Time
signingKey *rsa.PrivateKey
keyID string
// rqlite SQL connection and HTTP ORM gateway
sqlDB *sql.DB
@ -115,7 +117,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
logger.ComponentInfo(logging.ComponentClient, "Network client connected",
zap.String("namespace", cliCfg.AppName),
zap.Int("bootstrap_peer_count", len(cliCfg.BootstrapPeers)),
zap.Int("peer_count", len(cliCfg.BootstrapPeers)),
)
logger.ComponentInfo(logging.ComponentGeneral, "Creating gateway instance...")
@ -123,6 +125,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
logger: logger,
cfg: cfg,
client: c,
nodePeerID: cfg.NodePeerID,
startedAt: time.Now(),
localSubscribers: make(map[string][]*localSubscriber),
}
@ -465,10 +468,10 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
}
}
// Also check bootstrap peers from config
// Also check peers from config
if cfg := networkClient.Config(); cfg != nil {
for _, bootstrapAddr := range cfg.BootstrapPeers {
ma, err := multiaddr.NewMultiaddr(bootstrapAddr)
for _, peerAddr := range cfg.BootstrapPeers {
ma, err := multiaddr.NewMultiaddr(peerAddr)
if err != nil {
continue
}
@ -514,7 +517,7 @@ type ipfsDiscoveryResult struct {
}
// discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files
// Checks bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
// Checks node-1.yaml through node-5.yaml for IPFS configuration
func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
homeDir, err := os.UserHomeDir()
if err != nil {
@ -522,10 +525,10 @@ func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
return ipfsDiscoveryResult{}
}
configDir := filepath.Join(homeDir, ".debros")
configDir := filepath.Join(homeDir, ".orama")
// Try bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
// Try all node config files for IPFS settings
configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"}
for _, filename := range configFiles {
configPath := filepath.Join(configDir, filename)

pkg/gateway/http_gateway.go Normal file

@ -0,0 +1,257 @@
package gateway
import (
"context"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"sync"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// HTTPGateway is the main reverse proxy router
type HTTPGateway struct {
logger *logging.ColoredLogger
config *config.HTTPGatewayConfig
router chi.Router
reverseProxies map[string]*httputil.ReverseProxy
mu sync.RWMutex
server *http.Server
}
// NewHTTPGateway creates a new HTTP reverse proxy gateway
func NewHTTPGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPGateway, error) {
if !cfg.Enabled {
return nil, nil
}
if logger == nil {
var err error
logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
}
gateway := &HTTPGateway{
logger: logger,
config: cfg,
router: chi.NewRouter(),
reverseProxies: make(map[string]*httputil.ReverseProxy),
}
// Set up router middleware
gateway.router.Use(middleware.RequestID)
gateway.router.Use(middleware.Logger)
gateway.router.Use(middleware.Recoverer)
gateway.router.Use(middleware.Timeout(30 * time.Second))
// Add health check endpoint
gateway.router.Get("/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{"status":"ok","node":"%s"}`, cfg.NodeName)
})
// Initialize reverse proxies and routes
if err := gateway.initializeRoutes(); err != nil {
return nil, fmt.Errorf("failed to initialize routes: %w", err)
}
gateway.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway initialized",
zap.String("node_name", cfg.NodeName),
zap.String("listen_addr", cfg.ListenAddr),
zap.Int("routes", len(cfg.Routes)),
)
return gateway, nil
}
// initializeRoutes sets up all reverse proxy routes
func (hg *HTTPGateway) initializeRoutes() error {
hg.mu.Lock()
defer hg.mu.Unlock()
for routeName, routeConfig := range hg.config.Routes {
// Validate backend URL
_, err := url.Parse(routeConfig.BackendURL)
if err != nil {
return fmt.Errorf("invalid backend URL for route %s: %w", routeName, err)
}
// Create the reverse proxy; a custom transport with per-route timeouts is attached below
proxy := &httputil.ReverseProxy{
Rewrite: func(r *httputil.ProxyRequest) {
// Keep original host for Host header
r.Out.Host = r.In.Host
// Set X-Forwarded-For header for logging
r.Out.Header.Set("X-Forwarded-For", getClientIP(r.In))
},
ErrorHandler: hg.proxyErrorHandler(routeName),
}
// Set timeout on transport
if routeConfig.Timeout > 0 {
proxy.Transport = &http.Transport{
Dial: (&net.Dialer{
Timeout: routeConfig.Timeout,
}).Dial,
ResponseHeaderTimeout: routeConfig.Timeout,
}
}
hg.reverseProxies[routeName] = proxy
// Register route handler
hg.registerRouteHandler(routeName, routeConfig, proxy)
hg.logger.ComponentInfo(logging.ComponentGeneral, "Route initialized",
zap.String("name", routeName),
zap.String("path", routeConfig.PathPrefix),
zap.String("backend", routeConfig.BackendURL),
)
}
return nil
}
// registerRouteHandler registers a route handler with the router
func (hg *HTTPGateway) registerRouteHandler(name string, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")
// Use Mount instead of Route for wildcard path handling
hg.router.Mount(pathPrefix, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
hg.handleProxyRequest(w, req, routeConfig, proxy)
}))
}
// handleProxyRequest handles a reverse proxy request
func (hg *HTTPGateway) handleProxyRequest(w http.ResponseWriter, req *http.Request, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
// Strip path prefix before forwarding
originalPath := req.URL.Path
pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")
if strings.HasPrefix(req.URL.Path, pathPrefix) {
// Remove the prefix but keep leading slash
strippedPath := strings.TrimPrefix(req.URL.Path, pathPrefix)
if strippedPath == "" {
strippedPath = "/"
}
req.URL.Path = strippedPath
}
// Update request URL to point to backend
backendURL, _ := url.Parse(routeConfig.BackendURL)
req.URL.Scheme = backendURL.Scheme
req.URL.Host = backendURL.Host
// Log the proxy request
hg.logger.ComponentInfo(logging.ComponentGeneral, "Proxy request",
zap.String("original_path", originalPath),
zap.String("stripped_path", req.URL.Path),
zap.String("backend", routeConfig.BackendURL),
zap.String("method", req.Method),
zap.String("client_ip", getClientIP(req)),
)
// Handle WebSocket upgrades if configured
if routeConfig.WebSocket && isWebSocketRequest(req) {
hg.logger.ComponentInfo(logging.ComponentGeneral, "WebSocket upgrade detected",
zap.String("path", originalPath),
)
}
// Forward the request
proxy.ServeHTTP(w, req)
}
// proxyErrorHandler returns an error handler for the reverse proxy
func (hg *HTTPGateway) proxyErrorHandler(routeName string) func(http.ResponseWriter, *http.Request, error) {
return func(w http.ResponseWriter, r *http.Request, err error) {
hg.logger.ComponentError(logging.ComponentGeneral, "Proxy error",
zap.String("route", routeName),
zap.String("path", r.URL.Path),
zap.String("method", r.Method),
zap.Error(err),
)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadGateway)
fmt.Fprintf(w, `{"error":"gateway error","route":"%s","detail":"%s"}`, routeName, err.Error())
}
}
// Start starts the HTTP gateway server
func (hg *HTTPGateway) Start(ctx context.Context) error {
if hg == nil || !hg.config.Enabled {
return nil
}
hg.server = &http.Server{
Addr: hg.config.ListenAddr,
Handler: hg.router,
}
// Listen for connections
listener, err := net.Listen("tcp", hg.config.ListenAddr)
if err != nil {
return fmt.Errorf("failed to listen on %s: %w", hg.config.ListenAddr, err)
}
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway server starting",
zap.String("node_name", hg.config.NodeName),
zap.String("listen_addr", hg.config.ListenAddr),
)
// Serve in a goroutine
go func() {
if err := hg.server.Serve(listener); err != nil && err != http.ErrServerClosed {
hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway server error", zap.Error(err))
}
}()
// Wait for context cancellation
<-ctx.Done()
return hg.Stop()
}
// Stop gracefully stops the HTTP gateway server
func (hg *HTTPGateway) Stop() error {
if hg == nil || hg.server == nil {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutting down")
if err := hg.server.Shutdown(ctx); err != nil {
hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway shutdown error", zap.Error(err))
return err
}
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutdown complete")
return nil
}
// Router returns the chi router for testing or extension
func (hg *HTTPGateway) Router() chi.Router {
return hg.router
}
// isWebSocketRequest checks if a request is a WebSocket upgrade request.
// Matching is case-insensitive and tolerates multi-token Connection headers
// such as "keep-alive, Upgrade".
func isWebSocketRequest(r *http.Request) bool {
return strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade") &&
strings.EqualFold(r.Header.Get("Upgrade"), "websocket")
}
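// Example (illustrative sketch, not used by the gateway): the path rewrite
// performed by handleProxyRequest for a route mounted at "/api/". Runnable as
// a Go example test; only fmt and strings are required.
func ExamplePathStrip() {
	pathPrefix := strings.TrimSuffix("/api/", "/")
	for _, p := range []string{"/api/users/42", "/api"} {
		stripped := strings.TrimPrefix(p, pathPrefix)
		if stripped == "" {
			stripped = "/"
		}
		fmt.Println(p, "->", stripped)
	}
	// Output:
	// /api/users/42 -> /users/42
	// /api -> /
}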

View File

@ -1,11 +1,11 @@
package gateway
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
)
type statusResponseWriter struct {
@ -28,23 +28,23 @@ func (w *statusResponseWriter) Write(b []byte) (int, error) {
// Ensure websocket upgrades work by preserving Hijacker/Flusher/Pusher
// interfaces when the underlying ResponseWriter supports them.
func (w *statusResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if h, ok := w.ResponseWriter.(http.Hijacker); ok {
return h.Hijack()
}
return nil, nil, fmt.Errorf("hijacker not supported")
}
func (w *statusResponseWriter) Flush() {
if f, ok := w.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
func (w *statusResponseWriter) Push(target string, opts *http.PushOptions) error {
if p, ok := w.ResponseWriter.(http.Pusher); ok {
return p.Push(target, opts)
}
return http.ErrNotSupported
}
// writeJSON writes JSON with status code

237
pkg/gateway/https.go Normal file
View File

@ -0,0 +1,237 @@
package gateway
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"strings"
"time"
"go.uber.org/zap"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// HTTPSGateway extends HTTPGateway with HTTPS/TLS support
type HTTPSGateway struct {
*HTTPGateway
httpsConfig *config.HTTPSConfig
certManager *autocert.Manager
httpsServer *http.Server
httpServer *http.Server // For ACME challenge and redirect
}
// NewHTTPSGateway creates a new HTTPS gateway with Let's Encrypt autocert
func NewHTTPSGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPSGateway, error) {
// First create the base HTTP gateway
base, err := NewHTTPGateway(logger, cfg)
if err != nil {
return nil, err
}
if base == nil {
return nil, nil
}
if !cfg.HTTPS.Enabled {
// Return base gateway wrapped in HTTPSGateway for consistent interface
return &HTTPSGateway{HTTPGateway: base}, nil
}
gateway := &HTTPSGateway{
HTTPGateway: base,
httpsConfig: &cfg.HTTPS,
}
// Check if using self-signed certificates or Let's Encrypt
if cfg.HTTPS.UseSelfSigned || (cfg.HTTPS.CertFile != "" && cfg.HTTPS.KeyFile != "") {
// Using self-signed or pre-existing certificates
logger.ComponentInfo(logging.ComponentGeneral, "Using self-signed or pre-configured certificates for HTTPS",
zap.String("domain", cfg.HTTPS.Domain),
zap.String("cert_file", cfg.HTTPS.CertFile),
zap.String("key_file", cfg.HTTPS.KeyFile),
)
// Don't set certManager - will use CertFile/KeyFile from config
} else if cfg.HTTPS.AutoCert {
// Use Let's Encrypt STAGING (consistent with SNI gateway)
cacheDir := cfg.HTTPS.CacheDir
if cacheDir == "" {
cacheDir = "/home/debros/.orama/tls-cache"
}
// The staging ACME directory provides much higher rate limits for testing/development
directoryURL := "https://acme-staging-v02.api.letsencrypt.org/directory"
logger.ComponentWarn(logging.ComponentGeneral,
"Using Let's Encrypt STAGING - certificates will not be trusted by production clients",
zap.String("domain", cfg.HTTPS.Domain),
)
gateway.certManager = &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(cfg.HTTPS.Domain),
Cache: autocert.DirCache(cacheDir),
Email: cfg.HTTPS.Email,
Client: &acme.Client{
DirectoryURL: directoryURL,
},
}
logger.ComponentInfo(logging.ComponentGeneral, "Let's Encrypt autocert configured",
zap.String("domain", cfg.HTTPS.Domain),
zap.String("cache_dir", cacheDir),
zap.String("acme_environment", "staging"),
)
}
return gateway, nil
}
// Start starts both HTTP (for ACME) and HTTPS servers
func (g *HTTPSGateway) Start(ctx context.Context) error {
if g == nil {
return nil
}
// If HTTPS is not enabled, just start the base HTTP gateway
if !g.httpsConfig.Enabled {
return g.HTTPGateway.Start(ctx)
}
httpPort := g.httpsConfig.HTTPPort
if httpPort == 0 {
httpPort = 80
}
httpsPort := g.httpsConfig.HTTPSPort
if httpsPort == 0 {
httpsPort = 443
}
// Start HTTP server for ACME challenge and redirect
g.httpServer = &http.Server{
Addr: fmt.Sprintf(":%d", httpPort),
Handler: g.httpHandler(),
}
go func() {
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTP server starting (ACME/redirect)",
zap.Int("port", httpPort),
)
if err := g.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
g.logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
}
}()
// Set up TLS config
tlsConfig := &tls.Config{
MinVersion: tls.VersionTLS12,
}
if g.certManager != nil {
tlsConfig.GetCertificate = g.certManager.GetCertificate
} else if g.httpsConfig.CertFile != "" && g.httpsConfig.KeyFile != "" {
cert, err := tls.LoadX509KeyPair(g.httpsConfig.CertFile, g.httpsConfig.KeyFile)
if err != nil {
return fmt.Errorf("failed to load TLS certificate: %w", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
} else {
return fmt.Errorf("HTTPS enabled but no certificate source configured")
}
// Start HTTPS server
g.httpsServer = &http.Server{
Addr: fmt.Sprintf(":%d", httpsPort),
Handler: g.router,
TLSConfig: tlsConfig,
}
listener, err := tls.Listen("tcp", g.httpsServer.Addr, tlsConfig)
if err != nil {
return fmt.Errorf("failed to create TLS listener: %w", err)
}
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway starting",
zap.String("domain", g.httpsConfig.Domain),
zap.Int("port", httpsPort),
)
go func() {
if err := g.httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
g.logger.ComponentError(logging.ComponentGeneral, "HTTPS server error", zap.Error(err))
}
}()
// Wait for context cancellation
<-ctx.Done()
return g.Stop()
}
// httpHandler returns a handler for the HTTP server (ACME challenge + redirect)
func (g *HTTPSGateway) httpHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Handle ACME challenge
if g.certManager != nil && strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
g.certManager.HTTPHandler(nil).ServeHTTP(w, r)
return
}
// Redirect HTTP to HTTPS
httpsPort := g.httpsConfig.HTTPSPort
if httpsPort == 0 {
httpsPort = 443
}
// Strip any port from the Host header first; net.SplitHostPort also handles
// bracketed IPv6 hosts, unlike a bare LastIndex(":") scan
host := r.Host
if h, _, err := net.SplitHostPort(host); err == nil {
host = h
}
target := "https://" + host + r.URL.RequestURI()
if httpsPort != 443 {
target = fmt.Sprintf("https://%s:%d%s", host, httpsPort, r.URL.RequestURI())
}
http.Redirect(w, r, target, http.StatusMovedPermanently)
})
}
// Stop gracefully stops both HTTP and HTTPS servers
func (g *HTTPSGateway) Stop() error {
if g == nil {
return nil
}
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutting down")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var errs []error
if g.httpServer != nil {
if err := g.httpServer.Shutdown(ctx); err != nil {
errs = append(errs, fmt.Errorf("HTTP server shutdown: %w", err))
}
}
if g.httpsServer != nil {
if err := g.httpsServer.Shutdown(ctx); err != nil {
errs = append(errs, fmt.Errorf("HTTPS server shutdown: %w", err))
}
}
if g.HTTPGateway.server != nil {
if err := g.HTTPGateway.Stop(); err != nil {
errs = append(errs, fmt.Errorf("base gateway shutdown: %w", err))
}
}
if len(errs) > 0 {
return fmt.Errorf("shutdown errors: %v", errs)
}
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutdown complete")
return nil
}
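// Illustrative wiring sketch (hypothetical values, not part of this file):
// constructing and starting the HTTPS gateway with autocert enabled. Field
// names mirror the config usage above; error handling is minimal on purpose.
func runHTTPSGatewayExample(logger *logging.ColoredLogger) error {
	cfg := &config.HTTPGatewayConfig{
		Enabled:    true,
		ListenAddr: ":8080",
		HTTPS: config.HTTPSConfig{
			Enabled:   true,
			AutoCert:  true,
			Domain:    "node-1.example.org",
			Email:     "ops@example.org",
			HTTPPort:  80,
			HTTPSPort: 443,
		},
	}
	gw, err := NewHTTPSGateway(logger, cfg)
	if err != nil || gw == nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	return gw.Start(ctx) // blocks until ctx is cancelled, then calls Stop
}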

View File

@ -178,8 +178,13 @@ func extractAPIKey(r *http.Request) string {
// isPublicPath returns true for routes that should be accessible without API key auth
func isPublicPath(p string) bool {
// Allow ACME challenges for Let's Encrypt certificate provisioning
if strings.HasPrefix(p, "/.well-known/acme-challenge/") {
return true
}
switch p {
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key":
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key", "/v1/network/status", "/v1/network/peers":
return true
default:
return false

View File

@ -60,24 +60,24 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
// Channel to deliver PubSub messages to WS writer
msgs := make(chan []byte, 128)
// NEW: Register as local subscriber for direct message delivery
localSub := &localSubscriber{
msgChan: msgs,
namespace: ns,
}
topicKey := fmt.Sprintf("%s.%s", ns, topic)
g.mu.Lock()
g.localSubscribers[topicKey] = append(g.localSubscribers[topicKey], localSub)
subscriberCount := len(g.localSubscribers[topicKey])
g.mu.Unlock()
g.logger.ComponentInfo("gateway", "pubsub ws: registered local subscriber",
zap.String("topic", topic),
zap.String("namespace", ns),
zap.Int("total_subscribers", subscriberCount))
// Unregister on close
defer func() {
g.mu.Lock()
@ -97,12 +97,12 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
zap.String("topic", topic),
zap.Int("remaining_subscribers", remainingCount))
}()
// Use internal auth context when interacting with client to avoid circular auth requirements
ctx := client.WithInternalAuth(r.Context())
// Apply namespace isolation
ctx = pubsub.WithNamespace(ctx, ns)
// Writer loop - START THIS FIRST before libp2p subscription
done := make(chan struct{})
go func() {
@ -122,11 +122,11 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
close(done)
return
}
g.logger.ComponentInfo("gateway", "pubsub ws: sending message to client",
zap.String("topic", topic),
zap.Int("data_len", len(b)))
// Format message as JSON envelope with data (base64 encoded), timestamp, and topic
// This matches the SDK's Message interface: {data: string, timestamp: number, topic: string}
envelope := map[string]interface{}{
@ -141,11 +141,11 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
zap.Error(err))
continue
}
g.logger.ComponentDebug("gateway", "pubsub ws: envelope created",
zap.String("topic", topic),
zap.Int("envelope_len", len(envelopeJSON)))
conn.SetWriteDeadline(time.Now().Add(30 * time.Second))
if err := conn.WriteMessage(websocket.TextMessage, envelopeJSON); err != nil {
g.logger.ComponentWarn("gateway", "pubsub ws: failed to write to websocket",
@ -154,7 +154,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
close(done)
return
}
g.logger.ComponentInfo("gateway", "pubsub ws: message sent successfully",
zap.String("topic", topic))
case <-ticker.C:
@ -173,7 +173,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
g.logger.ComponentInfo("gateway", "pubsub ws: received message from libp2p",
zap.String("topic", topic),
zap.Int("data_len", len(data)))
select {
case msgs <- data:
g.logger.ComponentInfo("gateway", "pubsub ws: forwarded to client",
@ -195,7 +195,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
}
g.logger.ComponentInfo("gateway", "pubsub ws: libp2p subscription established",
zap.String("topic", topic))
// Keep subscription alive until done
<-done
_ = g.client.PubSub().Unsubscribe(ctx, topic)
@ -212,7 +212,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
if mt != websocket.TextMessage && mt != websocket.BinaryMessage {
continue
}
// Filter out WebSocket heartbeat messages
// Don't publish them to the topic
var msg map[string]interface{}
@ -222,7 +222,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
continue
}
}
if err := g.client.PubSub().Publish(ctx, topic, data); err != nil {
// Best-effort notify client
_ = conn.WriteMessage(websocket.TextMessage, []byte("publish_error"))
@ -259,12 +259,12 @@ func (g *Gateway) pubsubPublishHandler(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusBadRequest, "invalid base64 data")
return
}
// NEW: Check for local websocket subscribers FIRST and deliver directly
g.mu.RLock()
localSubs := g.getLocalSubscribers(body.Topic, ns)
g.mu.RUnlock()
localDeliveryCount := 0
if len(localSubs) > 0 {
for _, sub := range localSubs {
@ -280,20 +280,20 @@ func (g *Gateway) pubsubPublishHandler(w http.ResponseWriter, r *http.Request) {
}
}
}
g.logger.ComponentInfo("gateway", "pubsub publish: processing message",
zap.String("topic", body.Topic),
zap.String("namespace", ns),
zap.Int("data_len", len(data)),
zap.Int("local_subscribers", len(localSubs)),
zap.Int("local_delivered", localDeliveryCount))
// Publish to libp2p asynchronously for cross-node delivery
// This prevents blocking the HTTP response if libp2p network is slow
go func() {
publishCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ctx := pubsub.WithNamespace(client.WithInternalAuth(publishCtx), ns)
if err := g.client.PubSub().Publish(ctx, body.Topic, data); err != nil {
g.logger.ComponentWarn("gateway", "async libp2p publish failed",
@ -304,7 +304,7 @@ func (g *Gateway) pubsubPublishHandler(w http.ResponseWriter, r *http.Request) {
zap.String("topic", body.Topic))
}
}()
// Return immediately after local delivery
// Local WebSocket subscribers already received the message
writeJSON(w, http.StatusOK, map[string]any{"status": "ok"})

View File

@ -181,4 +181,3 @@ func (pns *PushNotificationService) sendExpoRequest(ctx context.Context, message
return nil
}

View File

@ -386,6 +386,11 @@ func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusInternalServerError, err.Error())
return
}
// Override with the node's actual peer ID if available
// (the client's embedded host has a different temporary peer ID)
if g.nodePeerID != "" {
status.PeerID = g.nodePeerID
}
writeJSON(w, http.StatusOK, status)
}

View File

@ -0,0 +1,211 @@
package gateway
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"strings"
"sync"
"time"
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// TCPSNIGateway handles SNI-based TCP routing for services like RQLite Raft, IPFS, etc.
type TCPSNIGateway struct {
logger *logging.ColoredLogger
config *config.SNIConfig
listener net.Listener
routes map[string]string
mu sync.RWMutex
running bool
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
tlsConfig *tls.Config
}
// NewTCPSNIGateway creates a new TCP SNI-based gateway
func NewTCPSNIGateway(logger *logging.ColoredLogger, cfg *config.SNIConfig) (*TCPSNIGateway, error) {
if !cfg.Enabled {
return nil, nil
}
if logger == nil {
var err error
logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
}
cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
if err != nil {
return nil, fmt.Errorf("failed to load TLS certificate: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
gateway := &TCPSNIGateway{
logger: logger,
config: cfg,
routes: make(map[string]string),
ctx: ctx,
cancel: cancel,
tlsConfig: &tls.Config{
Certificates: []tls.Certificate{cert},
},
}
for hostname, backend := range cfg.Routes {
gateway.routes[strings.ToLower(hostname)] = backend
}
logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway initialized",
zap.String("listen_addr", cfg.ListenAddr),
zap.Int("routes", len(cfg.Routes)),
)
return gateway, nil
}
// Start starts the TCP SNI gateway server
func (g *TCPSNIGateway) Start(ctx context.Context) error {
if g == nil || !g.config.Enabled {
return nil
}
listener, err := tls.Listen("tcp", g.config.ListenAddr, g.tlsConfig)
if err != nil {
return fmt.Errorf("failed to listen on %s: %w", g.config.ListenAddr, err)
}
g.listener = listener
g.running = true
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway starting",
zap.String("listen_addr", g.config.ListenAddr),
)
g.wg.Add(1)
go func() {
defer g.wg.Done()
for {
conn, err := listener.Accept()
if err != nil {
select {
case <-g.ctx.Done():
return
default:
g.logger.ComponentError(logging.ComponentGeneral, "Accept error", zap.Error(err))
continue
}
}
g.wg.Add(1)
go func(c net.Conn) {
defer g.wg.Done()
g.handleConnection(c)
}(conn)
}
}()
select {
case <-ctx.Done():
case <-g.ctx.Done():
}
return g.Stop()
}
// handleConnection routes a TCP connection based on SNI
func (g *TCPSNIGateway) handleConnection(conn net.Conn) {
defer conn.Close()
tlsConn, ok := conn.(*tls.Conn)
if !ok {
g.logger.ComponentError(logging.ComponentGeneral, "Expected TLS connection")
return
}
if err := tlsConn.Handshake(); err != nil {
g.logger.ComponentError(logging.ComponentGeneral, "TLS handshake failed", zap.Error(err))
return
}
serverName := strings.ToLower(tlsConn.ConnectionState().ServerName)
if serverName == "" {
g.logger.ComponentError(logging.ComponentGeneral, "No SNI provided")
return
}
g.mu.RLock()
backend, found := g.routes[serverName]
if !found {
for prefix, be := range g.routes {
if strings.HasPrefix(serverName, prefix+".") {
backend = be
found = true
break
}
}
}
g.mu.RUnlock()
if !found {
g.logger.ComponentError(logging.ComponentGeneral, "No route for SNI",
zap.String("server_name", serverName),
)
return
}
g.logger.ComponentInfo(logging.ComponentGeneral, "Routing connection",
zap.String("server_name", serverName),
zap.String("backend", backend),
)
backendConn, err := net.DialTimeout("tcp", backend, 10*time.Second)
if err != nil {
g.logger.ComponentError(logging.ComponentGeneral, "Backend connect failed",
zap.String("backend", backend),
zap.Error(err),
)
return
}
defer backendConn.Close()
errc := make(chan error, 2)
go func() { _, err := io.Copy(backendConn, tlsConn); errc <- err }()
go func() { _, err := io.Copy(tlsConn, backendConn); errc <- err }()
<-errc
}
// Stop gracefully stops the TCP SNI gateway
func (g *TCPSNIGateway) Stop() error {
if g == nil || !g.running {
return nil
}
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutting down")
g.cancel()
if g.listener != nil {
g.listener.Close()
}
done := make(chan struct{})
go func() { g.wg.Wait(); close(done) }()
select {
case <-done:
case <-time.After(10 * time.Second):
g.logger.ComponentWarn(logging.ComponentGeneral, "Shutdown timeout")
}
g.running = false
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutdown complete")
return nil
}
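// Illustrative sketch of the SNI matching rules in handleConnection above:
// exact hostname first, then a fallback where a route key acts as a leading
// label. Hostnames and backends are hypothetical.
func ExampleSNIMatch() {
	routes := map[string]string{
		"ipfs.node-1.example.org": "127.0.0.1:4001", // exact match
		"olric":                   "127.0.0.1:3320", // leading-label match
	}
	lookup := func(serverName string) string {
		if be, ok := routes[serverName]; ok {
			return be
		}
		for prefix, be := range routes {
			if strings.HasPrefix(serverName, prefix+".") {
				return be
			}
		}
		return "no route"
	}
	fmt.Println(lookup("olric.node-1.example.org"))
	// Output: 127.0.0.1:3320
}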

956
pkg/installer/installer.go Normal file
View File

@ -0,0 +1,956 @@
// Package installer provides an interactive TUI installer for Orama Network
package installer
import (
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/charmbracelet/bubbles/textinput"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/DeBrosOfficial/network/pkg/certutil"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
)
// InstallerConfig holds the configuration gathered from the TUI
type InstallerConfig struct {
VpsIP string
Domain string
PeerDomain string // Domain of existing node to join
PeerIP string // Resolved IP of peer domain (for Raft join)
JoinAddress string // Auto-populated: {PeerIP}:7002 (direct RQLite TLS)
Peers []string // Auto-populated: /dns4/{PeerDomain}/tcp/4001/p2p/{PeerID}
ClusterSecret string
SwarmKeyHex string // 64-hex IPFS swarm key (for joining private network)
IPFSPeerID string // IPFS peer ID (auto-discovered from peer domain)
IPFSSwarmAddrs []string // IPFS swarm addresses (auto-discovered from peer domain)
// IPFS Cluster peer info for cluster discovery
IPFSClusterPeerID string // IPFS Cluster peer ID (auto-discovered from peer domain)
IPFSClusterAddrs []string // IPFS Cluster addresses (auto-discovered from peer domain)
Branch string
IsFirstNode bool
NoPull bool
}
// Step represents a step in the installation wizard
type Step int
const (
StepWelcome Step = iota
StepNodeType
StepVpsIP
StepDomain
StepPeerDomain // Domain of existing node to join (replaces StepJoinAddress)
StepClusterSecret
StepSwarmKey // 64-hex swarm key for IPFS private network
StepBranch
StepNoPull
StepConfirm
StepInstalling
StepDone
)
// Model is the bubbletea model for the installer
type Model struct {
step Step
config InstallerConfig
textInput textinput.Model
err error
width int
height int
installing bool
installOutput []string
cursor int // For selection menus
discovering bool // Whether domain discovery is in progress
discoveryInfo string // Info message during discovery
discoveredPeer string // Discovered peer ID from domain
sniWarning string // Warning about missing SNI DNS records (non-blocking)
}
// Styles
var (
titleStyle = lipgloss.NewStyle().
Bold(true).
Foreground(lipgloss.Color("#00D4AA")).
MarginBottom(1)
subtitleStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#888888")).
MarginBottom(1)
focusedStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#00D4AA"))
blurredStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#666666"))
cursorStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#00D4AA"))
helpStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#626262")).
MarginTop(1)
errorStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#FF6B6B")).
Bold(true)
successStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#00D4AA")).
Bold(true)
boxStyle = lipgloss.NewStyle().
Border(lipgloss.RoundedBorder()).
BorderForeground(lipgloss.Color("#00D4AA")).
Padding(1, 2)
)
// NewModel creates a new installer model
func NewModel() Model {
ti := textinput.New()
ti.Focus()
ti.CharLimit = 256
ti.Width = 50
return Model{
step: StepWelcome,
textInput: ti,
config: InstallerConfig{
Branch: "main",
},
}
}
// Init initializes the model
func (m Model) Init() tea.Cmd {
return textinput.Blink
}
// Update handles messages
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.WindowSizeMsg:
m.width = msg.Width
m.height = msg.Height
return m, nil
case installCompleteMsg:
m.step = StepDone
return m, nil
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c", "q":
if m.step != StepInstalling {
return m, tea.Quit
}
case "enter":
return m.handleEnter()
case "up", "k":
if m.step == StepNodeType || m.step == StepBranch || m.step == StepNoPull {
if m.cursor > 0 {
m.cursor--
}
}
case "down", "j":
if m.step == StepNodeType || m.step == StepBranch || m.step == StepNoPull {
if m.cursor < 1 {
m.cursor++
}
}
case "esc":
if m.step > StepWelcome && m.step < StepInstalling {
m.step--
m.err = nil
m.setupStepInput()
}
}
}
// Update text input for input steps
if m.step == StepVpsIP || m.step == StepDomain || m.step == StepPeerDomain || m.step == StepClusterSecret || m.step == StepSwarmKey {
var cmd tea.Cmd
m.textInput, cmd = m.textInput.Update(msg)
return m, cmd
}
return m, nil
}
func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
switch m.step {
case StepWelcome:
m.step = StepNodeType
m.cursor = 0
case StepNodeType:
m.config.IsFirstNode = m.cursor == 0
m.step = StepVpsIP
m.setupStepInput()
case StepVpsIP:
ip := strings.TrimSpace(m.textInput.Value())
if err := validateIP(ip); err != nil {
m.err = err
return m, nil
}
m.config.VpsIP = ip
m.err = nil
m.step = StepDomain
m.setupStepInput()
case StepDomain:
domain := strings.TrimSpace(m.textInput.Value())
if err := validateDomain(domain); err != nil {
m.err = err
return m, nil
}
// Check SNI DNS records for this domain (non-blocking warning)
m.discovering = true
m.discoveryInfo = "Checking SNI DNS records for " + domain + "..."
if warning := validateSNIDNSRecords(domain); warning != "" {
// Log warning but continue - SNI DNS is optional for single-node setups
m.sniWarning = warning
}
m.discovering = false
m.config.Domain = domain
m.err = nil
// Auto-generate self-signed certificates for this domain
m.discovering = true
m.discoveryInfo = "Generating SSL certificates for " + domain + "..."
if err := ensureCertificatesForDomain(domain); err != nil {
m.discovering = false
m.err = fmt.Errorf("failed to generate certificates: %w", err)
return m, nil
}
m.discovering = false
if m.config.IsFirstNode {
m.step = StepBranch
m.cursor = 0
} else {
m.step = StepPeerDomain
m.setupStepInput()
}
case StepPeerDomain:
peerDomain := strings.TrimSpace(m.textInput.Value())
if err := validateDomain(peerDomain); err != nil {
m.err = err
return m, nil
}
// Check SNI DNS records for peer domain (non-blocking warning)
m.discovering = true
m.discoveryInfo = "Checking SNI DNS records for " + peerDomain + "..."
if warning := validateSNIDNSRecords(peerDomain); warning != "" {
// Log warning but continue - peer might have different DNS setup
m.sniWarning = warning
}
// Discover peer info from domain (try HTTPS first, then HTTP)
m.discovering = true
m.discoveryInfo = "Discovering peer from " + peerDomain + "..."
discovery, err := discoverPeerFromDomain(peerDomain)
m.discovering = false
if err != nil {
m.err = fmt.Errorf("failed to discover peer: %w", err)
return m, nil
}
// Store discovered info
m.config.PeerDomain = peerDomain
m.discoveredPeer = discovery.PeerID
// Resolve peer domain to IP for direct RQLite TLS connection
// RQLite uses native TLS on port 7002 (not SNI gateway on 7001)
peerIPs, err := net.LookupIP(peerDomain)
if err != nil || len(peerIPs) == 0 {
m.err = fmt.Errorf("failed to resolve peer domain %s to IP: %w", peerDomain, err)
return m, nil
}
// Prefer IPv4
var peerIP string
for _, ip := range peerIPs {
if ip.To4() != nil {
peerIP = ip.String()
break
}
}
if peerIP == "" {
peerIP = peerIPs[0].String()
}
m.config.PeerIP = peerIP
// Auto-populate join address (direct RQLite TLS on port 7002) and bootstrap peers
m.config.JoinAddress = fmt.Sprintf("%s:7002", peerIP)
m.config.Peers = []string{
fmt.Sprintf("/dns4/%s/tcp/4001/p2p/%s", peerDomain, discovery.PeerID),
}
// Store IPFS peer info for Peering.Peers configuration
if discovery.IPFSPeerID != "" {
m.config.IPFSPeerID = discovery.IPFSPeerID
m.config.IPFSSwarmAddrs = discovery.IPFSSwarmAddrs
}
// Store IPFS Cluster peer info for cluster peer_addresses configuration
if discovery.IPFSClusterPeerID != "" {
m.config.IPFSClusterPeerID = discovery.IPFSClusterPeerID
m.config.IPFSClusterAddrs = discovery.IPFSClusterAddrs
}
m.err = nil
m.step = StepClusterSecret
m.setupStepInput()
case StepClusterSecret:
secret := strings.TrimSpace(m.textInput.Value())
if err := validateClusterSecret(secret); err != nil {
m.err = err
return m, nil
}
m.config.ClusterSecret = secret
m.err = nil
m.step = StepSwarmKey
m.setupStepInput()
case StepSwarmKey:
swarmKey := strings.TrimSpace(m.textInput.Value())
if err := validateSwarmKey(swarmKey); err != nil {
m.err = err
return m, nil
}
m.config.SwarmKeyHex = swarmKey
m.err = nil
m.step = StepBranch
m.cursor = 0
case StepBranch:
if m.cursor == 0 {
m.config.Branch = "main"
} else {
m.config.Branch = "nightly"
}
m.cursor = 0 // Reset cursor for next step
m.step = StepNoPull
case StepNoPull:
if m.cursor == 0 {
m.config.NoPull = false
} else {
m.config.NoPull = true
}
m.step = StepConfirm
case StepConfirm:
m.step = StepInstalling
return m, m.startInstallation()
case StepDone:
return m, tea.Quit
}
return m, nil
}
func (m *Model) setupStepInput() {
m.textInput.Reset()
m.textInput.Focus()
m.textInput.EchoMode = textinput.EchoNormal // Reset echo mode
switch m.step {
case StepVpsIP:
m.textInput.Placeholder = "e.g., 203.0.113.1"
// Try to auto-detect public IP
if ip := detectPublicIP(); ip != "" {
m.textInput.SetValue(ip)
}
case StepDomain:
m.textInput.Placeholder = "e.g., node-1.orama.network"
case StepPeerDomain:
m.textInput.Placeholder = "e.g., node-123.orama.network"
case StepClusterSecret:
m.textInput.Placeholder = "64 hex characters"
m.textInput.EchoMode = textinput.EchoPassword
case StepSwarmKey:
m.textInput.Placeholder = "64 hex characters"
m.textInput.EchoMode = textinput.EchoPassword
}
}
func (m Model) startInstallation() tea.Cmd {
return func() tea.Msg {
// This would trigger the actual installation
// For now, we return the config for the CLI to handle
return installCompleteMsg{config: m.config}
}
}
type installCompleteMsg struct {
config InstallerConfig
}
// View renders the UI
func (m Model) View() string {
var s strings.Builder
// Header
s.WriteString(renderHeader())
s.WriteString("\n\n")
switch m.step {
case StepWelcome:
s.WriteString(m.viewWelcome())
case StepNodeType:
s.WriteString(m.viewNodeType())
case StepVpsIP:
s.WriteString(m.viewVpsIP())
case StepDomain:
s.WriteString(m.viewDomain())
case StepPeerDomain:
s.WriteString(m.viewPeerDomain())
case StepClusterSecret:
s.WriteString(m.viewClusterSecret())
case StepSwarmKey:
s.WriteString(m.viewSwarmKey())
case StepBranch:
s.WriteString(m.viewBranch())
case StepNoPull:
s.WriteString(m.viewNoPull())
case StepConfirm:
s.WriteString(m.viewConfirm())
case StepInstalling:
s.WriteString(m.viewInstalling())
case StepDone:
s.WriteString(m.viewDone())
}
return s.String()
}
func renderHeader() string {
logo := `
___ ____ _ __ __ _
/ _ \| _ \ / \ | \/ | / \
| | | | |_) | / _ \ | |\/| | / _ \
| |_| | _ < / ___ \| | | |/ ___ \
\___/|_| \_\/_/ \_\_| |_/_/ \_\
`
return titleStyle.Render(logo) + "\n" + subtitleStyle.Render("Network Installation Wizard")
}
func (m Model) viewWelcome() string {
var s strings.Builder
s.WriteString(boxStyle.Render(
titleStyle.Render("Welcome to Orama Network!") + "\n\n" +
"This wizard will guide you through setting up your node.\n\n" +
"You'll need:\n" +
" • A public IP address for your server\n" +
" • A domain name (e.g., node-1.orama.network)\n" +
" • For joining: cluster secret from existing node\n",
))
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Press Enter to continue • q to quit"))
return s.String()
}
func (m Model) viewNodeType() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Node Type") + "\n\n")
s.WriteString("Is this the first node in a new cluster?\n\n")
options := []string{"Yes, create new cluster", "No, join existing cluster"}
for i, opt := range options {
if i == m.cursor {
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
} else {
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
}
}
s.WriteString("\n")
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewVpsIP() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Server IP Address") + "\n\n")
s.WriteString("Enter your server's public IP address:\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewDomain() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Domain Name") + "\n\n")
s.WriteString("Enter the domain for this node:\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewPeerDomain() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Existing Node Domain") + "\n\n")
s.WriteString("Enter the domain of an existing node to join:\n")
s.WriteString(subtitleStyle.Render("The installer will auto-discover peer info via HTTPS/HTTP") + "\n\n")
s.WriteString(m.textInput.View())
if m.discovering {
s.WriteString("\n\n" + subtitleStyle.Render("🔍 "+m.discoveryInfo))
}
if m.discoveredPeer != "" && m.err == nil {
s.WriteString("\n\n" + successStyle.Render("✓ Discovered peer: "+m.discoveredPeer[:12]+"..."))
}
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to discover & continue • Esc to go back"))
return s.String()
}
func (m Model) viewClusterSecret() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Cluster Secret") + "\n\n")
s.WriteString("Enter the cluster secret from an existing node:\n")
s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/cluster-secret") + "\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewSwarmKey() string {
var s strings.Builder
s.WriteString(titleStyle.Render("IPFS Swarm Key") + "\n\n")
s.WriteString("Enter the swarm key from an existing node:\n")
s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/swarm.key | tail -1") + "\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewBranch() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Release Channel") + "\n\n")
s.WriteString("Select the release channel:\n\n")
options := []string{"main (stable)", "nightly (latest features)"}
for i, opt := range options {
if i == m.cursor {
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
} else {
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
}
}
s.WriteString("\n")
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewNoPull() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Git Repository") + "\n\n")
s.WriteString("Pull latest changes from repository?\n\n")
options := []string{"Pull latest (recommended)", "Skip git pull (use existing source)"}
for i, opt := range options {
if i == m.cursor {
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
} else {
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
}
}
s.WriteString("\n")
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewConfirm() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Confirm Installation") + "\n\n")
noPullStr := "Pull latest"
if m.config.NoPull {
noPullStr = "Skip git pull"
}
config := fmt.Sprintf(
" VPS IP: %s\n"+
" Domain: %s\n"+
" Branch: %s\n"+
" Git Pull: %s\n"+
" Node Type: %s\n",
m.config.VpsIP,
m.config.Domain,
m.config.Branch,
noPullStr,
map[bool]string{true: "First node (new cluster)", false: "Join existing cluster"}[m.config.IsFirstNode],
)
if !m.config.IsFirstNode {
config += fmt.Sprintf(" Peer Node: %s\n", m.config.PeerDomain)
config += fmt.Sprintf(" Join Addr: %s\n", m.config.JoinAddress)
if len(m.config.Peers) > 0 {
config += fmt.Sprintf(" Bootstrap: %s...\n", m.config.Peers[0][:40])
}
if len(m.config.ClusterSecret) >= 8 {
config += fmt.Sprintf(" Secret: %s...\n", m.config.ClusterSecret[:8])
}
if len(m.config.SwarmKeyHex) >= 8 {
config += fmt.Sprintf(" Swarm Key: %s...\n", m.config.SwarmKeyHex[:8])
}
if m.config.IPFSPeerID != "" {
config += fmt.Sprintf(" IPFS Peer: %s...\n", m.config.IPFSPeerID[:16])
}
}
s.WriteString(boxStyle.Render(config))
// Show SNI DNS warning if present
if m.sniWarning != "" {
s.WriteString("\n\n")
s.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#FFA500")).Render(m.sniWarning))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Press Enter to install • Esc to go back"))
return s.String()
}
func (m Model) viewInstalling() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Installing...") + "\n\n")
s.WriteString("Please wait while the node is being configured.\n\n")
for _, line := range m.installOutput {
s.WriteString(line + "\n")
}
return s.String()
}
func (m Model) viewDone() string {
var s strings.Builder
s.WriteString(successStyle.Render("✓ Installation Complete!") + "\n\n")
s.WriteString("Your node is now running.\n\n")
s.WriteString("Useful commands:\n")
s.WriteString(" orama status - Check service status\n")
s.WriteString(" orama logs node - View node logs\n")
s.WriteString(" orama logs gateway - View gateway logs\n")
s.WriteString("\n")
s.WriteString(helpStyle.Render("Press Enter or q to exit"))
return s.String()
}
// GetConfig returns the installer configuration after the TUI completes
func (m Model) GetConfig() InstallerConfig {
return m.config
}
// Validation helpers
func validateIP(ip string) error {
if ip == "" {
return fmt.Errorf("IP address is required")
}
if net.ParseIP(ip) == nil {
return fmt.Errorf("invalid IP address format")
}
return nil
}
func validateDomain(domain string) error {
if domain == "" {
return fmt.Errorf("domain is required")
}
// Basic domain validation
domainRegex := regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?)*$`)
if !domainRegex.MatchString(domain) {
return fmt.Errorf("invalid domain format")
}
return nil
}
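// Illustrative cases for the regex above: "node-1.orama.network" and
// "example.com" pass; "-bad.example" (leading hyphen), "foo..bar" (empty
// label), and "" are rejected.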
// DiscoveryResult contains all information discovered from a peer node
type DiscoveryResult struct {
PeerID string // LibP2P peer ID
IPFSPeerID string // IPFS peer ID
IPFSSwarmAddrs []string // IPFS swarm addresses
// IPFS Cluster info for cluster peer discovery
IPFSClusterPeerID string // IPFS Cluster peer ID
IPFSClusterAddrs []string // IPFS Cluster multiaddresses
}
// discoverPeerFromDomain queries an existing node to get its peer ID and IPFS info
// Tries HTTPS first, then falls back to HTTP
// Respects DEBROS_TRUSTED_TLS_DOMAINS and DEBROS_CA_CERT_PATH environment variables for certificate verification
func discoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
// Use centralized TLS configuration that respects CA certificates and trusted domains
client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)
// Try HTTPS first
url := fmt.Sprintf("https://%s/v1/network/status", domain)
resp, err := client.Get(url)
// If HTTPS fails, fall back to plain HTTP
if err != nil {
url = fmt.Sprintf("http://%s/v1/network/status", domain)
resp, err = client.Get(url)
if err != nil {
return nil, fmt.Errorf("could not connect to %s (tried HTTPS and HTTP): %w", domain, err)
}
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status from %s: %s", domain, resp.Status)
}
// Parse response including IPFS and IPFS Cluster info
var status struct {
PeerID string `json:"peer_id"`
NodeID string `json:"node_id"` // fallback for backward compatibility
IPFS *struct {
PeerID string `json:"peer_id"`
SwarmAddresses []string `json:"swarm_addresses"`
} `json:"ipfs,omitempty"`
IPFSCluster *struct {
PeerID string `json:"peer_id"`
Addresses []string `json:"addresses"`
} `json:"ipfs_cluster,omitempty"`
}
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
return nil, fmt.Errorf("failed to parse response from %s: %w", domain, err)
}
// Use peer_id if available, otherwise fall back to node_id for backward compatibility
peerID := status.PeerID
if peerID == "" {
peerID = status.NodeID
}
if peerID == "" {
return nil, fmt.Errorf("no peer_id or node_id in response from %s", domain)
}
result := &DiscoveryResult{
PeerID: peerID,
}
// Include IPFS info if available
if status.IPFS != nil {
result.IPFSPeerID = status.IPFS.PeerID
result.IPFSSwarmAddrs = status.IPFS.SwarmAddresses
}
// Include IPFS Cluster info if available
if status.IPFSCluster != nil {
result.IPFSClusterPeerID = status.IPFSCluster.PeerID
result.IPFSClusterAddrs = status.IPFSCluster.Addresses
}
return result, nil
}
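// Illustrative /v1/network/status payload that discoverPeerFromDomain can
// parse (field names match the struct tags above; values are hypothetical):
//
//	{
//	  "peer_id": "12D3KooWExample...",
//	  "ipfs": {
//	    "peer_id": "12D3KooWIpfsExample...",
//	    "swarm_addresses": ["/ip4/203.0.113.1/tcp/4101"]
//	  },
//	  "ipfs_cluster": {
//	    "peer_id": "12D3KooWClusterExample...",
//	    "addresses": ["/ip4/203.0.113.1/tcp/9098"]
//	  }
//	}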
func validateClusterSecret(secret string) error {
if len(secret) != 64 {
return fmt.Errorf("cluster secret must be 64 hex characters")
}
secretRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
if !secretRegex.MatchString(secret) {
return fmt.Errorf("cluster secret must be valid hexadecimal")
}
return nil
}
func validateSwarmKey(key string) error {
if len(key) != 64 {
return fmt.Errorf("swarm key must be 64 hex characters")
}
keyRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
if !keyRegex.MatchString(key) {
return fmt.Errorf("swarm key must be valid hexadecimal")
}
return nil
}
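// Hedged sketch (not part of the installer): generating a value that passes
// validateClusterSecret/validateSwarmKey above, i.e. 32 random bytes hex
// encoded. Assumes crypto/rand and encoding/hex are imported.
//
//	func generateHexSecret() (string, error) {
//		buf := make([]byte, 32)
//		if _, err := rand.Read(buf); err != nil {
//			return "", err
//		}
//		return hex.EncodeToString(buf), nil // 64 hex characters
//	}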
// ensureCertificatesForDomain generates self-signed certificates for the domain
func ensureCertificatesForDomain(domain string) error {
// Get home directory
home, err := os.UserHomeDir()
if err != nil {
return fmt.Errorf("failed to get home directory: %w", err)
}
// Create cert directory
certDir := filepath.Join(home, ".orama", "certs")
if err := os.MkdirAll(certDir, 0700); err != nil {
return fmt.Errorf("failed to create cert directory: %w", err)
}
// Create certificate manager
cm := certutil.NewCertificateManager(certDir)
// Ensure CA certificate exists
caCertPEM, caKeyPEM, err := cm.EnsureCACertificate()
if err != nil {
return fmt.Errorf("failed to ensure CA certificate: %w", err)
}
// Ensure node certificate exists for the domain
_, _, err = cm.EnsureNodeCertificate(domain, caCertPEM, caKeyPEM)
if err != nil {
return fmt.Errorf("failed to ensure node certificate: %w", err)
}
// Also create wildcard certificate if domain is not already wildcard
if !strings.HasPrefix(domain, "*.") {
wildcardDomain := "*." + domain
_, _, err = cm.EnsureNodeCertificate(wildcardDomain, caCertPEM, caKeyPEM)
if err != nil {
return fmt.Errorf("failed to ensure wildcard certificate: %w", err)
}
}
return nil
}
func detectPublicIP() string {
// Try to detect public IP from common interfaces
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil && !ipnet.IP.IsPrivate() {
return ipnet.IP.String()
}
}
}
return ""
}
// validateSNIDNSRecords checks if the required SNI DNS records exist
// It tries to resolve the key SNI hostnames for IPFS, IPFS Cluster, and Olric
// Note: Raft no longer uses SNI - it uses direct RQLite TLS on port 7002
// All should resolve to the same IP (the node's public IP or domain)
// Returns a warning string if records are missing (empty string if all OK)
func validateSNIDNSRecords(domain string) string {
// List of SNI services that need DNS records
// Note: raft.domain is NOT included - RQLite uses direct TLS on port 7002
sniServices := []string{
fmt.Sprintf("ipfs.%s", domain),
fmt.Sprintf("ipfs-cluster.%s", domain),
fmt.Sprintf("olric.%s", domain),
}
// Try to resolve the main domain first to get baseline
mainIPs, err := net.LookupHost(domain)
if err != nil {
// Main domain doesn't resolve - this is just a warning now
return fmt.Sprintf("Warning: could not resolve main domain %s: %v", domain, err)
}
if len(mainIPs) == 0 {
return fmt.Sprintf("Warning: main domain %s resolved to no IP addresses", domain)
}
// Check each SNI service
var unresolvedServices []string
for _, service := range sniServices {
ips, err := net.LookupHost(service)
if err != nil || len(ips) == 0 {
unresolvedServices = append(unresolvedServices, service)
}
}
if len(unresolvedServices) > 0 {
serviceList := strings.Join(unresolvedServices, ", ")
return fmt.Sprintf(
"⚠️ SNI DNS records not found for: %s\n"+
" For multi-node clustering, add wildcard CNAME: *.%s -> %s\n"+
" (Continuing anyway - single-node setup will work)",
serviceList, domain, domain,
)
}
return ""
}
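// Example DNS records (illustrative) that satisfy the checks above with a
// single wildcard entry:
//
//	node-1.example.org.    A      203.0.113.1
//	*.node-1.example.org.  CNAME  node-1.example.org.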
// Run starts the TUI installer and returns the configuration
func Run() (*InstallerConfig, error) {
// Check if running as root
if os.Geteuid() != 0 {
return nil, fmt.Errorf("installer must be run as root (use sudo)")
}
model := NewModel()
p := tea.NewProgram(&model, tea.WithAltScreen())
finalModel, err := p.Run()
if err != nil {
return nil, err
}
// Update may return either Model (value receiver) or *Model (from
// handleEnter), so handle both instead of a bare type assertion
var m Model
switch fm := finalModel.(type) {
case *Model:
m = *fm
case Model:
m = fm
default:
return nil, fmt.Errorf("unexpected model type %T", finalModel)
}
if m.step == StepInstalling || m.step == StepDone {
config := m.GetConfig()
return &config, nil
}
return nil, fmt.Errorf("installation cancelled")
}

View File

@ -19,6 +19,7 @@ import (
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
"github.com/libp2p/go-libp2p/core/host"
"github.com/multiformats/go-multiaddr"
)
@ -86,9 +87,9 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
}
// Determine cluster path based on data directory structure
// Check if dataDir contains specific node names (e.g., ~/.orama/node-1, ~/.orama/node-2, etc.)
clusterPath := filepath.Join(dataDir, "ipfs-cluster")
nodeNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
for _, nodeName := range nodeNames {
if strings.Contains(dataDir, nodeName) {
// Check if this is a direct child
@ -102,12 +103,17 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
}
// Load or generate cluster secret
// Always use ~/.orama/secrets/cluster-secret (new standard location)
secretPath := filepath.Join(dataDir, "..", "cluster-secret")
if strings.Contains(dataDir, ".debros") {
// Try to find cluster-secret in ~/.debros
if strings.Contains(dataDir, ".orama") {
// Use the secrets directory for proper file organization
home, err := os.UserHomeDir()
if err == nil {
secretPath = filepath.Join(home, ".debros", "cluster-secret")
secretsDir := filepath.Join(home, ".orama", "secrets")
// Ensure secrets directory exists
if err := os.MkdirAll(secretsDir, 0700); err == nil {
secretPath = filepath.Join(secretsDir, "cluster-secret")
}
}
}
@ -144,20 +150,23 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
return fmt.Errorf("failed to parse IPFS API URL: %w", err)
}
// Determine node name from ID or DataDir
nodeName := "node-1" // Default fallback
possibleNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
for _, name := range possibleNames {
if strings.Contains(cm.cfg.Node.DataDir, name) || strings.Contains(cm.cfg.Node.ID, name) {
nodeName = name
break
}
}
// If ID contains a node identifier, use it
if cm.cfg.Node.ID != "" {
for _, name := range possibleNames {
if strings.Contains(cm.cfg.Node.ID, name) {
nodeName = name
break
}
}
if nodeName == "node" || nodeName == "bootstrap" {
nodeName = cm.cfg.Node.Type
}
}
// Calculate ports based on pattern
@ -219,52 +228,52 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
return nil
}
// UpdatePeerAddresses updates peer_addresses and peerstore with peer information
// Returns true if update was successful, false if peer is not available yet (non-fatal)
func (cm *ClusterConfigManager) UpdatePeerAddresses(peerAPIURL string) (bool, error) {
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
return false, nil // IPFS not configured
}
// Skip if this is the first node (creates the cluster, no join address)
if cm.cfg.Database.RQLiteJoinAddress == "" {
return false, nil
}
// Query peer cluster API to get peer ID
peerID, err := getPeerID(peerAPIURL)
if err != nil {
// Non-fatal: peer might not be available yet
cm.logger.Debug("Peer not available yet, will retry",
zap.String("peer_api", peerAPIURL),
zap.Error(err))
return false, nil
}
if peerID == "" {
cm.logger.Debug("Bootstrap peer ID not available yet")
cm.logger.Debug("Peer ID not available yet")
return false, nil
}
// Extract peer host and cluster port from URL
peerHost, clusterPort, err := parsePeerHostAndPort(peerAPIURL)
if err != nil {
return false, fmt.Errorf("failed to parse peer cluster API URL: %w", err)
}
// Peer cluster LibP2P listens on clusterPort + 4
// (REST API is 9094, LibP2P is 9098 = 9094 + 4)
peerClusterPort := clusterPort + 4
// Determine IP protocol (ip4 or ip6) based on the host
var ipProtocol string
if net.ParseIP(peerHost).To4() != nil {
ipProtocol = "ip4"
} else {
ipProtocol = "ip6"
}
peerAddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, peerHost, peerClusterPort, peerID)
// Load current config
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
@ -284,32 +293,32 @@ func (cm *ClusterConfigManager) UpdateBootstrapPeers(bootstrapAPIURL string) (bo
if peerstoreData, err := os.ReadFile(peerstorePath); err == nil {
// Only skip update if peerstore contains EXACTLY the correct address and nothing else
existingAddrs := strings.Split(strings.TrimSpace(string(peerstoreData)), "\n")
if len(existingAddrs) == 1 && strings.TrimSpace(existingAddrs[0]) == peerAddr {
cm.logger.Debug("Peer address already correct in peerstore", zap.String("addr", peerAddr))
needsUpdate = false
}
}
if needsUpdate {
// Write ONLY the correct peer address, removing any stale entries
if err := os.WriteFile(peerstorePath, []byte(peerAddr+"\n"), 0644); err != nil {
return false, fmt.Errorf("failed to write peerstore: %w", err)
}
cm.logger.Info("Updated peerstore with bootstrap peer (cleaned stale entries)",
zap.String("addr", bootstrapPeerAddr),
cm.logger.Info("Updated peerstore with peer (cleaned stale entries)",
zap.String("addr", peerAddr),
zap.String("peerstore_path", peerstorePath))
}
// Then sync service.json from peerstore to keep them in sync
cfg.Cluster.PeerAddresses = []string{peerAddr}
// Save config
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
return false, fmt.Errorf("failed to save config: %w", err)
}
cm.logger.Info("Updated bootstrap peer configuration",
zap.String("bootstrap_peer_addr", bootstrapPeerAddr),
cm.logger.Info("Updated peer configuration",
zap.String("peer_addr", peerAddr),
zap.String("peerstore_path", peerstorePath))
return true, nil
@ -325,7 +334,7 @@ func (cm *ClusterConfigManager) UpdateAllClusterPeers() (bool, error) {
}
// Query local cluster API to get all peers
client := newStandardHTTPClient()
peersURL := fmt.Sprintf("%s/peers", cm.cfg.Database.IPFS.ClusterAPIURL)
resp, err := client.Get(peersURL)
if err != nil {
@ -480,56 +489,221 @@ func (cm *ClusterConfigManager) UpdateAllClusterPeers() (bool, error) {
return true, nil
}
// RepairPeerConfiguration automatically discovers and repairs peer configuration
// Tries multiple methods: gateway /v1/network/status, config-based discovery, peer multiaddr
func (cm *ClusterConfigManager) RepairPeerConfiguration() (bool, error) {
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
return false, nil // IPFS not configured
}
// Method 1: Try to discover cluster peers via /v1/network/status endpoint
// This is the most reliable method as it uses the HTTPS gateway
if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
success, err := cm.DiscoverClusterPeersFromGateway()
if err != nil {
cm.logger.Debug("Gateway discovery failed, trying direct API", zap.Error(err))
} else if success {
cm.logger.Info("Successfully discovered cluster peers from gateway")
return true, nil
}
}
// Skip direct API method if this is the first node (creates the cluster, no join address)
if cm.cfg.Database.RQLiteJoinAddress == "" {
return false, nil
}
// Method 2: Try direct cluster API (fallback)
var peerAPIURL string
// Try to extract from peers multiaddr
if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
if ip := extractIPFromMultiaddrForCluster(cm.cfg.Discovery.BootstrapPeers[0]); ip != "" {
// Default cluster API port is 9094
peerAPIURL = fmt.Sprintf("http://%s:9094", ip)
cm.logger.Debug("Inferred peer cluster API from peer",
zap.String("peer_api", peerAPIURL))
}
}
// Fallback to localhost if nothing found (for local development)
if bootstrapAPIURL == "" {
bootstrapAPIURL = "http://localhost:9094"
cm.logger.Debug("Using localhost fallback for bootstrap cluster API")
if peerAPIURL == "" {
peerAPIURL = "http://localhost:9094"
cm.logger.Debug("Using localhost fallback for peer cluster API")
}
// Try to update peers
success, err := cm.UpdatePeerAddresses(peerAPIURL)
if err != nil {
return false, err
}
if success {
cm.logger.Info("Successfully repaired bootstrap peer configuration")
cm.logger.Info("Successfully repaired peer configuration via direct API")
return true, nil
}
// If update failed (peer not available), return false but no error
// This allows retries later
return false, nil
}
// DiscoverClusterPeersFromGateway queries bootstrap peers' /v1/network/status endpoint
// to discover IPFS Cluster peer information and updates the local service.json
func (cm *ClusterConfigManager) DiscoverClusterPeersFromGateway() (bool, error) {
if len(cm.cfg.Discovery.BootstrapPeers) == 0 {
cm.logger.Debug("No bootstrap peers configured, skipping gateway discovery")
return false, nil
}
var discoveredPeers []string
seenPeers := make(map[string]bool)
for _, peerAddr := range cm.cfg.Discovery.BootstrapPeers {
// Extract domain or IP from multiaddr
domain := extractDomainFromMultiaddr(peerAddr)
if domain == "" {
continue
}
// Query /v1/network/status endpoint
statusURL := fmt.Sprintf("https://%s/v1/network/status", domain)
cm.logger.Debug("Querying peer network status", zap.String("url", statusURL))
// Use TLS-aware HTTP client (handles staging certs for *.debros.network)
client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)
resp, err := client.Get(statusURL)
if err != nil {
// Try HTTP fallback
statusURL = fmt.Sprintf("http://%s/v1/network/status", domain)
resp, err = client.Get(statusURL)
if err != nil {
cm.logger.Debug("Failed to query peer status", zap.String("domain", domain), zap.Error(err))
continue
}
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
cm.logger.Debug("Peer returned non-OK status", zap.String("domain", domain), zap.Int("status", resp.StatusCode))
continue
}
// Parse response
var status struct {
IPFSCluster *struct {
PeerID string `json:"peer_id"`
Addresses []string `json:"addresses"`
} `json:"ipfs_cluster"`
}
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
cm.logger.Debug("Failed to decode peer status", zap.String("domain", domain), zap.Error(err))
continue
}
if status.IPFSCluster == nil || status.IPFSCluster.PeerID == "" {
cm.logger.Debug("Peer has no IPFS Cluster info", zap.String("domain", domain))
continue
}
// Extract IP from domain or addresses
peerIP := extractIPFromMultiaddrForCluster(peerAddr)
if peerIP == "" {
// Try to resolve domain
ips, err := net.LookupIP(domain)
if err == nil && len(ips) > 0 {
for _, ip := range ips {
if ip.To4() != nil {
peerIP = ip.String()
break
}
}
}
}
if peerIP == "" {
cm.logger.Debug("Could not determine peer IP", zap.String("domain", domain))
continue
}
// Construct cluster multiaddr
// IPFS Cluster listens on port 9098 (REST API port 9094 + 4)
clusterAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, status.IPFSCluster.PeerID)
if !seenPeers[clusterAddr] {
discoveredPeers = append(discoveredPeers, clusterAddr)
seenPeers[clusterAddr] = true
cm.logger.Info("Discovered cluster peer from gateway",
zap.String("domain", domain),
zap.String("peer_id", status.IPFSCluster.PeerID),
zap.String("cluster_addr", clusterAddr))
}
}
if len(discoveredPeers) == 0 {
cm.logger.Debug("No cluster peers discovered from gateway")
return false, nil
}
// Load current config
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
if err != nil {
return false, fmt.Errorf("failed to load config: %w", err)
}
// Update peerstore file
peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
peerstoreContent := strings.Join(discoveredPeers, "\n") + "\n"
if err := os.WriteFile(peerstorePath, []byte(peerstoreContent), 0644); err != nil {
cm.logger.Warn("Failed to update peerstore file", zap.Error(err))
}
// Update peer_addresses in config
cfg.Cluster.PeerAddresses = discoveredPeers
// Save config
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
return false, fmt.Errorf("failed to save config: %w", err)
}
cm.logger.Info("Updated cluster peer addresses from gateway discovery",
zap.Int("peer_count", len(discoveredPeers)),
zap.Strings("peer_addresses", discoveredPeers))
return true, nil
}
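// Illustrative response shape for /v1/network/status (field names match the
// struct decoded above; values are made up):
//
//   {
//     "ipfs_cluster": {
//       "peer_id": "12D3KooWExample",
//       "addresses": ["/ip4/203.0.113.7/tcp/9096"]
//     }
//   }
//
// With a resolved peer IP of 203.0.113.7, this becomes the peerstore line
// "/ip4/203.0.113.7/tcp/9098/p2p/12D3KooWExample".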
// extractDomainFromMultiaddr extracts domain or IP from a multiaddr string
// Handles formats like /dns4/domain/tcp/port/p2p/id or /ip4/ip/tcp/port/p2p/id
func extractDomainFromMultiaddr(multiaddrStr string) string {
ma, err := multiaddr.NewMultiaddr(multiaddrStr)
if err != nil {
return ""
}
// Try DNS4 first (domain name)
if domain, err := ma.ValueForProtocol(multiaddr.P_DNS4); err == nil && domain != "" {
return domain
}
// Try DNS6
if domain, err := ma.ValueForProtocol(multiaddr.P_DNS6); err == nil && domain != "" {
return domain
}
// Try IP4
if ip, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ip != "" {
return ip
}
// Try IP6
if ip, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ip != "" {
return ip
}
return ""
}
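// Worked examples (addresses hypothetical):
//
//   extractDomainFromMultiaddr("/dns4/node-1.debros.network/tcp/443")  // "node-1.debros.network"
//   extractDomainFromMultiaddr("/ip4/203.0.113.7/tcp/4001")            // "203.0.113.7"
//   extractDomainFromMultiaddr("not-a-multiaddr")                      // ""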
// DiscoverClusterPeersFromLibP2P loads IPFS cluster peer addresses from the peerstore file.
// If peerstore is empty, it means there are no peers to connect to.
// Returns true if peers were loaded and configured, false otherwise (non-fatal)
@ -817,16 +991,16 @@ func ensureRequiredSection(parent map[string]interface{}, key string, defaults m
}
}
// parsePeerHostAndPort extracts host and REST API port from peer API URL
func parsePeerHostAndPort(peerAPIURL string) (host string, restAPIPort int, err error) {
u, err := url.Parse(peerAPIURL)
if err != nil {
return "", 0, err
}
host = u.Hostname()
if host == "" {
return "", 0, fmt.Errorf("no host in URL: %s", bootstrapAPIURL)
return "", 0, fmt.Errorf("no host in URL: %s", peerAPIURL)
}
portStr := u.Port()
@ -908,17 +1082,17 @@ func parseIPFSPort(apiURL string) (int, error) {
return port, nil
}
// getPeerID queries the cluster API to get the peer ID
func getPeerID(apiURL string) (string, error) {
// Simple HTTP client to query /peers endpoint
client := &standardHTTPClient{}
client := newStandardHTTPClient()
resp, err := client.Get(fmt.Sprintf("%s/peers", apiURL))
if err != nil {
return "", err
}
// The /peers endpoint returns NDJSON (newline-delimited JSON)
// We need to read the first peer object to get the peer ID
dec := json.NewDecoder(bytes.NewReader(resp))
var firstPeer struct {
ID string `json:"id"`
@ -963,11 +1137,19 @@ func generateRandomSecret(length int) string {
return hex.EncodeToString(bytes)
}
// standardHTTPClient implements HTTP client using net/http with centralized TLS configuration
type standardHTTPClient struct {
client *http.Client
}
func newStandardHTTPClient() *standardHTTPClient {
return &standardHTTPClient{
client: tlsutil.NewHTTPClient(30 * time.Second),
}
}
func (c *standardHTTPClient) Get(url string) ([]byte, error) {
resp, err := c.client.Get(url)
if err != nil {
return nil, err
}
@ -1026,15 +1208,15 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
}
// Try to find IPFS repo path
// Check common locations: dataDir/ipfs/repo, dataDir/node-1/ipfs/repo, etc.
possiblePaths := []string{
filepath.Join(dataDir, "ipfs", "repo"),
filepath.Join(dataDir, "bootstrap", "ipfs", "repo"),
filepath.Join(dataDir, "node2", "ipfs", "repo"),
filepath.Join(dataDir, "node3", "ipfs", "repo"),
filepath.Join(filepath.Dir(dataDir), "bootstrap", "ipfs", "repo"),
filepath.Join(filepath.Dir(dataDir), "node2", "ipfs", "repo"),
filepath.Join(filepath.Dir(dataDir), "node3", "ipfs", "repo"),
filepath.Join(dataDir, "node-1", "ipfs", "repo"),
filepath.Join(dataDir, "node-2", "ipfs", "repo"),
filepath.Join(dataDir, "node-3", "ipfs", "repo"),
filepath.Join(filepath.Dir(dataDir), "node-1", "ipfs", "repo"),
filepath.Join(filepath.Dir(dataDir), "node-2", "ipfs", "repo"),
filepath.Join(filepath.Dir(dataDir), "node-3", "ipfs", "repo"),
}
var ipfsRepoPath string
@ -1056,7 +1238,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
return fmt.Errorf("failed to parse IPFS API URL: %w", err)
}
// Determine gateway port (typically API port + 3079, or 8080 for node-1, 8081 for node-2, etc.)
gatewayPort := 8080
if strings.Contains(dataDir, "node2") {
gatewayPort = 8081

View File

@ -54,6 +54,7 @@ const (
ComponentClient Component = "CLIENT"
ComponentGeneral Component = "GENERAL"
ComponentAnyone Component = "ANYONE"
ComponentGateway Component = "GATEWAY"
)
// getComponentColor returns the color for a specific component
@ -75,6 +76,8 @@ func getComponentColor(component Component) string {
return Yellow
case ComponentAnyone:
return Cyan
case ComponentGateway:
return BrightGreen
default:
return White
}
@ -179,6 +182,33 @@ func NewDefaultLogger(component Component) (*ColoredLogger, error) {
return NewColoredLogger(component, true)
}
// NewFileLogger creates a logger that writes to a file
func NewFileLogger(component Component, filePath string, enableColors bool) (*ColoredLogger, error) {
// Create encoder
encoder := coloredConsoleEncoder(enableColors)
// Create file writer
file, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
}
// Create core
core := zapcore.NewCore(
encoder,
zapcore.AddSync(file),
zapcore.DebugLevel,
)
// Create logger with caller information
logger := zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
return &ColoredLogger{
Logger: logger,
enableColors: enableColors,
}, nil
}
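// Hypothetical usage sketch (path assumed; ComponentGateway is defined above):
//
//   gwLog, err := NewFileLogger(ComponentGateway, "/home/debros/.orama/logs/gateway.log", false)
//   if err != nil {
//       return err
//   }
//   gwLog.ComponentInfo(ComponentGateway, "gateway logger ready")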
// Component-specific logging methods
func (l *ColoredLogger) ComponentInfo(component Component, msg string, fields ...zap.Field) {
if l.enableColors {

View File

@ -236,13 +236,11 @@ func (n *Node) startConnectionMonitoring() {
n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses updated during monitoring")
}
// Try to repair peer configuration
if success, err := n.clusterConfigManager.RepairPeerConfiguration(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair peer addresses during monitoring", zap.Error(err))
} else if success {
n.logger.ComponentInfo(logging.ComponentNode, "Peer configuration repaired during monitoring")
}
}
}

View File

@ -2,9 +2,13 @@ package node
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
mathrand "math/rand"
"net"
"net/http"
"os"
"path/filepath"
"strings"
@ -19,10 +23,13 @@ import (
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/discovery"
"github.com/DeBrosOfficial/network/pkg/encryption"
"github.com/DeBrosOfficial/network/pkg/gateway"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/pubsub"
@ -40,7 +47,7 @@ type Node struct {
clusterDiscovery *database.ClusterDiscoveryService
// Peer discovery
peerDiscoveryCancel context.CancelFunc
// PubSub
pubsub *pubsub.ClientAdapter
@ -50,6 +57,20 @@ type Node struct {
// IPFS Cluster config manager
clusterConfigManager *ipfs.ClusterConfigManager
// Full gateway (for API, auth, pubsub, and internal service routing)
apiGateway *gateway.Gateway
apiGatewayServer *http.Server
// SNI gateway (for TCP routing of raft, ipfs, olric, etc.)
sniGateway *gateway.TCPSNIGateway
// Shared certificate manager for HTTPS and SNI
certManager *autocert.Manager
// Certificate ready signal - closed when TLS certificates are extracted and ready for use
// Used to coordinate RQLite node-to-node TLS startup with certificate provisioning
certReady chan struct{}
}
// NewNode creates a new network node
@ -73,11 +94,8 @@ func (n *Node) startRQLite(ctx context.Context) error {
// Determine node identifier for log filename - use node ID for unique filenames
nodeID := n.config.Node.ID
if nodeID == "" {
// Default to "node" if ID is not set
nodeID = "node"
}
// Create RQLite manager
@ -86,19 +104,13 @@ func (n *Node) startRQLite(ctx context.Context) error {
// Initialize cluster discovery service if LibP2P host is available
if n.host != nil && n.discoveryManager != nil {
// Create cluster discovery service (all nodes are unified)
n.clusterDiscovery = database.NewClusterDiscoveryService(
n.host,
n.discoveryManager,
n.rqliteManager,
n.config.Node.ID,
"node", // Unified node type
n.config.Discovery.RaftAdvAddress,
n.config.Discovery.HttpAdvAddress,
n.config.Node.DataDir,
@ -121,6 +133,25 @@ func (n *Node) startRQLite(ctx context.Context) error {
n.logger.Info("Cluster discovery service started (waiting for RQLite)")
}
// If node-to-node TLS is configured, wait for certificates to be provisioned
// This ensures RQLite can start with TLS when joining through the SNI gateway
if n.config.Database.NodeCert != "" && n.config.Database.NodeKey != "" && n.certReady != nil {
n.logger.Info("RQLite node TLS configured, waiting for certificates to be provisioned...",
zap.String("node_cert", n.config.Database.NodeCert),
zap.String("node_key", n.config.Database.NodeKey))
// Wait for certificate ready signal with timeout
certTimeout := 5 * time.Minute
select {
case <-n.certReady:
n.logger.Info("Certificates ready, proceeding with RQLite startup")
case <-time.After(certTimeout):
return fmt.Errorf("timeout waiting for TLS certificates after %v - ensure HTTPS is configured and ports 80/443 are accessible for ACME challenges", certTimeout)
case <-ctx.Done():
return fmt.Errorf("context cancelled while waiting for certificates: %w", ctx.Err())
}
}
// Start RQLite FIRST before updating metadata
if err := n.rqliteManager.Start(ctx); err != nil {
return err
@ -143,7 +174,7 @@ func (n *Node) startRQLite(ctx context.Context) error {
return nil
}
// extractIPFromMultiaddr extracts the IP address from a peer multiaddr
// Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols
func extractIPFromMultiaddr(multiaddrStr string) string {
ma, err := multiaddr.NewMultiaddr(multiaddrStr)
@ -188,25 +219,25 @@ func extractIPFromMultiaddr(multiaddrStr string) string {
return ""
}
// peerSource returns a PeerSource that yields peers from configured peers.
func peerSource(peerAddrs []string, logger *zap.Logger) func(context.Context, int) <-chan peer.AddrInfo {
return func(ctx context.Context, num int) <-chan peer.AddrInfo {
out := make(chan peer.AddrInfo, num)
go func() {
defer close(out)
count := 0
for _, s := range peerAddrs {
if count >= num {
return
}
ma, err := multiaddr.NewMultiaddr(s)
if err != nil {
logger.Debug("invalid bootstrap multiaddr", zap.String("addr", s), zap.Error(err))
logger.Debug("invalid peer multiaddr", zap.String("addr", s), zap.Error(err))
continue
}
ai, err := peer.AddrInfoFromP2pAddr(ma)
if err != nil {
logger.Debug("failed to parse bootstrap peer", zap.String("addr", s), zap.Error(err))
logger.Debug("failed to parse peer address", zap.String("addr", s), zap.Error(err))
continue
}
select {
@ -221,8 +252,8 @@ func bootstrapPeerSource(bootstrapAddrs []string, logger *zap.Logger) func(conte
}
}
// hasPeerConnections checks if we're connected to any peers
func (n *Node) hasPeerConnections() bool {
if n.host == nil || len(n.config.Discovery.BootstrapPeers) == 0 {
return false
}
@ -232,10 +263,10 @@ func (n *Node) hasBootstrapConnections() bool {
return false
}
// Parse peer IDs
peerIDs := make(map[peer.ID]bool)
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
ma, err := multiaddr.NewMultiaddr(peerAddr)
if err != nil {
continue
}
@ -243,12 +274,12 @@ func (n *Node) hasBootstrapConnections() bool {
if err != nil {
continue
}
peerIDs[peerInfo.ID] = true
}
// Check if any connected peer is in our peer list
for _, peerID := range connectedPeers {
if peerIDs[peerID] {
return true
}
}
@ -283,8 +314,8 @@ func addJitter(interval time.Duration) time.Duration {
return result
}
// connectToPeerAddr connects to a single peer address
func (n *Node) connectToPeerAddr(ctx context.Context, addr string) error {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return fmt.Errorf("invalid multiaddr: %w", err)
@ -296,16 +327,16 @@ func (n *Node) connectToBootstrapPeer(ctx context.Context, addr string) error {
return fmt.Errorf("failed to extract peer info: %w", err)
}
// Avoid dialing ourselves: if the address resolves to our own peer ID, skip.
if n.host != nil && peerInfo.ID == n.host.ID() {
n.logger.ComponentDebug(logging.ComponentNode, "Skipping bootstrap address because it resolves to self",
n.logger.ComponentDebug(logging.ComponentNode, "Skipping peer address because it resolves to self",
zap.String("addr", addr),
zap.String("peer_id", peerInfo.ID.String()))
return nil
}
// Log resolved peer info prior to connect
n.logger.ComponentDebug(logging.ComponentNode, "Resolved bootstrap peer",
n.logger.ComponentDebug(logging.ComponentNode, "Resolved peer",
zap.String("peer_id", peerInfo.ID.String()),
zap.String("addr", addr),
zap.Int("addr_count", len(peerInfo.Addrs)),
@ -316,28 +347,28 @@ func (n *Node) connectToBootstrapPeer(ctx context.Context, addr string) error {
return fmt.Errorf("failed to connect to peer: %w", err)
}
n.logger.Info("Connected to bootstrap peer",
n.logger.Info("Connected to peer",
zap.String("peer", peerInfo.ID.String()),
zap.String("addr", addr))
return nil
}
// connectToPeers connects to configured LibP2P peers
func (n *Node) connectToPeers(ctx context.Context) error {
if len(n.config.Discovery.BootstrapPeers) == 0 {
n.logger.ComponentDebug(logging.ComponentNode, "No bootstrap peers configured")
n.logger.ComponentDebug(logging.ComponentNode, "No peers configured")
return nil
}
// Use passed context with a reasonable timeout for peer connections
connectCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
if err := n.connectToPeerAddr(connectCtx, peerAddr); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to peer",
zap.String("addr", peerAddr),
zap.Error(err))
continue
}
@ -396,7 +427,7 @@ func (n *Node) startLibP2P() error {
libp2p.EnableRelay(),
libp2p.NATPortMap(),
libp2p.EnableAutoRelayWithPeerSource(
peerSource(n.config.Discovery.BootstrapPeers, n.logger.Logger),
),
)
}
@ -422,59 +453,59 @@ func (n *Node) startLibP2P() error {
n.pubsub = pubsub.NewClientAdapter(ps, n.config.Discovery.NodeNamespace)
n.logger.Info("Initialized pubsub adapter on namespace", zap.String("namespace", n.config.Discovery.NodeNamespace))
// Log configured peers
if len(n.config.Discovery.BootstrapPeers) > 0 {
n.logger.ComponentInfo(logging.ComponentNode, "Configured bootstrap peers",
n.logger.ComponentInfo(logging.ComponentNode, "Configured peers",
zap.Strings("peers", n.config.Discovery.BootstrapPeers))
} else {
n.logger.ComponentDebug(logging.ComponentNode, "No bootstrap peers configured")
n.logger.ComponentDebug(logging.ComponentNode, "No peers configured")
}
// Connect to LibP2P peers if configured
if err := n.connectToPeers(context.Background()); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to peers", zap.Error(err))
// Don't fail - continue without peer connections
}
// Start exponential backoff reconnection for peers
if len(n.config.Discovery.BootstrapPeers) > 0 {
peerCtx, cancel := context.WithCancel(context.Background())
n.peerDiscoveryCancel = cancel
go func() {
interval := 5 * time.Second
consecutiveFailures := 0
n.logger.ComponentInfo(logging.ComponentNode, "Starting bootstrap peer reconnection with exponential backoff",
n.logger.ComponentInfo(logging.ComponentNode, "Starting peer reconnection with exponential backoff",
zap.Duration("initial_interval", interval),
zap.Duration("max_interval", 10*time.Minute))
for {
select {
case <-bootstrapCtx.Done():
n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap reconnection loop stopped")
case <-peerCtx.Done():
n.logger.ComponentDebug(logging.ComponentNode, "Peer reconnection loop stopped")
return
default:
}
// Check if we need to attempt connection
if !n.hasPeerConnections() {
n.logger.ComponentDebug(logging.ComponentNode, "Attempting peer connection",
zap.Duration("current_interval", interval),
zap.Int("consecutive_failures", consecutiveFailures))
if err := n.connectToPeers(context.Background()); err != nil {
consecutiveFailures++
// Calculate next backoff interval
jitteredInterval := addJitter(interval)
n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap connection failed, backing off",
n.logger.ComponentDebug(logging.ComponentNode, "Peer connection failed, backing off",
zap.Error(err),
zap.Duration("next_attempt_in", jitteredInterval),
zap.Int("consecutive_failures", consecutiveFailures))
// Sleep with jitter
select {
case <-bootstrapCtx.Done():
case <-peerCtx.Done():
return
case <-time.After(jitteredInterval):
}
@ -484,14 +515,14 @@ func (n *Node) startLibP2P() error {
// Log interval increases occasionally to show progress
if consecutiveFailures%5 == 0 {
n.logger.ComponentInfo(logging.ComponentNode, "Bootstrap connection still failing",
n.logger.ComponentInfo(logging.ComponentNode, "Peer connection still failing",
zap.Int("consecutive_failures", consecutiveFailures),
zap.Duration("current_interval", interval))
}
} else {
// Success! Reset interval and counters
if consecutiveFailures > 0 {
n.logger.ComponentInfo(logging.ComponentNode, "Successfully connected to bootstrap peers",
n.logger.ComponentInfo(logging.ComponentNode, "Successfully connected to peers",
zap.Int("failures_overcome", consecutiveFailures))
}
interval = 5 * time.Second
@ -499,15 +530,15 @@ func (n *Node) startLibP2P() error {
// Wait 30 seconds before checking connection again
select {
case <-bootstrapCtx.Done():
case <-peerCtx.Done():
return
case <-time.After(30 * time.Second):
}
}
} else {
// We have peer connections, just wait and check periodically
select {
case <-bootstrapCtx.Done():
case <-peerCtx.Done():
return
case <-time.After(30 * time.Second):
}
@ -516,15 +547,15 @@ func (n *Node) startLibP2P() error {
}()
}
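// Backoff schedule sketch (assumes the interval doubles between failed
// attempts, consistent with the initial_interval/max_interval values logged
// above; addJitter randomizes each wait): ~5s, ~10s, ~20s, ~40s, ... capped at
// 10m between attempts. Any successful reconnection resets the interval to 5s.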
// Add peers to peerstore for peer exchange
if len(n.config.Discovery.BootstrapPeers) > 0 {
n.logger.ComponentInfo(logging.ComponentNode, "Adding bootstrap peers to peerstore")
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
if ma, err := multiaddr.NewMultiaddr(bootstrapAddr); err == nil {
n.logger.ComponentInfo(logging.ComponentNode, "Adding peers to peerstore")
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
if ma, err := multiaddr.NewMultiaddr(peerAddr); err == nil {
if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
// Add to peerstore with longer TTL for peer exchange
n.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
n.logger.ComponentDebug(logging.ComponentNode, "Added bootstrap peer to peerstore",
n.logger.ComponentDebug(logging.ComponentNode, "Added peer to peerstore",
zap.String("peer", peerInfo.ID.String()))
}
}
@ -637,14 +668,33 @@ func (n *Node) stopPeerDiscovery() {
func (n *Node) Stop() error {
n.logger.ComponentInfo(logging.ComponentNode, "Stopping network node")
// Stop HTTP Gateway server
if n.apiGatewayServer != nil {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = n.apiGatewayServer.Shutdown(ctx)
}
// Close Gateway client
if n.apiGateway != nil {
n.apiGateway.Close()
}
// Stop SNI Gateway
if n.sniGateway != nil {
if err := n.sniGateway.Stop(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "SNI Gateway stop error", zap.Error(err))
}
}
// Stop cluster discovery
if n.clusterDiscovery != nil {
n.clusterDiscovery.Stop()
}
// Stop peer reconnection loop
if n.peerDiscoveryCancel != nil {
n.peerDiscoveryCancel()
}
// Stop peer discovery
@ -667,6 +717,457 @@ func (n *Node) Stop() error {
return nil
}
// loadNodePeerIDFromIdentity safely loads the node's peer ID from its identity file
// This is needed before the host is initialized, so we read directly from the file
func loadNodePeerIDFromIdentity(dataDir string) string {
identityFile := filepath.Join(os.ExpandEnv(dataDir), "identity.key")
// Expand ~ in path
if strings.HasPrefix(identityFile, "~") {
home, err := os.UserHomeDir()
if err != nil {
return ""
}
identityFile = filepath.Join(home, identityFile[1:])
}
// Load identity from file
if info, err := encryption.LoadIdentity(identityFile); err == nil {
return info.PeerID.String()
}
return "" // Return empty string if can't load (gateway will work without it)
}
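// Hypothetical usage (data dir assumed): called before the libp2p host exists,
//
//   peerID := loadNodePeerIDFromIdentity("~/.orama/node-1")
//
// returns a peer ID string such as "12D3KooW..." or "" when identity.key has
// not been generated yet.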
// startHTTPGateway initializes and starts the full API gateway with auth, pubsub, and API endpoints
func (n *Node) startHTTPGateway(ctx context.Context) error {
if !n.config.HTTPGateway.Enabled {
n.logger.ComponentInfo(logging.ComponentNode, "HTTP Gateway disabled in config")
return nil
}
// Create separate logger for gateway
logFile := filepath.Join(os.ExpandEnv(n.config.Node.DataDir), "..", "logs", "gateway.log")
// Ensure logs directory exists
logsDir := filepath.Dir(logFile)
if err := os.MkdirAll(logsDir, 0755); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
gatewayLogger, err := logging.NewFileLogger(logging.ComponentGeneral, logFile, false)
if err != nil {
return fmt.Errorf("failed to create gateway logger: %w", err)
}
// Create full API Gateway for auth, pubsub, rqlite, and API endpoints
// This replaces both the old reverse proxy gateway and the standalone gateway
gwCfg := &gateway.Config{
ListenAddr: n.config.HTTPGateway.ListenAddr,
ClientNamespace: n.config.HTTPGateway.ClientNamespace,
BootstrapPeers: n.config.Discovery.BootstrapPeers,
NodePeerID: loadNodePeerIDFromIdentity(n.config.Node.DataDir), // Load the node's actual peer ID from its identity file
RQLiteDSN: n.config.HTTPGateway.RQLiteDSN,
OlricServers: n.config.HTTPGateway.OlricServers,
OlricTimeout: n.config.HTTPGateway.OlricTimeout,
IPFSClusterAPIURL: n.config.HTTPGateway.IPFSClusterAPIURL,
IPFSAPIURL: n.config.HTTPGateway.IPFSAPIURL,
IPFSTimeout: n.config.HTTPGateway.IPFSTimeout,
// HTTPS/TLS configuration
EnableHTTPS: n.config.HTTPGateway.HTTPS.Enabled,
DomainName: n.config.HTTPGateway.HTTPS.Domain,
TLSCacheDir: n.config.HTTPGateway.HTTPS.CacheDir,
}
apiGateway, err := gateway.New(gatewayLogger, gwCfg)
if err != nil {
return fmt.Errorf("failed to create full API gateway: %w", err)
}
n.apiGateway = apiGateway
// Check if HTTPS is enabled and set up certManager BEFORE starting goroutine
// This ensures n.certManager is set before SNI gateway initialization checks it
var certManager *autocert.Manager
var tlsCacheDir string
if gwCfg.EnableHTTPS && gwCfg.DomainName != "" {
tlsCacheDir = gwCfg.TLSCacheDir
if tlsCacheDir == "" {
tlsCacheDir = "/home/debros/.orama/tls-cache"
}
// Ensure TLS cache directory exists and is writable
if err := os.MkdirAll(tlsCacheDir, 0700); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to create TLS cache directory",
zap.String("dir", tlsCacheDir),
zap.Error(err),
)
} else {
n.logger.ComponentInfo(logging.ComponentNode, "TLS cache directory ready",
zap.String("dir", tlsCacheDir),
)
}
// Create TLS configuration with Let's Encrypt autocert
// Using STAGING environment to avoid rate limits during development/testing
// TODO: Switch to production when ready (remove Client field)
certManager = &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(gwCfg.DomainName),
Cache: autocert.DirCache(tlsCacheDir),
Email: fmt.Sprintf("admin@%s", gwCfg.DomainName),
Client: &acme.Client{
DirectoryURL: "https://acme-staging-v02.api.letsencrypt.org/directory",
},
}
// Store certificate manager for use by SNI gateway
n.certManager = certManager
// Initialize certificate ready channel - will be closed when certs are extracted
// This allows RQLite to wait for certificates before starting with node TLS
n.certReady = make(chan struct{})
}
// Channel to signal when HTTP server is ready for ACME challenges
httpReady := make(chan struct{})
// Start API Gateway in a goroutine
go func() {
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Starting full API gateway",
zap.String("listen_addr", gwCfg.ListenAddr),
)
// Check if HTTPS is enabled
if gwCfg.EnableHTTPS && gwCfg.DomainName != "" && certManager != nil {
// Start HTTPS server with automatic certificate provisioning
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTPS enabled, starting secure gateway",
zap.String("domain", gwCfg.DomainName),
)
// Determine HTTPS and HTTP ports
httpsPort := 443
httpPort := 80
// Start HTTP server for ACME challenges and redirects
// certManager.HTTPHandler() must be the main handler, with a fallback for other requests
httpServer := &http.Server{
Addr: fmt.Sprintf(":%d", httpPort),
Handler: certManager.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Fallback for non-ACME requests: redirect to HTTPS
target := fmt.Sprintf("https://%s%s", r.Host, r.URL.RequestURI())
http.Redirect(w, r, target, http.StatusMovedPermanently)
})),
}
// Create HTTP listener first to ensure port 80 is bound before signaling ready
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Binding HTTP listener for ACME challenges",
zap.Int("port", httpPort),
)
httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", httpPort))
if err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to bind HTTP listener for ACME", zap.Error(err))
close(httpReady) // Signal even on failure so SNI goroutine doesn't hang
return
}
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP server ready for ACME challenges",
zap.Int("port", httpPort),
zap.String("tls_cache_dir", tlsCacheDir),
)
// Start HTTP server in background for ACME challenges
go func() {
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP server serving ACME challenges",
zap.String("addr", httpServer.Addr),
)
if err := httpServer.Serve(httpListener); err != nil && err != http.ErrServerClosed {
gatewayLogger.ComponentError(logging.ComponentGateway, "HTTP server error", zap.Error(err))
}
}()
// Pre-provision the certificate BEFORE starting HTTPS server
// This ensures we don't accept HTTPS connections without a valid certificate
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Pre-provisioning TLS certificate...",
zap.String("domain", gwCfg.DomainName),
)
// Use a timeout context for certificate provisioning
// If Let's Encrypt is rate-limited or unreachable, don't block forever
certCtx, certCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer certCancel()
certReq := &tls.ClientHelloInfo{
ServerName: gwCfg.DomainName,
}
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Initiating certificate request to Let's Encrypt",
zap.String("domain", gwCfg.DomainName),
zap.String("acme_environment", "staging"),
)
// Try to get certificate with timeout
certProvisionChan := make(chan error, 1)
go func() {
gatewayLogger.ComponentInfo(logging.ComponentGateway, "GetCertificate goroutine started")
_, err := certManager.GetCertificate(certReq)
if err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "GetCertificate returned error",
zap.Error(err),
)
} else {
gatewayLogger.ComponentInfo(logging.ComponentGateway, "GetCertificate succeeded")
}
certProvisionChan <- err
}()
var certErr error
select {
case err := <-certProvisionChan:
certErr = err
if certErr != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "Certificate provisioning failed",
zap.String("domain", gwCfg.DomainName),
zap.Error(certErr),
)
}
case <-certCtx.Done():
certErr = fmt.Errorf("certificate provisioning timeout (Let's Encrypt may be rate-limited or unreachable)")
gatewayLogger.ComponentError(logging.ComponentGateway, "Certificate provisioning timeout",
zap.String("domain", gwCfg.DomainName),
zap.Duration("timeout", 30*time.Second),
zap.Error(certErr),
)
}
if certErr != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to provision TLS certificate - HTTPS disabled",
zap.String("domain", gwCfg.DomainName),
zap.Error(certErr),
zap.String("http_server_status", "running on port 80 for HTTP fallback"),
)
// Signal ready for SNI goroutine (even though we're failing)
close(httpReady)
// HTTP server on port 80 is already running, but it's configured to redirect to HTTPS
// Replace its handler to serve the gateway directly instead of redirecting
httpServer.Handler = apiGateway.Routes()
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP gateway available on port 80 only",
zap.String("port", "80"),
)
return
}
gatewayLogger.ComponentInfo(logging.ComponentGateway, "TLS certificate provisioned successfully",
zap.String("domain", gwCfg.DomainName),
)
// Signal that HTTP server is ready for ACME challenges
close(httpReady)
tlsConfig := &tls.Config{
MinVersion: tls.VersionTLS12,
GetCertificate: certManager.GetCertificate,
}
// Start HTTPS server
httpsServer := &http.Server{
Addr: fmt.Sprintf(":%d", httpsPort),
TLSConfig: tlsConfig,
Handler: apiGateway.Routes(),
}
n.apiGatewayServer = httpsServer
listener, err := tls.Listen("tcp", fmt.Sprintf(":%d", httpsPort), tlsConfig)
if err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to create TLS listener", zap.Error(err))
return
}
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTPS gateway listener bound",
zap.String("domain", gwCfg.DomainName),
zap.Int("port", httpsPort),
)
// Serve HTTPS
if err := httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
gatewayLogger.ComponentError(logging.ComponentGateway, "HTTPS Gateway error", zap.Error(err))
}
} else {
// No HTTPS - signal ready immediately (no ACME needed)
close(httpReady)
// Start plain HTTP server
server := &http.Server{
Addr: gwCfg.ListenAddr,
Handler: apiGateway.Routes(),
}
n.apiGatewayServer = server
// Try to bind listener
ln, err := net.Listen("tcp", gwCfg.ListenAddr)
if err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to bind API gateway listener", zap.Error(err))
return
}
gatewayLogger.ComponentInfo(logging.ComponentGateway, "API gateway listener bound", zap.String("listen_addr", ln.Addr().String()))
// Serve HTTP
if err := server.Serve(ln); err != nil && err != http.ErrServerClosed {
gatewayLogger.ComponentError(logging.ComponentGateway, "API Gateway error", zap.Error(err))
}
}
}()
// Initialize and start SNI gateway if HTTPS is enabled and SNI is configured
// This runs in a separate goroutine that waits for HTTP server to be ready
if n.config.HTTPGateway.SNI.Enabled && n.certManager != nil {
go func() {
// Wait for HTTP server to be ready for ACME challenges
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Waiting for HTTP server before SNI initialization...")
<-httpReady
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Initializing SNI gateway",
zap.String("listen_addr", n.config.HTTPGateway.SNI.ListenAddr),
)
// Provision the certificate from Let's Encrypt cache
// This ensures the certificate file is downloaded and cached
domain := n.config.HTTPGateway.HTTPS.Domain
if domain != "" {
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Provisioning certificate for SNI",
zap.String("domain", domain))
certReq := &tls.ClientHelloInfo{
ServerName: domain,
}
if tlsCert, err := n.certManager.GetCertificate(certReq); err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to provision certificate for SNI",
zap.String("domain", domain), zap.Error(err))
return // Can't start SNI without certificate
} else {
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Certificate provisioned for SNI",
zap.String("domain", domain))
// Extract certificate to PEM files for SNI gateway
// SNI gateway needs standard PEM cert files, not autocert cache format
tlsCacheDir := n.config.HTTPGateway.HTTPS.CacheDir
if tlsCacheDir == "" {
tlsCacheDir = "/home/debros/.orama/tls-cache"
}
certPath := filepath.Join(tlsCacheDir, domain+".crt")
keyPath := filepath.Join(tlsCacheDir, domain+".key")
if err := extractPEMFromTLSCert(tlsCert, certPath, keyPath); err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to extract PEM from TLS cert for SNI",
zap.Error(err))
return // Can't start SNI without PEM files
}
gatewayLogger.ComponentInfo(logging.ComponentGateway, "PEM certificates extracted for SNI",
zap.String("cert_path", certPath), zap.String("key_path", keyPath))
// Signal that certificates are ready for RQLite node-to-node TLS
if n.certReady != nil {
close(n.certReady)
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Certificate ready signal sent for RQLite node TLS")
}
}
} else {
gatewayLogger.ComponentError(logging.ComponentGateway, "No domain configured for SNI certificate")
return
}
// Create SNI config with certificate files
sniCfg := n.config.HTTPGateway.SNI
// Use the same gateway logger for SNI gateway (writes to gateway.log)
sniGateway, err := gateway.NewTCPSNIGateway(gatewayLogger, &sniCfg)
if err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to initialize SNI gateway", zap.Error(err))
return
}
n.sniGateway = sniGateway
gatewayLogger.ComponentInfo(logging.ComponentGateway, "SNI gateway initialized, starting...")
// Start SNI gateway (this blocks until shutdown)
if err := n.sniGateway.Start(ctx); err != nil {
gatewayLogger.ComponentError(logging.ComponentGateway, "SNI Gateway error", zap.Error(err))
}
}()
}
return nil
}
// extractPEMFromTLSCert extracts certificate and private key from tls.Certificate to PEM files
func extractPEMFromTLSCert(tlsCert *tls.Certificate, certPath, keyPath string) error {
if tlsCert == nil || len(tlsCert.Certificate) == 0 {
return fmt.Errorf("invalid tls certificate")
}
// Write certificate chain to PEM file
certFile, err := os.Create(certPath)
if err != nil {
return fmt.Errorf("failed to create cert file: %w", err)
}
defer certFile.Close()
// Write all certificates in the chain
for _, certBytes := range tlsCert.Certificate {
if err := pem.Encode(certFile, &pem.Block{
Type: "CERTIFICATE",
Bytes: certBytes,
}); err != nil {
return fmt.Errorf("failed to encode certificate: %w", err)
}
}
// Write private key to PEM file
if tlsCert.PrivateKey == nil {
return fmt.Errorf("private key is nil")
}
keyFile, err := os.Create(keyPath)
if err != nil {
return fmt.Errorf("failed to create key file: %w", err)
}
defer keyFile.Close()
// Marshal the private key as PKCS#8, which covers RSA, ECDSA, and Ed25519 keys
keyBytes, err := x509.MarshalPKCS8PrivateKey(tlsCert.PrivateKey)
if err != nil {
return fmt.Errorf("failed to marshal private key: %w", err)
}
if err := pem.Encode(keyFile, &pem.Block{
Type: "PRIVATE KEY",
Bytes: keyBytes,
}); err != nil {
return fmt.Errorf("failed to encode private key: %w", err)
}
// Set proper permissions
os.Chmod(certPath, 0644)
os.Chmod(keyPath, 0600)
return nil
}
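// Hypothetical wiring (domain and cache dir assumed): this mirrors how the SNI
// goroutine above exports the autocert certificate for rqlited's
// -node-cert/-node-key flags:
//
//   hello := &tls.ClientHelloInfo{ServerName: "node-1.debros.network"}
//   tlsCert, err := certManager.GetCertificate(hello)
//   if err != nil {
//       return err
//   }
//   certPath := "/home/debros/.orama/tls-cache/node-1.debros.network.crt"
//   keyPath := "/home/debros/.orama/tls-cache/node-1.debros.network.key"
//   if err := extractPEMFromTLSCert(tlsCert, certPath, keyPath); err != nil {
//       return err
//   }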
// Start starts the network node
func (n *Node) Start(ctx context.Context) error {
n.logger.Info("Starting network node", zap.String("data_dir", n.config.Node.DataDir))
@ -687,6 +1188,12 @@ func (n *Node) Start(ctx context.Context) error {
return fmt.Errorf("failed to create data directory: %w", err)
}
// Start HTTP Gateway first (doesn't depend on other services)
if err := n.startHTTPGateway(ctx); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to start HTTP Gateway", zap.Error(err))
// Don't fail node startup if gateway fails
}
// Start LibP2P host first (needed for cluster discovery)
if err := n.startLibP2P(); err != nil {
return fmt.Errorf("failed to start LibP2P: %w", err)
@ -743,16 +1250,14 @@ func (n *Node) startIPFSClusterConfig() error {
return fmt.Errorf("failed to ensure cluster config: %w", err)
}
// Try to repair peer configuration automatically
// This will be retried periodically if peer is not available yet
if success, err := cm.RepairPeerConfiguration(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair peer configuration, will retry later", zap.Error(err))
} else if success {
n.logger.ComponentInfo(logging.ComponentNode, "Peer configuration repaired successfully")
} else {
n.logger.ComponentDebug(logging.ComponentNode, "Peer not available yet, will retry periodically")
}
n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized")

View File

@ -140,7 +140,7 @@ func TestLoadOrCreateIdentity(t *testing.T) {
})
}
func TestHasPeerConnections(t *testing.T) {
cfg := &config.Config{}
n, err := NewNode(cfg)
@ -148,8 +148,8 @@ func TestHashBootstrapConnections(t *testing.T) {
t.Fatalf("NewNode() error: %v", err)
}
// Assert: Does not have peer connections
conns := n.hasPeerConnections()
if conns != false {
t.Fatalf("expected false, got %v", conns)
}
@ -162,13 +162,13 @@ func TestHashBootstrapConnections(t *testing.T) {
defer h.Close()
n.host = h
conns = n.hasPeerConnections()
if conns != false {
t.Fatalf("expected false, got %v", conns)
}
// Assert: Return true if connected to at least one peer
t.Run("returns true when connected to at least one configured peer", func(t *testing.T) {
// Fresh node and config
cfg := &config.Config{}
n2, err := NewNode(cfg)
@ -189,7 +189,7 @@ func TestHashBootstrapConnections(t *testing.T) {
}
defer hB.Close()
// Build B's peer multiaddr: <one-of-B.Addrs>/p2p/<B.ID>
var base multiaddr.Multiaddr
for _, a := range hB.Addrs() {
if strings.Contains(a.String(), "/tcp/") {
@ -204,11 +204,11 @@ func TestHashBootstrapConnections(t *testing.T) {
if err != nil {
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
}
peerAddr := base.Encapsulate(pidMA).String()
// Configure node A with B as a peer
n2.host = hA
n2.config.Discovery.BootstrapPeers = []string{peerAddr}
// Connect A -> B
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
@ -229,13 +229,13 @@ func TestHashBootstrapConnections(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
// Assert: hasPeerConnections returns true
if !n2.hasPeerConnections() {
t.Fatalf("expected hasPeerConnections() to be true")
}
})
t.Run("returns false when connected peers are not in the bootstrap list", func(t *testing.T) {
t.Run("returns false when connected peers are not in the peer list", func(t *testing.T) {
// Fresh node and config
cfg := &config.Config{}
n2, err := NewNode(cfg)
@ -262,7 +262,7 @@ func TestHashBootstrapConnections(t *testing.T) {
}
defer hC.Close()
// Build C's peer multiaddr: <one-of-C.Addrs>/p2p/<C.ID>
var baseC multiaddr.Multiaddr
for _, a := range hC.Addrs() {
if strings.Contains(a.String(), "/tcp/") {
@ -277,13 +277,13 @@ func TestHashBootstrapConnections(t *testing.T) {
if err != nil {
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
}
peerC := baseC.Encapsulate(pidC).String()
// Configure node A with ONLY C as a peer
n2.host = hA
n2.config.Discovery.BootstrapPeers = []string{peerC}
// Connect A -> B (but C is in the bootstrap list, not B)
// Connect A -> B (but C is in the peer list, not B)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
if err := hA.Connect(ctx, peer.AddrInfo{ID: hB.ID(), Addrs: hB.Addrs()}); err != nil {
@ -302,9 +302,9 @@ func TestHashBootstrapConnections(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
// Assert: hasPeerConnections should be false (connected peer is not in peer list)
if n2.hasPeerConnections() {
t.Fatalf("expected hasPeerConnections() to be false")
}
})

View File

@ -433,7 +433,7 @@ func (c *ClusterDiscoveryService) getPeersJSONUnlocked() []map[string]interface{
for _, peer := range c.knownPeers {
// CRITICAL FIX: Include ALL peers (including self) in peers.json
// When using expect configuration with recovery, RQLite needs the complete
// expected cluster configuration to properly form consensus.
// The peers.json file is used by RQLite's recovery mechanism to know
// what the full cluster membership should be, including the local node.
@ -584,25 +584,34 @@ func (c *ClusterDiscoveryService) HasRecentPeersJSON() bool {
return time.Since(c.lastUpdate) < 5*time.Minute
}
// FindJoinTargets discovers join targets via LibP2P
func (c *ClusterDiscoveryService) FindJoinTargets() []string {
c.mu.RLock()
defer c.mu.RUnlock()
targets := []string{}
// All nodes are equal - prioritize by Raft log index (more advanced = better)
type nodeWithIndex struct {
address string
logIndex uint64
}
var nodes []nodeWithIndex
for _, peer := range c.knownPeers {
if peer.NodeType == "bootstrap" {
targets = append(targets, peer.RaftAddress)
nodes = append(nodes, nodeWithIndex{peer.RaftAddress, peer.RaftLogIndex})
}
// Sort by log index descending (higher log index = more up-to-date)
for i := 0; i < len(nodes)-1; i++ {
for j := i + 1; j < len(nodes); j++ {
if nodes[j].logIndex > nodes[i].logIndex {
nodes[i], nodes[j] = nodes[j], nodes[i]
}
}
}
for _, n := range nodes {
targets = append(targets, n.address)
}
return targets
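// The manual swap sort above could equivalently use the standard library
// (sketch; assumes "sort" is imported):
//
//   sort.Slice(nodes, func(i, j int) bool {
//       return nodes[i].logIndex > nodes[j].logIndex
//   })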

View File

@ -8,18 +8,18 @@ import (
func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
c.mu.RLock()
defer c.mu.RUnlock()
activeCount := 0
inactiveCount := 0
totalHealth := 0.0
currentLeader := ""
now := time.Now()
for nodeID, health := range c.peerHealth {
if health.Status == "active" {
activeCount++
// Calculate health score (0-100) based on last seen
timeSinceLastSeen := now.Sub(health.LastSeen)
healthScore := 100.0
@ -34,22 +34,22 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
} else {
inactiveCount++
}
// Try to determine leader (highest log index is likely the leader)
if peer, ok := c.knownPeers[nodeID]; ok {
// We'd need to check the actual leader status from RQLite
// For now, use highest log index as heuristic
if currentLeader == "" || peer.RaftLogIndex > c.knownPeers[currentLeader].RaftLogIndex {
currentLeader = nodeID
}
}
}
averageHealth := 0.0
if activeCount > 0 {
averageHealth = totalHealth / float64(activeCount)
}
// Determine discovery status
discoveryStatus := "healthy"
if len(c.knownPeers) == 0 {
@ -59,7 +59,7 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
} else if averageHealth < 50 {
discoveryStatus = "degraded"
}
return &ClusterMetrics{
ClusterSize: len(c.knownPeers),
ActiveNodes: activeCount,
@ -71,4 +71,3 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
AveragePeerHealth: averageHealth,
}
}

View File

@ -18,6 +18,7 @@ import (
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/tlsutil"
)
// RQLiteManager manages an RQLite node instance
@ -25,7 +26,7 @@ type RQLiteManager struct {
config *config.DatabaseConfig
discoverConfig *config.DiscoveryConfig
dataDir string
nodeType string // "bootstrap" or "node"
nodeType string // Node type identifier
logger *zap.Logger
cmd *exec.Cmd
connection *gorqlite.Connection
@ -81,7 +82,7 @@ func (r *RQLiteManager) SetDiscoveryService(service *ClusterDiscoveryService) {
r.discoveryService = service
}
// SetNodeType sets the node type for this RQLite manager ("bootstrap" or "node")
// SetNodeType sets the node type for this RQLite manager
func (r *RQLiteManager) SetNodeType(nodeType string) {
if nodeType != "" {
r.nodeType = nodeType
@ -120,7 +121,7 @@ func (r *RQLiteManager) Start(ctx context.Context) error {
// CRITICAL FIX: Ensure peers.json exists with minimum cluster size BEFORE starting RQLite
// This prevents split-brain where each node starts as a single-node cluster
// We NEVER start as a single-node cluster - we wait indefinitely until minimum cluster size is met
// This applies to ALL nodes (with or without join addresses)
if r.discoveryService != nil {
r.logger.Info("Ensuring peers.json exists with minimum cluster size before RQLite startup",
zap.String("policy", "will wait indefinitely - never start as single-node cluster"),
@ -240,6 +241,27 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
"-raft-addr", fmt.Sprintf("0.0.0.0:%d", r.config.RQLiteRaftPort),
}
// Add node-to-node TLS encryption if configured
// This enables TLS for Raft inter-node communication, required for SNI gateway routing
// See: https://rqlite.io/docs/guides/security/#encrypting-node-to-node-communication
if r.config.NodeCert != "" && r.config.NodeKey != "" {
r.logger.Info("Enabling node-to-node TLS encryption",
zap.String("node_cert", r.config.NodeCert),
zap.String("node_key", r.config.NodeKey),
zap.String("node_ca_cert", r.config.NodeCACert),
zap.Bool("node_no_verify", r.config.NodeNoVerify))
args = append(args, "-node-cert", r.config.NodeCert)
args = append(args, "-node-key", r.config.NodeKey)
if r.config.NodeCACert != "" {
args = append(args, "-node-ca-cert", r.config.NodeCACert)
}
if r.config.NodeNoVerify {
args = append(args, "-node-no-verify")
}
}
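// For illustration (paths assumed), the resulting invocation looks like:
//
//   rqlited ... -node-cert /home/debros/.orama/tls-cache/node-1.debros.network.crt \
//     -node-key /home/debros/.orama/tls-cache/node-1.debros.network.key \
//     -node-no-verify
//
// matching the PEM files the SNI gateway extracts at startup.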
// All nodes follow the same join logic - either join specified address or start as single-node cluster
if r.config.RQLiteJoinAddress != "" {
r.logger.Info("Joining RQLite cluster", zap.String("join_address", r.config.RQLiteJoinAddress))
@ -264,7 +286,8 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
// Always add the join parameter in host:port form - let rqlited handle the rest
// Add retry parameters to handle slow cluster startup (e.g., during recovery)
args = append(args, "-join", joinArg, "-join-attempts", "30", "-join-interval", "10s")
// Include -join-as with the raft advertise address so the leader knows which node this is
args = append(args, "-join", joinArg, "-join-as", r.discoverConfig.RaftAdvAddress, "-join-attempts", "30", "-join-interval", "10s")
} else {
r.logger.Info("No join address specified - starting as single-node cluster")
// When no join address is provided, rqlited will start as a single-node cluster
@ -289,23 +312,23 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
if nodeType == "" {
nodeType = "node"
}
// Create logs directory
logsDir := filepath.Join(filepath.Dir(r.dataDir), "logs")
if err := os.MkdirAll(logsDir, 0755); err != nil {
return fmt.Errorf("failed to create logs directory at %s: %w", logsDir, err)
}
// Open log file for RQLite output
logPath := filepath.Join(logsDir, fmt.Sprintf("rqlite-%s.log", nodeType))
logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return fmt.Errorf("failed to open RQLite log file at %s: %w", logPath, err)
}
r.logger.Info("RQLite logs will be written to file",
zap.String("path", logPath))
r.cmd.Stdout = logFile
r.cmd.Stderr = logFile
@ -460,11 +483,11 @@ func (r *RQLiteManager) hasExistingState(rqliteDataDir string) bool {
// For joining nodes in recovery, this may take longer (up to 3 minutes)
func (r *RQLiteManager) waitForReady(ctx context.Context) error {
url := fmt.Sprintf("http://localhost:%d/status", r.config.RQLitePort)
client := &http.Client{Timeout: 2 * time.Second}
client := tlsutil.NewHTTPClient(2 * time.Second)
// All nodes may need time to open the store during recovery
// Use consistent timeout for cluster consistency
maxAttempts := 180 // 180 seconds (3 minutes) for all nodes
for i := 0; i < maxAttempts; i++ {
select {
@ -473,6 +496,11 @@ func (r *RQLiteManager) waitForReady(ctx context.Context) error {
default:
}
// Use centralized TLS configuration
if client == nil {
client = tlsutil.NewHTTPClient(2 * time.Second)
}
resp, err := client.Get(url)
if err == nil && resp.StatusCode == http.StatusOK {
// Parse the response to check for valid raft state
@ -517,7 +545,7 @@ func (r *RQLiteManager) waitForReady(ctx context.Context) error {
return fmt.Errorf("RQLite did not become ready within timeout")
}
// GetConnection returns the RQLite connection
func (r *RQLiteManager) GetConnection() *gorqlite.Connection {
return r.connection
@ -680,7 +708,7 @@ func (r *RQLiteManager) testJoinAddress(joinAddress string) error {
// Determine the HTTP status URL to probe.
// If joinAddress contains a scheme, use it directly. Otherwise treat joinAddress
// as host:port (Raft) and probe the standard HTTP API port 5001 on that host.
client := &http.Client{Timeout: 5 * time.Second}
client := tlsutil.NewHTTPClient(5 * time.Second)
var statusURL string
if strings.HasPrefix(joinAddress, "http://") || strings.HasPrefix(joinAddress, "https://") {
@ -708,7 +736,6 @@ func (r *RQLiteManager) testJoinAddress(joinAddress string) error {
return nil
}
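// Worked example (address hypothetical): a raft join address of
// "203.0.113.7:7001" carries no scheme, so the probe above targets
// "http://203.0.113.7:5001/status" on the standard HTTP API port.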
// exponentialBackoff calculates exponential backoff duration with jitter
func (r *RQLiteManager) exponentialBackoff(attempt int, baseDelay time.Duration, maxDelay time.Duration) time.Duration {
// Calculate exponential backoff: baseDelay * 2^attempt
@ -745,7 +772,7 @@ func (r *RQLiteManager) recoverCluster(ctx context.Context, peersJSONPath string
}
// Restart RQLite using launchProcess to ensure all join/backoff logic is applied
// This includes: join address handling, join retries, bootstrap-expect, etc.
// This includes: join address handling, join retries, expect configuration, etc.
r.logger.Info("Restarting RQLite (will auto-recover using peers.json)")
if err := r.launchProcess(ctx, rqliteDataDir); err != nil {
return fmt.Errorf("failed to restart RQLite process: %w", err)
@ -864,7 +891,6 @@ func (r *RQLiteManager) clearRaftState(rqliteDataDir string) error {
return nil
}
// isInSplitBrainState detects if we're in a split-brain scenario where all nodes
// are followers with no peers (each node thinks it's alone)
func (r *RQLiteManager) isInSplitBrainState() bool {
@ -1182,9 +1208,9 @@ func (r *RQLiteManager) performPreStartClusterDiscovery(ctx context.Context, rql
}
// CRITICAL FIX: Skip recovery if no peers were discovered (other than ourselves)
// Only ourselves in the cluster means this is a fresh bootstrap, not a recovery scenario
// Only ourselves in the cluster means this is a fresh cluster, not a recovery scenario
if discoveredPeers <= 1 {
r.logger.Info("No peers discovered during pre-start discovery window - skipping recovery (fresh bootstrap)",
r.logger.Info("No peers discovered during pre-start discovery window - skipping recovery (fresh cluster)",
zap.Int("discovered_peers", discoveredPeers))
return nil
}
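For reference, a minimal self-contained sketch of the backoff pattern named in the hunks above — baseDelay * 2^attempt capped at maxDelay, plus random jitter so retrying nodes don't resync in lockstep. Only the doc comment and signature appear in the diff; the 25% jitter fraction below is an assumption, not taken from the source.

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// Mirrors the exponentialBackoff signature shown in the diff.
func exponentialBackoff(attempt int, baseDelay, maxDelay time.Duration) time.Duration {
    d := baseDelay << uint(attempt) // baseDelay * 2^attempt
    if d <= 0 || d > maxDelay {     // d <= 0 guards against shift overflow
        d = maxDelay
    }
    if d >= 4 { // add up to 25% jitter; the exact fraction is assumed
        d += time.Duration(rand.Int63n(int64(d) / 4))
    }
    return d
}

func main() {
    for attempt := 0; attempt < 6; attempt++ {
        fmt.Println(attempt, exponentialBackoff(attempt, time.Second, 30*time.Second))
    }
}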

122
pkg/tlsutil/client.go Normal file
View File

@ -0,0 +1,122 @@
// Package tlsutil provides centralized TLS configuration for trusting specific domains
package tlsutil
import (
"crypto/tls"
"crypto/x509"
"net/http"
"os"
"strings"
"time"
)
var (
// Global cache of trusted domains loaded from environment
trustedDomains []string
// CA certificate pool for trusting self-signed certs
caCertPool *x509.CertPool
initialized bool
)
// Default trusted domains - always trust debros.network for staging/development
var defaultTrustedDomains = []string{
"*.debros.network",
}
// init loads trusted domains and CA certificate from environment and files
func init() {
// Start with default trusted domains
trustedDomains = append(trustedDomains, defaultTrustedDomains...)
// Add any additional domains from environment
domains := os.Getenv("DEBROS_TRUSTED_TLS_DOMAINS")
if domains != "" {
for _, d := range strings.Split(domains, ",") {
d = strings.TrimSpace(d)
if d != "" {
trustedDomains = append(trustedDomains, d)
}
}
}
// Try to load CA certificate
caCertPath := os.Getenv("DEBROS_CA_CERT_PATH")
if caCertPath == "" {
caCertPath = "/etc/debros/ca.crt"
}
if caCertData, err := os.ReadFile(caCertPath); err == nil {
pool := x509.NewCertPool()
// Only adopt the pool if the PEM actually parsed; a non-nil but empty
// pool would make GetTLSConfig reject every certificate.
if pool.AppendCertsFromPEM(caCertData) {
caCertPool = pool
}
}
initialized = true
}
// GetTrustedDomains returns the list of domains to skip TLS verification for
func GetTrustedDomains() []string {
return trustedDomains
}
// ShouldSkipTLSVerify checks if TLS verification should be skipped for this domain
func ShouldSkipTLSVerify(domain string) bool {
for _, trusted := range trustedDomains {
if strings.HasPrefix(trusted, "*.") {
// Handle wildcards like *.debros.network
suffix := strings.TrimPrefix(trusted, "*")
if strings.HasSuffix(domain, suffix) || domain == strings.TrimPrefix(suffix, ".") {
return true
}
} else if domain == trusted {
return true
}
}
return false
}
// GetTLSConfig returns a TLS config with appropriate verification settings
func GetTLSConfig() *tls.Config {
config := &tls.Config{
MinVersion: tls.VersionTLS12,
}
// If we have a CA cert pool, use it
if caCertPool != nil {
config.RootCAs = caCertPool
} else if len(trustedDomains) > 0 {
// Fallback: skip verification if trusted domains are configured but no CA pool
config.InsecureSkipVerify = true
}
return config
}
// NewHTTPClient creates an HTTP client with TLS verification for trusted domains
func NewHTTPClient(timeout time.Duration) *http.Client {
return &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: GetTLSConfig(),
},
}
}
// NewHTTPClientForDomain creates an HTTP client configured for a specific domain
func NewHTTPClientForDomain(timeout time.Duration, hostname string) *http.Client {
tlsConfig := GetTLSConfig()
// If this domain is in trusted list and we don't have a CA pool, allow insecure
if caCertPool == nil && ShouldSkipTLSVerify(hostname) {
tlsConfig.InsecureSkipVerify = true
}
return &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
}
}
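A minimal caller-side sketch of this package, mirroring how the RQLite manager hunks above swap bare &http.Client{} values for it. The module import path and the status URL are assumptions for illustration.

package main

import (
    "fmt"
    "time"

    "github.com/DeBrosOfficial/network/pkg/tlsutil" // import path assumed from the repo layout
)

func main() {
    // Shared client: uses the CA at DEBROS_CA_CERT_PATH when present,
    // otherwise skips verification for the configured trusted domains.
    client := tlsutil.NewHTTPClient(2 * time.Second)
    resp, err := client.Get("https://node-1.debros.network:5001/status") // illustrative endpoint
    if err == nil {
        resp.Body.Close()
    }

    // Per-domain client: adds InsecureSkipVerify only when the hostname
    // matches the trusted list and no CA pool was loaded.
    _ = tlsutil.NewHTTPClientForDomain(5*time.Second, "node-1.debros.network")

    fmt.Println(tlsutil.ShouldSkipTLSVerify("api.debros.network")) // true, via the *.debros.network default
}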

View File

@ -52,7 +52,7 @@ SPECIFIC_PATTERNS=(
"ipfs daemon"
"ipfs-cluster-service daemon"
"olric-server"
"bin/node"
"bin/orama-node"
"bin/gateway"
"anyone-client"
)
@ -75,7 +75,7 @@ for pattern in "${SPECIFIC_PATTERNS[@]}"; do
done
# Method 3: Kill processes using PID files
PIDS_DIR="$HOME/.debros/.pids"
PIDS_DIR="$HOME/.orama/.pids"
if [[ -d "$PIDS_DIR" ]]; then
for pidfile in "$PIDS_DIR"/*.pid; do
if [[ -f "$pidfile" ]]; then

View File

@ -1,222 +0,0 @@
#!/bin/bash
# DeBros Network Installation Script
# Downloads dbn from GitHub releases and runs the new 'dbn prod install' flow
#
# Supported: Ubuntu 20.04+, Debian 11+
#
# Usage:
# curl -fsSL https://install.debros.network | bash
# OR
# bash scripts/install-debros-network.sh
# OR with specific flags:
# bash scripts/install-debros-network.sh --bootstrap
# bash scripts/install-debros-network.sh --vps-ip 1.2.3.4 --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...
# bash scripts/install-debros-network.sh --domain example.com
set -e
set -o pipefail
trap 'error "An error occurred. Installation aborted."; exit 1' ERR
# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
BLUE='\033[38;2;2;128;175m'
YELLOW='\033[1;33m'
NOCOLOR='\033[0m'
# Configuration
GITHUB_REPO="DeBrosOfficial/network"
GITHUB_API="https://api.github.com/repos/$GITHUB_REPO"
INSTALL_DIR="/usr/local/bin"
log() { echo -e "${CYAN}[$(date '+%Y-%m-%d %H:%M:%S')]${NOCOLOR} $1"; }
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1" >&2; }
success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; }
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1" >&2; }
display_banner() {
echo -e "${BLUE}========================================================================${NOCOLOR}"
echo -e "${CYAN}
____ ____ _ _ _ _
| _ \\ ___| __ ) _ __ ___ ___ | \\ | | ___| |___ _____ _ __| | __
| | | |/ _ \\ _ \\| __/ _ \\/ __| | \\| |/ _ \\ __\\ \\ /\\ / / _ \\| __| |/ /
| |_| | __/ |_) | | | (_) \\__ \\ | |\\ | __/ |_ \\ V V / (_) | | | <
|____/ \\___|____/|_| \\___/|___/ |_| \\_|\\___|\\__| \\_/\\_/ \\___/|_| |_|\\_\\
${NOCOLOR}"
echo -e "${BLUE}========================================================================${NOCOLOR}"
echo -e "${GREEN} Production Installation ${NOCOLOR}"
echo -e "${BLUE}========================================================================${NOCOLOR}"
}
detect_os() {
if [ ! -f /etc/os-release ]; then
error "Cannot detect operating system"
exit 1
fi
. /etc/os-release
OS=$ID
VERSION=$VERSION_ID
# Support Debian and Ubuntu
case $OS in
ubuntu|debian)
log "Detected OS: $OS ${VERSION:-unknown}"
;;
*)
warning "Unsupported operating system: $OS (may not work)"
;;
esac
}
check_architecture() {
ARCH=$(uname -m)
case $ARCH in
x86_64)
GITHUB_ARCH="amd64"
;;
aarch64|arm64)
GITHUB_ARCH="arm64"
;;
*)
error "Unsupported architecture: $ARCH"
echo -e "${YELLOW}Supported: x86_64, aarch64/arm64${NOCOLOR}"
exit 1
;;
esac
log "Architecture: $ARCH (using $GITHUB_ARCH)"
}
check_root() {
if [[ $EUID -ne 0 ]]; then
error "This script must be run as root"
echo -e "${YELLOW}Please run with sudo:${NOCOLOR}"
echo -e "${CYAN} sudo bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
exit 1
fi
}
get_latest_release() {
log "Fetching latest release..."
# Try to get latest release with better error handling
RELEASE_DATA=""
if command -v jq &>/dev/null; then
# Get the latest release (including pre-releases/nightly)
RELEASE_DATA=$(curl -fsSL -H "Accept: application/vnd.github+json" "$GITHUB_API/releases" 2>&1)
if [ $? -ne 0 ]; then
error "Failed to fetch release data from GitHub API"
error "Response: $RELEASE_DATA"
exit 1
fi
LATEST_RELEASE=$(echo "$RELEASE_DATA" | jq -r '.[0] | .tag_name' 2>/dev/null)
else
RELEASE_DATA=$(curl -fsSL "$GITHUB_API/releases" 2>&1)
if [ $? -ne 0 ]; then
error "Failed to fetch release data from GitHub API"
error "Response: $RELEASE_DATA"
exit 1
fi
LATEST_RELEASE=$(echo "$RELEASE_DATA" | grep '"tag_name"' | head -1 | cut -d'"' -f4)
fi
if [ -z "$LATEST_RELEASE" ] || [ "$LATEST_RELEASE" = "null" ]; then
error "Could not determine latest release version"
error "GitHub API response may be empty or rate-limited"
exit 1
fi
log "Latest release: $LATEST_RELEASE"
}
download_and_install_cli() {
BINARY_NAME="debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz"
DOWNLOAD_URL="$GITHUB_REPO/releases/download/$LATEST_RELEASE/$BINARY_NAME"
log "Downloading dbn from GitHub releases..."
log "URL: https://github.com/$DOWNLOAD_URL"
# Clean up any stale binaries
rm -f /tmp/network-cli /tmp/dbn.tar.gz "$INSTALL_DIR/dbn"
if ! curl -fsSL -o /tmp/dbn.tar.gz "https://github.com/$DOWNLOAD_URL"; then
error "Failed to download dbn"
exit 1
fi
# Verify the download was successful
if [ ! -f /tmp/dbn.tar.gz ]; then
error "Download file not found"
exit 1
fi
log "Extracting dbn..."
# Extract to /tmp
tar -xzf /tmp/dbn.tar.gz -C /tmp/
# Check for extracted binary (could be named network-cli or dbn)
EXTRACTED_BINARY=""
if [ -f /tmp/network-cli ]; then
EXTRACTED_BINARY="/tmp/network-cli"
elif [ -f /tmp/dbn ]; then
EXTRACTED_BINARY="/tmp/dbn"
else
error "Failed to extract binary (neither network-cli nor dbn found)"
ls -la /tmp/ | grep -E "(network|cli|dbn)"
exit 1
fi
chmod +x "$EXTRACTED_BINARY"
log "Installing dbn to $INSTALL_DIR..."
# Always rename to dbn during installation
mv "$EXTRACTED_BINARY" "$INSTALL_DIR/dbn"
# Sanity check: verify the installed binary runs (version command succeeds)
if ! "$INSTALL_DIR/dbn" version &>/dev/null; then
error "Installed dbn failed sanity check (version command failed)"
rm -f "$INSTALL_DIR/dbn"
exit 1
fi
# Clean up
rm -f /tmp/dbn.tar.gz
success "dbn installed successfully"
}
# Main flow
display_banner
# Check prerequisites
check_root
detect_os
check_architecture
# Download and install
get_latest_release
download_and_install_cli
# Show next steps
echo ""
echo -e "${GREEN}Installation complete!${NOCOLOR}"
echo ""
echo -e "${CYAN}Next, run the production setup:${NOCOLOR}"
echo ""
echo "Bootstrap node (first node, main branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap${NOCOLOR}"
echo ""
echo "Bootstrap node (nightly branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --branch nightly${NOCOLOR}"
echo ""
echo "Secondary node (join existing cluster):"
echo -e " ${BLUE}sudo dbn prod install --vps-ip <bootstrap_ip> --peers <multiaddr>${NOCOLOR}"
echo ""
echo "With HTTPS/domain:"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --domain example.com${NOCOLOR}"
echo ""
echo "For more help:"
echo -e " ${BLUE}dbn prod --help${NOCOLOR}"
echo ""

View File

@ -0,0 +1,53 @@
#!/bin/bash
# Setup local domains for DeBros Network development
# Adds entries to /etc/hosts for node-1.local through node-5.local
# Maps them to 127.0.0.1 for local development
set -e
HOSTS_FILE="/etc/hosts"
NODES=("node-1" "node-2" "node-3" "node-4" "node-5")
# Check if we have sudo access
if [ "$EUID" -ne 0 ]; then
echo "This script requires sudo to modify /etc/hosts"
echo "Please run: sudo bash scripts/setup-local-domains.sh"
exit 1
fi
# Function to add or update domain entry
add_domain() {
local domain=$1
local ip="127.0.0.1"
# Check if domain already exists
if grep -q "^[[:space:]]*$ip[[:space:]]\+$domain" "$HOSTS_FILE"; then
echo "$domain already configured"
return 0
fi
# Add domain to /etc/hosts
echo "$ip $domain" >> "$HOSTS_FILE"
echo "✓ Added $domain -> $ip"
}
echo "Setting up local domains for DeBros Network..."
echo ""
# Add each node domain
for node in "${NODES[@]}"; do
add_domain "${node}.local"
done
echo ""
echo "✓ Local domains configured successfully!"
echo ""
echo "You can now access nodes via:"
for node in "${NODES[@]}"; do
echo " - ${node}.local (HTTP Gateway)"
done
echo ""
echo "Example: curl http://node-1.local:8080/rqlite/http/db/status"

View File

@ -0,0 +1,85 @@
#!/bin/bash
# Test local domain routing for DeBros Network
# Validates that all HTTP gateway routes are working
set -e
NODES=("1" "2" "3" "4" "5")
GATEWAY_PORTS=(8080 8081 8082 8083 8084)
# Color codes
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Counters
PASSED=0
FAILED=0
# Test a single endpoint
test_endpoint() {
local node=$1
local port=$2
local path=$3
local description=$4
local url="http://node-${node}.local:${port}${path}"
printf "Testing %-50s ... " "$description"
if curl -s -f "$url" > /dev/null 2>&1; then
echo -e "${GREEN}✓ PASS${NC}"
PASSED=$((PASSED+1)) # plain assignment: ((PASSED++)) returns non-zero when PASSED is 0, which would trip set -e
return 0
else
echo -e "${RED}✗ FAIL${NC}"
FAILED=$((FAILED+1))
return 1
fi
}
echo "=========================================="
echo "DeBros Network Local Domain Tests"
echo "=========================================="
echo ""
# Test each node's HTTP gateway
for i in "${!NODES[@]}"; do
node=${NODES[$i]}
port=${GATEWAY_PORTS[$i]}
echo "Testing node-${node}.local (port ${port}):"
# Test health endpoint (|| true keeps a failed check from aborting the run under set -e)
test_endpoint "$node" "$port" "/health" "Node-${node} health check" || true
# Test RQLite HTTP endpoint
test_endpoint "$node" "$port" "/rqlite/http/db/execute" "Node-${node} RQLite HTTP" || true
# Test IPFS API endpoint (tolerated failure: IPFS may not be running; we only check that routing works)
test_endpoint "$node" "$port" "/ipfs/api/v0/version" "Node-${node} IPFS API" || true
# Test Cluster API endpoint (tolerated failure: Cluster may not be running; we only check that routing works)
test_endpoint "$node" "$port" "/cluster/health" "Node-${node} Cluster API" || true
echo ""
done
# Summary
echo "=========================================="
echo "Test Results"
echo "=========================================="
echo -e "${GREEN}Passed: $PASSED${NC}"
echo -e "${RED}Failed: $FAILED${NC}"
echo ""
if [ $FAILED -eq 0 ]; then
echo -e "${GREEN}✓ All tests passed!${NC}"
exit 0
else
echo -e "${YELLOW}⚠ Some tests failed (this is expected if services aren't running)${NC}"
exit 1
fi

4
test.sh Executable file
View File

@ -0,0 +1,4 @@
for prefix in raft ipfs ipfs-cluster olric; do
echo -n "$prefix: "
timeout 3 bash -c "echo | openssl s_client -connect node-hk19de.debros.network:7001 -servername $prefix.node-hk19de.debros.network 2>&1 | grep -q 'CONNECTED' && echo 'OK' || echo 'FAIL'"
done
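The same SNI probe sketched in Go, with the host and service prefixes taken from the loop above; InsecureSkipVerify because this is a reachability check against self-signed per-service certs. It is slightly stricter than the openssl version, which only greps for the TCP CONNECTED banner rather than completing the handshake:

package main

import (
    "crypto/tls"
    "fmt"
    "net"
    "time"
)

func main() {
    dialer := &net.Dialer{Timeout: 3 * time.Second} // mirrors `timeout 3`
    for _, prefix := range []string{"raft", "ipfs", "ipfs-cluster", "olric"} {
        conf := &tls.Config{
            ServerName:         prefix + ".node-hk19de.debros.network", // SNI, like -servername
            InsecureSkipVerify: true,                                   // reachability probe only
        }
        conn, err := tls.DialWithDialer(dialer, "tcp", "node-hk19de.debros.network:7001", conf)
        if err != nil {
            fmt.Println(prefix+": FAIL", err)
            continue
        }
        conn.Close()
        fmt.Println(prefix + ": OK")
    }
}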