Mirror of https://github.com/DeBrosOfficial/network.git, synced 2025-12-16 18:58:50 +00:00

Compare commits: main ... v0.68.1-nightly
No commits in common. "main" and "v0.68.1-nightly" have entirely different histories.

.github/workflows/release-apt.yml (vendored, 198 lines changed)
@ -1,198 +0,0 @@

name: Release APT Package

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      version:
        description: "Version to release (e.g., 0.69.20)"
        required: true

permissions:
  contents: write
  packages: write

jobs:
  build-deb:
    name: Build Debian Package
    runs-on: ubuntu-latest
    strategy:
      matrix:
        arch: [amd64, arm64]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}" # Remove 'v' prefix if present
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up QEMU (for arm64)
        if: matrix.arch == 'arm64'
        uses: docker/setup-qemu-action@v3

      - name: Build binary
        env:
          GOARCH: ${{ matrix.arch }}
          CGO_ENABLED: 0
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          COMMIT=$(git rev-parse --short HEAD)
          DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
          LDFLAGS="-X 'main.version=$VERSION' -X 'main.commit=$COMMIT' -X 'main.date=$DATE'"

          mkdir -p build/usr/local/bin
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go
          # Build the entire gateway package so helper files (e.g., config parsing) are included
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway ./cmd/gateway

      - name: Create Debian package structure
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"

          mkdir -p ${PKG_NAME}/DEBIAN
          mkdir -p ${PKG_NAME}/usr/local/bin

          # Copy binaries
          cp build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
          chmod 755 ${PKG_NAME}/usr/local/bin/*

          # Create control file
          cat > ${PKG_NAME}/DEBIAN/control << EOF
          Package: orama
          Version: ${VERSION}
          Section: net
          Priority: optional
          Architecture: ${ARCH}
          Depends: libc6
          Maintainer: DeBros Team <team@debros.network>
          Description: Orama Network - Distributed P2P Database System
           Orama is a distributed peer-to-peer network that combines
           RQLite for distributed SQL, IPFS for content-addressed storage,
           and LibP2P for peer discovery and communication.
          EOF

          # Create postinst script
          cat > ${PKG_NAME}/DEBIAN/postinst << 'EOF'
          #!/bin/bash
          set -e
          echo ""
          echo "Orama installed successfully!"
          echo ""
          echo "To set up your node, run:"
          echo "  sudo orama install"
          echo ""
          EOF
          chmod 755 ${PKG_NAME}/DEBIAN/postinst

      - name: Build .deb package
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"

          dpkg-deb --build ${PKG_NAME}
          mv ${PKG_NAME}.deb orama_${VERSION}_${ARCH}.deb

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: deb-${{ matrix.arch }}
          path: "*.deb"

  publish-apt:
    name: Publish to APT Repository
    needs: build-deb
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: packages

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}"
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up GPG
        if: env.GPG_PRIVATE_KEY != ''
        env:
          GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
        run: |
          echo "$GPG_PRIVATE_KEY" | gpg --import

      - name: Create APT repository structure
        run: |
          mkdir -p apt-repo/pool/main/o/orama
          mkdir -p apt-repo/dists/stable/main/binary-amd64
          mkdir -p apt-repo/dists/stable/main/binary-arm64

          # Move packages
          mv packages/deb-amd64/*.deb apt-repo/pool/main/o/orama/
          mv packages/deb-arm64/*.deb apt-repo/pool/main/o/orama/

          # Generate Packages files
          cd apt-repo
          dpkg-scanpackages --arch amd64 pool/ > dists/stable/main/binary-amd64/Packages
          dpkg-scanpackages --arch arm64 pool/ > dists/stable/main/binary-arm64/Packages

          gzip -k dists/stable/main/binary-amd64/Packages
          gzip -k dists/stable/main/binary-arm64/Packages

          # Generate Release file
          cat > dists/stable/Release << EOF
          Origin: Orama
          Label: Orama
          Suite: stable
          Codename: stable
          Architectures: amd64 arm64
          Components: main
          Description: Orama Network APT Repository
          EOF

          cd ..

      - name: Upload to release
        if: github.event_name == 'release'
        uses: softprops/action-gh-release@v1
        with:
          files: |
            apt-repo/pool/main/o/orama/*.deb
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Deploy APT repository to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./apt-repo
          destination_dir: apt
          keep_files: true
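
For context, a minimal, hedged sketch of how a client machine might consume the repository this workflow publishes. The GitHub Pages URL and the `[trusted=yes]` shortcut are illustrative assumptions only; the project README documents the actual APT repository URL, and a signed repository would use a keyring instead of `trusted=yes`.

# Hypothetical client-side setup for the published APT repository
# (placeholder URL; suite "stable" and component "main" match the Release file above)
echo "deb [trusted=yes] https://debrosofficial.github.io/network/apt stable main" | \
  sudo tee /etc/apt/sources.list.d/orama.list
sudo apt-get update
sudo apt-get install orama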

.gitignore (vendored, 2 lines changed)
@ -75,5 +75,3 @@ data/bootstrap/rqlite/
configs/

.dev/

.gocache/

CHANGELOG.md (668 lines changed)
@ -13,588 +13,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Deprecated

### Fixed

## [0.72.1] - 2025-12-09

### Added

### Changed

- Cleaned up the README by removing outdated feature lists and complex examples, focusing on the Quick Start guide.
- Updated development configuration to correctly set advertised addresses for RQLite, improving internal cluster communication.
- Simplified the build process for the `debros-gateway` binary in the Debian release workflow.

### Deprecated

### Removed

### Fixed

## [0.72.0] - 2025-11-28

### Added

- Interactive prompt for selecting local or remote gateway URL during CLI login.
- Support for discovering and configuring IPFS Cluster peers during installation and runtime via the gateway status endpoint.
- New CLI flags (`--ipfs-cluster-peer`, `--ipfs-cluster-addrs`) added to the `prod install` command for cluster discovery (see the sketch after this entry).

### Changed

- Renamed the main network node executable from `node` to `orama-node` and the gateway executable to `orama-gateway`.
- Improved the `auth login` flow to use a TLS-aware HTTP client, supporting Let's Encrypt staging certificates for remote gateways.
- Updated the production installer to set `CAP_NET_BIND_SERVICE` on `orama-node` to allow binding to privileged ports (80/443) without root.
- Updated the production installer to configure IPFS Cluster to listen on port 9098 for consistent multi-node communication.
- Refactored the `prod install` process to generate configurations before initializing services, ensuring configuration files are present.

### Deprecated

### Removed

### Fixed

- Corrected the IPFS Cluster API port used in the `node.yaml` template from 9096 to 9098 to match the cluster's LibP2P port.
- Fixed the `anyone-client` systemd service configuration to use the correct binary name and allow writing to the home directory.
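
As referenced above, a minimal sketch of how the new cluster-discovery flags might be passed to the installer. The peer ID, multiaddress, and use of sudo are placeholders and assumptions for illustration only.

# Hypothetical invocation of the production installer with the 0.72.0 cluster-discovery flags
sudo orama prod install \
  --ipfs-cluster-peer 12D3KooWExamplePeerID \
  --ipfs-cluster-addrs /ip4/203.0.113.10/tcp/9098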

## [0.71.0] - 2025-11-27

### Added

- Added `certutil` package for managing self-signed CA and node certificates.
- Added support for SNI-based TCP routing for internal services (RQLite Raft, IPFS, Olric) when HTTPS is enabled.
- Added `--dry-run`, `--no-pull`, and DNS validation checks to the production installer.
- Added `tlsutil` package to centralize TLS configuration and support trusted self-signed certificates for internal communication.

### Changed

- Refactored the production installer to use a unified node architecture, removing the separate `debros-gateway` service and embedding the gateway within `debros-node`.
- Improved service health checks in the CLI with exponential backoff retries for better reliability during startup and upgrades.
- Updated RQLite to listen on an internal port (7002) when SNI is enabled, allowing the SNI gateway to handle external port 7001.
- Enhanced systemd service files with stricter security settings (e.g., `ProtectHome=read-only`, `ProtectSystem=strict`).
- Updated IPFS configuration to bind Swarm to all interfaces (0.0.0.0) for external connectivity.

### Deprecated

### Removed

### Fixed

- Fixed an issue where the `anyone-client` installation could fail due to missing NPM cache directories by ensuring proper initialization and ownership.

## [0.70.0] - 2025-11-26

### Added

### Changed

- The HTTP Gateway is now embedded directly within each network node, simplifying deployment and removing the need for a separate gateway service.
- The configuration for the full API Gateway (including Auth, PubSub, and internal service routing) is now part of the main node configuration.
- Development environment setup no longer generates a separate `gateway.yaml` file or starts a standalone gateway process.
- Updated local environment descriptions and default gateway fallback to reflect the node-1 designation.

### Deprecated

### Removed

### Fixed

- Updated the installation instructions in the README to reflect the correct APT repository URL.

## [0.69.22] - 2025-11-26

### Added

- Added 'Peer connection status' to the health check list in the README.

### Changed

- Unified development environment nodes, renaming 'bootstrap', 'bootstrap2', 'node2', 'node3', 'node4' to 'node-1' through 'node-5'.
- Renamed internal configuration fields and CLI flags from 'bootstrap peers' to 'peers' for consistency across the unified node architecture.
- Updated development environment configuration files and data directories to use the unified 'node-N' naming scheme (e.g., `node-1.yaml`, `data/node-1`).
- Changed the default main gateway port in the development environment from 6001 to 6000, reserving 6001-6005 for individual node gateways.
- Removed the explicit 'node.type' configuration field (bootstrap/node) as all nodes now use a unified configuration.
- Improved RQLite cluster joining logic to prioritize joining the most up-to-date peer (highest Raft log index) instead of prioritizing 'bootstrap' nodes.

### Deprecated

### Removed

### Fixed

- Fixed migration logic to correctly handle the transition from old unified data directories to the new 'node-1' structure.

## [0.69.21] - 2025-11-26

### Added

- Introduced a new interactive TUI wizard for production installation (`sudo orama install`).
- Added support for APT package repository generation and publishing via GitHub Actions.
- Added new simplified production CLI commands (`orama install`, `orama upgrade`, `orama status`, etc.) as aliases for the legacy `orama prod` commands (see the usage sketch after this entry).
- Added support for a unified HTTP reverse proxy gateway within the node process, routing internal services (RQLite, IPFS, Cluster) via a single port.
- Added support for SNI-based TCP routing for secure access to services like RQLite Raft and IPFS Swarm.

### Changed

- Renamed the primary CLI binary from `dbn` to `orama` across the entire codebase, documentation, and build system.
- Migrated the production installation directory structure from `~/.debros` to `~/.orama`.
- Consolidated production service management into unified systemd units (e.g., `debros-node.service` replaces `debros-node-bootstrap.service` and `debros-node-node.service`).
- Updated the default IPFS configuration to bind API and Gateway addresses to `127.0.0.1` for enhanced security, relying on the new unified gateway for external access.
- Updated RQLite service configuration to bind to `127.0.0.1` for HTTP and Raft ports, relying on the new SNI gateway for external cluster communication.

### Deprecated

### Removed

### Fixed

- Corrected configuration path resolution logic to correctly check for config files in the new `~/.orama/` directory structure.
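
A brief usage sketch of the simplified commands named in this entry; it only restates the aliases listed above, and the exact sudo requirements are assumptions.

# New simplified commands (aliases for the legacy `orama prod ...` forms)
sudo orama install    # interactive TUI installer (alias for `sudo orama prod install`)
orama status          # alias for `orama prod status`
sudo orama upgrade    # alias for `orama prod upgrade`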

## [0.69.20] - 2025-11-22

### Added

- Added verification step to ensure the IPFS Cluster secret is correctly written after configuration updates.

### Changed

- Improved reliability of `anyone-client` installation and verification by switching to `npx` for execution and checks, especially for globally installed scoped packages.
- Updated the `anyone-client` systemd service to use `npx` for execution and explicitly set the PATH environment variable to ensure the client runs correctly.

### Deprecated

### Removed

### Fixed

## [0.69.19] - 2025-11-22

### Added

### Changed

- Updated the installation command for 'anyone-client' to use the correct scoped package name (@anyone-protocol/anyone-client).

### Deprecated

### Removed

### Fixed

## [0.69.18] - 2025-11-22

### Added

- Integrated `anyone-client` (SOCKS5 proxy) installation and systemd service (`debros-anyone-client.service`).
- Added port availability checking logic to prevent conflicts when starting services (e.g., `anyone-client` on port 9050).

### Changed

- Updated system dependencies installation to include `nodejs` and `npm`, required for `anyone-client`.
- Modified Olric configuration generation to bind to the specific VPS IP if provided, otherwise defaulting to 0.0.0.0.
- Improved IPFS Cluster initialization by passing `CLUSTER_SECRET` directly as an environment variable.

### Deprecated

### Removed

### Fixed

## [0.69.17] - 2025-11-21

### Added

- Initial implementation of a Push Notification Service for the Gateway, utilizing the Expo API.
- Detailed documentation for RQLite operations, monitoring, and troubleshooting was added to the README.

### Changed

- Improved `make stop` and `dbn dev down` commands to ensure all development services are forcefully killed after a graceful shutdown attempt.
- Refactored RQLite startup logic to simplify cluster establishment and remove complex, error-prone leadership/recovery checks, relying on RQLite's built-in join mechanism.
- RQLite logs are now written to individual log files (e.g., `~/.orama/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- Improved peer exchange discovery logging to suppress expected 'protocols not supported' warnings from lightweight clients like the Gateway.

### Deprecated

### Removed

### Fixed

## [0.69.17] - 2025-11-21

### Added

- Initial implementation of a Push Notification Service for the Gateway, utilizing the Expo API.
- Detailed documentation for RQLite operations, monitoring, and troubleshooting in the README.

### Changed

- Improved `make stop` and `dbn dev down` commands to ensure all development services are forcefully killed after a graceful shutdown attempt.
- Refactored RQLite startup logic to simplify cluster establishment and remove complex, error-prone leadership/recovery checks, relying on RQLite's built-in join mechanism.
- RQLite logs are now written to individual log files (e.g., `~/.orama/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- Improved peer exchange discovery logging to suppress expected 'protocols not supported' warnings from lightweight clients like the Gateway.

### Deprecated

### Removed

### Fixed

## [0.69.16] - 2025-11-16

### Added

### Changed

- Improved the `make stop` command to ensure a more robust and graceful shutdown of development services.
- Enhanced the `make kill` command and underlying scripts for more reliable force termination of stray development processes.
- Increased the graceful shutdown timeout for development processes from 500ms to 2 seconds before resorting to force kill.

### Deprecated

### Removed

### Fixed

## [0.69.15] - 2025-11-16

### Added

### Changed

- Improved authentication flow to handle wallet addresses case-insensitively during nonce creation and verification.

### Deprecated

### Removed

### Fixed

## [0.69.14] - 2025-11-14

### Added

- Added support for background reconnection to the Olric cache cluster in the Gateway, improving resilience if the cache is temporarily unavailable.

### Changed

- Improved the RQLite database client connection handling to ensure connections are properly closed and reused safely.
- RQLite Manager now updates its advertised addresses if cluster discovery provides more accurate information (e.g., replacing localhost).

### Deprecated

### Removed

### Fixed

- Removed internal RQLite process management from the development runner, as RQLite is now expected to be managed externally or via Docker.

## [0.69.13] - 2025-11-14

### Added

### Changed

- The Gateway service now waits for the Olric cache service to start before attempting initialization.
- Improved robustness of Olric cache client initialization with retry logic and exponential backoff.

### Deprecated

### Removed

### Fixed

- Corrected the default path logic for 'gateway.yaml' to prioritize the production data directory while maintaining fallback to legacy paths.

## [0.69.12] - 2025-11-14

### Added

- The `prod install` command now requires the `--cluster-secret` flag for all non-bootstrap nodes to ensure correct IPFS Cluster configuration.

### Changed

- Updated IPFS configuration to bind API and Gateway addresses to `0.0.0.0` instead of `127.0.0.1` for better network accessibility.

### Deprecated

### Removed

### Fixed

## [0.69.11] - 2025-11-13

### Added

- Added a new comprehensive shell script (`scripts/test-cluster-health.sh`) for checking the health and replication status of RQLite, IPFS, and IPFS Cluster across production environments.

### Changed

- Improved RQLite cluster discovery logic to ensure `peers.json` is correctly generated and includes the local node, which is crucial for reliable cluster recovery.
- Refactored logging across discovery and RQLite components for cleaner, more concise output, especially for routine operations.
- Updated the installation and upgrade process to correctly configure IPFS Cluster bootstrap peers using the node's public IP, improving cluster formation reliability.

### Deprecated

### Removed

### Fixed

- Fixed an issue where RQLite recovery operations (like clearing Raft state) did not correctly force the regeneration of `peers.json`, preventing successful cluster rejoin.
- Corrected the port calculation logic for IPFS Cluster to ensure the correct LibP2P listen port (9098) is used for bootstrap peer addressing.

## [0.69.10] - 2025-11-13

### Added

- Automatic health monitoring and recovery for RQLite cluster split-brain scenarios.
- RQLite now waits indefinitely for the minimum cluster size to be met before starting, preventing single-node cluster formation.

### Changed

- Updated the default IPFS swarm port from 4001 to 4101 to avoid conflicts with LibP2P.

### Deprecated

### Removed

### Fixed

- Resolved an issue where RQLite could start as a single-node cluster if peer discovery was slow, by enforcing minimum cluster size before startup.
- Improved cluster recovery logic to correctly use `bootstrap-expect` for new clusters and ensure proper process restart during recovery.

## [0.69.9] - 2025-11-12

### Added

- Added automatic recovery logic for RQLite (database) nodes stuck in a configuration mismatch, which attempts to clear stale Raft state if peers have more recent data.
- Added logic to discover IPFS Cluster peers directly from the LibP2P host's peerstore, improving peer discovery before the Cluster API is fully operational.

### Changed

- Improved the IPFS Cluster configuration update process to prioritize writing to the `peerstore` file before updating `service.json`, ensuring the source of truth is updated first.

### Deprecated

### Removed

### Fixed

## [0.69.8] - 2025-11-12

### Added

- Improved `dbn prod start` to automatically unmask and re-enable services if they were previously masked or disabled.
- Added automatic discovery and configuration of all IPFS Cluster peers during runtime to improve cluster connectivity.

### Changed

- Enhanced `dbn prod start` and `dbn prod stop` reliability by adding service state resets, retries, and ensuring services are disabled when stopped.
- Filtered peer exchange addresses in LibP2P discovery to only include the standard LibP2P port (4001), preventing exposure of internal service ports.

### Deprecated

### Removed

### Fixed

- Improved IPFS Cluster bootstrap configuration repair logic to automatically infer and update bootstrap peer addresses if the bootstrap node is available.

## [0.69.7] - 2025-11-12

### Added

### Changed

- Improved logic for determining Olric server addresses during configuration generation, especially for bootstrap and non-bootstrap nodes.
- Enhanced IPFS Cluster configuration to correctly handle IPv6 addresses when updating bootstrap peers.

### Deprecated

### Removed

### Fixed

## [0.69.6] - 2025-11-12

### Added

- Improved production service health checks and port availability validation during install, upgrade, start, and restart commands.
- Added service aliases (node, ipfs, cluster, gateway, olric) to the `dbn prod logs` command for easier log viewing (see the sketch after this entry).

### Changed

- Updated node configuration logic to correctly advertise public IP addresses in multiaddrs (for P2P discovery) and RQLite addresses, improving connectivity for nodes behind NAT/firewalls.
- Enhanced `dbn prod install` and `dbn prod upgrade` to automatically detect and preserve existing VPS IP, domain, and cluster join information.
- Improved RQLite cluster discovery to automatically replace localhost/loopback addresses with the actual public IP when exchanging metadata between peers.
- Updated `dbn prod install` to require `--vps-ip` for all node types (bootstrap and regular) for proper network configuration.
- Improved error handling and robustness in the installation script when fetching the latest release from GitHub.

### Deprecated

### Removed

### Fixed

- Fixed an issue where the RQLite process would wait indefinitely for a join target; it now uses a 5-minute timeout.
- Corrected the location of the gateway configuration file reference in the README.
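
A short usage sketch of the log aliases named in this entry; the service each alias maps to is inferred from its name.

# View logs for an individual production service via its alias
dbn prod logs node      # main network node
dbn prod logs ipfs      # IPFS daemon
dbn prod logs cluster   # IPFS Cluster
dbn prod logs gateway   # HTTP gateway
dbn prod logs olric     # Olric cache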

## [0.69.5] - 2025-11-11

### Added

### Changed

- Moved the default location for `gateway.yaml` configuration file from `configs/` to the new `data/` directory for better organization.
- Updated configuration path logic to search for `gateway.yaml` in the new `data/` directory first.

### Deprecated

### Removed

### Fixed

## [0.69.4] - 2025-11-11

### Added

### Changed

- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-\*).
- Improved log file provisioning to only create necessary log files based on the node type being installed (bootstrap or node).

### Deprecated

### Removed

### Fixed

## [0.69.3] - 2025-11-11

### Added

- Added `--ignore-resource-checks` flag to the install command to skip disk, RAM, and CPU prerequisite validation.

### Changed

### Deprecated

### Removed

### Fixed

## [0.69.2] - 2025-11-11

### Added

- Added `--no-pull` flag to `dbn prod upgrade` to skip git repository updates and use existing source code.

### Changed

- Removed deprecated environment management commands (`env`, `devnet`, `testnet`, `local`).
- Removed deprecated network commands (`health`, `peers`, `status`, `peer-id`, `connect`, `query`, `pubsub`) from the main CLI interface.

### Deprecated

### Removed

### Fixed

## [0.69.1] - 2025-11-11

### Added

- Added automatic service stopping before binary upgrades during the `prod upgrade` process to ensure a clean update.
- Added logic to preserve existing configuration settings (like `bootstrap_peers`, `domain`, and `rqlite_join_address`) when regenerating configurations during `prod upgrade`.

### Changed

- Improved the `prod upgrade` process to be more robust by preserving critical configuration details and gracefully stopping services.

### Deprecated

### Removed

### Fixed

## [0.69.0] - 2025-11-11

### Added

- Added comprehensive documentation for setting up HTTPS using a domain name, including configuration steps for both installation and existing setups.
- Added the `--force` flag to the `install` command for reconfiguring all settings.
- Added new log targets (`ipfs-cluster`, `rqlite`, `olric`) and improved the `dbn prod logs` command documentation.

### Changed

- Improved the IPFS Cluster configuration logic to ensure the cluster secret and IPFS API port are correctly synchronized during updates.
- Refined the directory structure creation process to ensure node-specific data directories are created only when initializing services.

### Deprecated

### Removed

### Fixed

## [0.68.1] - 2025-11-11

### Added

- Pre-create log files during setup to ensure correct permissions for systemd logging.

### Changed

- Improved binary installation process to handle copying files individually, preventing potential shell wildcard issues.
- Enhanced ownership fixing logic during installation to ensure all files created by root (especially during service initialization) are correctly owned by the 'debros' user.

### Deprecated

### Removed

### Fixed

## [0.68.0] - 2025-11-11

### Added

- Added comprehensive documentation for production deployment, including installation, upgrade, service management, and troubleshooting.
- Added new CLI commands (`dbn prod start`, `dbn prod stop`, `dbn prod restart`) for convenient management of production systemd services.

### Changed

- Updated IPFS configuration during production installation to use port 4501 for the API (to avoid conflicts with RQLite on port 5001) and port 8080 for the Gateway.

### Deprecated

@ -602,18 +27,15 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Ensured that IPFS configuration automatically disables AutoConf when a private swarm key is present during installation and upgrade, preventing startup errors.

## [0.67.7] - 2025-11-11

### Added

- Added support for specifying the Git branch (main or nightly) during `prod install` and `prod upgrade`.
- The chosen branch is now saved and automatically used for future upgrades unless explicitly overridden.

### Changed

- Updated help messages and examples for production commands to include branch options.

### Deprecated

@ -621,17 +43,12 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.67.6] - 2025-11-11

### Added

### Changed

- The binary installer now updates the source repository if it already exists, instead of only cloning it if missing.

### Deprecated

@ -639,18 +56,15 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Resolved an issue where disabling AutoConf in the IPFS repository could leave 'auto' placeholders in the config, causing startup errors.

## [0.67.5] - 2025-11-11

### Added

- Added `--restart` option to `dbn prod upgrade` to automatically restart services after upgrade.
- The gateway now supports an optional `--config` flag to specify the configuration file path.

### Changed

- Improved `dbn prod upgrade` process to better handle existing installations, including detecting node type and ensuring configurations are updated to the latest format.
- Configuration loading logic for `node` and `gateway` commands now correctly handles absolute paths passed via command line or systemd.

@ -659,17 +73,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed an issue during production upgrades where IPFS repositories in private swarms might fail to start due to `AutoConf` not being disabled.

## [0.67.4] - 2025-11-11

### Added

### Changed

- Improved configuration file loading logic to support absolute paths for config files.
- Updated IPFS Cluster initialization during setup to run `ipfs-cluster-service init` and automatically configure the cluster secret.
- IPFS repositories initialized with a private swarm key will now automatically disable AutoConf.

@ -679,17 +89,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.debros/`) and production (`~/.debros/configs/`) directories.
- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.orama/`) and production (`~/.orama/configs/`) directories.

## [0.67.3] - 2025-11-11

### Added

### Changed

- Improved reliability of IPFS (Kubo) installation by switching from a single install script to the official step-by-step download and extraction process.
- Updated IPFS (Kubo) installation to use version v0.38.2.
- Enhanced binary installation routines (RQLite, IPFS, Go) to ensure the installed binaries are immediately available in the current process's PATH.

@ -699,17 +105,14 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed potential installation failures for RQLite by adding error checking to the binary copy command.

## [0.67.2] - 2025-11-11

### Added

- Added a new utility function to reliably resolve the full path of required external binaries (like ipfs, rqlited, etc.).

### Changed

- Improved service initialization by validating the availability and path of all required external binaries before creating systemd service units.
- Updated systemd service generation logic to use the resolved, fully-qualified paths for external binaries instead of relying on hardcoded paths.

@ -718,17 +121,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Changed IPFS initialization from a warning to a fatal error if the repo fails to initialize, ensuring setup stops on critical failures.

## [0.67.1] - 2025-11-11

### Added

### Changed

- Improved disk space check logic to correctly check the parent directory if the specified path does not exist.

### Deprecated

@ -736,18 +135,15 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed an issue in the installation script where the extracted CLI binary might be named 'dbn' instead of 'network-cli', ensuring successful installation regardless of the extracted filename.

## [0.67.0] - 2025-11-11

### Added

- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.

### Changed

- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.

@ -756,18 +152,15 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.

## [0.67.0] - 2025-11-11

### Added

- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining (see the sketch after this entry).

### Changed

- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.

@ -776,17 +169,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
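
As referenced in this entry, a hedged sketch of installing nodes with the new flags; the IP addresses, the join target format, and the use of sudo are placeholders and assumptions rather than documented syntax.

# Hypothetical: install a secondary bootstrap node that joins an existing cluster
sudo dbn prod install --vps-ip 203.0.113.20 --bootstrap-join 203.0.113.10:7001

# Hypothetical: install a regular (non-bootstrap) node, which now requires --vps-ip
sudo dbn prod install --vps-ip 203.0.113.30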

## [0.66.1] - 2025-11-11

### Added

### Changed

- Allow bootstrap nodes to optionally define a join address to synchronize with another bootstrap cluster.

### Deprecated

@ -794,18 +183,14 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.66.0] - 2025-11-11

### Added

- Pre-installation checks for minimum system resources (10GB disk space, 2GB RAM, 2 CPU cores) are now performed during setup.
- All systemd services (IPFS, RQLite, Olric, Node, Gateway) now log directly to dedicated files in the logs directory instead of using the system journal.

### Changed

- Improved logging instructions in the setup completion message to reference the new dedicated log files.

### Deprecated

@ -813,18 +198,14 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.65.0] - 2025-11-11

### Added

- Expanded the local development environment (`dbn dev up`) from 3 nodes to 5 nodes (2 bootstraps and 3 regular nodes) for better testing of cluster resilience and quorum.
- Added a new `bootstrap2` node configuration and service to the development topology.

### Changed

- Updated the `dbn dev up` command to configure and start all 5 nodes and associated services (IPFS, RQLite, IPFS Cluster).
- Modified RQLite and LibP2P health checks in the development environment to require a quorum of 3 out of 5 nodes.
- Refactored development environment configuration logic using a new `Topology` structure for easier management of node ports and addresses.

@ -834,17 +215,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Ensured that secondary bootstrap nodes can correctly join the primary RQLite cluster in the development environment.

## [0.64.1] - 2025-11-10

### Added

### Changed

- Improved the accuracy of the Raft log index reporting by falling back to reading persisted snapshot metadata from disk if the running RQLite instance is not yet reachable or reports a zero index.

### Deprecated

@ -852,20 +229,16 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.64.0] - 2025-11-10

### Added

- Comprehensive End-to-End (E2E) test suite for Gateway API endpoints (Cache, RQLite, Storage, Network, Auth).
- New E2E tests for concurrent operations and TTL expiry in the distributed cache.
- New E2E tests for LibP2P peer connectivity and discovery.

### Changed

- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.debros` configuration files, removing the need for environment variables.
- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.orama` configuration files, removing the need for environment variables.
- The `/v1/network/peers` endpoint now returns a flattened list of multiaddresses for all connected peers.
- Improved robustness of Cache API handlers to correctly identify and return 404 (Not Found) errors when keys are missing, even when wrapped by underlying library errors.
- The RQLite transaction handler now supports the legacy `statements` array format in addition to the `ops` array format for easier use (see the sketch after this entry).

@ -876,17 +249,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Corrected IPFS Add operation to return the actual file size (byte count) instead of the DAG size in the response.
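
As referenced above, a hedged sketch of the two transaction payload shapes. The endpoint path, the field names inside `ops`, and the authentication header are assumptions for illustration, not taken from the gateway's documented API.

# Legacy `statements` array format (hypothetical endpoint and payload)
curl -X POST "$GATEWAY_URL/v1/rqlite/transaction" \
  -H "Authorization: Bearer $API_KEY" \
  -d '{"statements": ["CREATE TABLE t (id INTEGER)", "INSERT INTO t VALUES (1)"]}'

# Equivalent request using the `ops` array format (shape assumed)
curl -X POST "$GATEWAY_URL/v1/rqlite/transaction" \
  -H "Authorization: Bearer $API_KEY" \
  -d '{"ops": [{"sql": "CREATE TABLE t (id INTEGER)"}, {"sql": "INSERT INTO t VALUES (1)"}]}'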

## [0.63.3] - 2025-11-10

### Added

### Changed

- Improved RQLite cluster stability by automatically clearing stale Raft state on startup if peers have a higher log index, allowing the node to join cleanly.

### Deprecated

@ -894,17 +263,12 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.63.2] - 2025-11-10

### Added

### Changed

- Improved process termination logic in development environments to ensure child processes are also killed.
- Enhanced the `dev-kill-all.sh` script to reliably kill all processes using development ports, including orphaned processes and their children.

@ -913,17 +277,12 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.63.1] - 2025-11-10

### Added

### Changed

- Increased the default minimum cluster size for database environments from 1 to 3.

### Deprecated

@ -931,18 +290,15 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Prevented unnecessary cluster recovery attempts when a node starts up as the first node (fresh bootstrap).

## [0.63.0] - 2025-11-10

### Added

- Added a new `kill` command to the Makefile for forcefully shutting down all development processes.
- Introduced a new `stop` command in the Makefile for graceful shutdown of development processes.

### Changed

- The `kill` command now performs a graceful shutdown attempt followed by a force kill of any lingering processes and verifies that development ports are free.

### Deprecated

@ -950,17 +306,13 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

## [0.62.0] - 2025-11-10

### Added

- The `prod status` command now correctly checks for both 'bootstrap' and 'node' service variants.

### Changed

- The production installation process now generates secrets (like the cluster secret and peer ID) before initializing services. This ensures all necessary secrets are available when services start.
- The `prod install` command now displays the actual Peer ID upon completion instead of a placeholder.

@ -969,18 +321,15 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant

### Removed

### Fixed

- Fixed an issue where IPFS Cluster initialization was using a hardcoded configuration file instead of relying on the standard `ipfs-cluster-service init` process.

## [0.61.0] - 2025-11-10

### Added

- Introduced a new simplified authentication flow (`dbn auth login`) that allows users to generate an API key directly from a wallet address without signature verification (for development/testing purposes).
|
||||||
- Added a new `PRODUCTION_INSTALL.md` guide for production deployment using the `dbn prod` command suite.
|
- Added a new `PRODUCTION_INSTALL.md` guide for production deployment using the `dbn prod` command suite.
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
|
|
||||||
- Renamed the primary CLI binary from `network-cli` to `dbn` across all configurations, documentation, and source code.
|
- Renamed the primary CLI binary from `network-cli` to `dbn` across all configurations, documentation, and source code.
|
||||||
- Refactored the IPFS configuration logic in the development environment to directly modify the IPFS config file instead of relying on shell commands, improving stability.
|
- Refactored the IPFS configuration logic in the development environment to directly modify the IPFS config file instead of relying on shell commands, improving stability.
|
||||||
- Improved the IPFS Cluster peer count logic to correctly handle NDJSON streaming responses from the `/peers` endpoint.
|
- Improved the IPFS Cluster peer count logic to correctly handle NDJSON streaming responses from the `/peers` endpoint.
|
||||||
@ -991,7 +340,6 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
|
|||||||
### Removed
|
### Removed
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
|
|
||||||
\n
|
\n
|
||||||
|
|
||||||
## [0.60.1] - 2025-11-09
|
## [0.60.1] - 2025-11-09
|
||||||
@@ -1325,7 +673,7 @@

- Interactive domain configuration during `dbn setup` command
- Automatic port availability checking for ports 80 and 443 before enabling HTTPS
- DNS resolution verification to ensure domain points to the server IP
-- TLS certificate cache directory management (`~/.orama/tls-cache`)
+- TLS certificate cache directory management (`~/.debros/tls-cache`)
- Gateway automatically serves HTTP (port 80) for ACME challenges and HTTPS (port 443) for traffic
- New gateway config fields: `enable_https`, `domain_name`, `tls_cache_dir`
- **Domain Validation**: Added domain name validation and DNS verification helpers in setup CLI

@@ -1395,8 +743,8 @@

- Automatic GitHub Release creation with changelog and artifacts
- Semantic versioning support with pre-release handling
- **Environment Configuration**: Multi-environment switching system
-- Default environments: local (http://localhost:6001), devnet (https://devnet.orama.network), testnet (https://testnet.orama.network)
+- Default environments: local (http://localhost:6001), devnet (https://devnet.debros.network), testnet (https://testnet.debros.network)
-- Stored in `~/.orama/environments.json`
+- Stored in `~/.debros/environments.json`
- CLI auto-uses active environment for authentication and operations
- **Comprehensive Documentation**
- `.cursor/RELEASES.md`: Overview and quick start

@@ -1425,7 +773,7 @@

- Explicit control over LibP2P listen addresses for better localhost/development support
- Production/development mode detection for NAT services (disabled for localhost, enabled for production)
- Process management with .dev/pids directory for background process tracking
-- Centralized logging to ~/.orama/logs/ for all network services
+- Centralized logging to ~/.debros/logs/ for all network services

### Changed

@@ -1475,7 +823,7 @@

### Changed

- Updated readme
-- Where we read .yaml files from and where data is saved to ~/.orama
+- Where we read .yaml files from and where data is saved to ~/.debros

### Deprecated

@@ -1604,7 +952,7 @@

### Changed

-- replaced git.orama.io with github.com
+- replaced git.debros.io with github.com

### Deprecated
@@ -27,14 +27,14 @@ make deps

Useful CLI commands:

```bash
-./bin/orama health
+./bin/dbn health
-./bin/orama peers
+./bin/dbn peers
-./bin/orama status
+./bin/dbn status
````

## Versioning

-- The CLI reports its version via `orama version`.
+- The CLI reports its version via `dbn version`.
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.

## Pull Requests
88 Makefile

@@ -6,12 +6,12 @@ test:

go test -v $(TEST)

# Gateway-focused E2E tests assume gateway and nodes are already running
-# Auto-discovers configuration from ~/.orama and queries database for API key
+# Auto-discovers configuration from ~/.debros and queries database for API key
# No environment variables required
.PHONY: test-e2e
test-e2e:
@echo "Running comprehensive E2E tests..."
-@echo "Auto-discovering configuration from ~/.orama..."
+@echo "Auto-discovering configuration from ~/.debros..."
go test -v -tags e2e ./e2e

# Network - Distributed P2P Database System

@@ -19,7 +19,7 @@ test-e2e:

.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill

-VERSION := 0.72.1
+VERSION := 0.68.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'

@@ -29,11 +29,11 @@ build: deps

@echo "Building network executables (version=$(VERSION))..."
@mkdir -p bin
go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
-go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
+go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
-go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
+go build -ldflags "$(LDFLAGS)" -o bin/dbn cmd/cli/main.go
# Inject gateway build metadata via pkg path variables
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
-@echo "Build complete! Run ./bin/orama version"
+@echo "Build complete! Run ./bin/dbn version"

# Install git hooks
install-hooks:
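Because both branches inject `main.version`, `main.commit`, and `main.date` through the same LDFLAGS, the linked-in values can be checked after a build with the version subcommand (the binary name differs per branch, and the expected output is a sketch based on the `Printf` calls in `cmd/cli/main.go`; the commit hash is a placeholder):

```bash
make build
./bin/dbn version      # nightly branch; main builds ./bin/orama instead
# expected shape: dbn 0.68.0 (commit abc1234)
```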
@@ -49,49 +49,49 @@ clean:

# Run bootstrap node (auto-selects identity and data dir)
run-node:
-@echo "Starting node..."
+@echo "Starting bootstrap node..."
-@echo "Config: ~/.orama/node.yaml"
+@echo "Config: ~/.debros/bootstrap.yaml"
-go run ./cmd/orama-node --config node.yaml
+@echo "Generate it with: dbn config init --type bootstrap"
+go run ./cmd/node --config node.yaml

-# Run second node - requires join address
+# Run second node (regular) - requires join address of bootstrap node
+# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
run-node2:
-@echo "Starting second node..."
+@echo "Starting regular node (node.yaml)..."
-@echo "Config: ~/.orama/node2.yaml"
+@echo "Config: ~/.debros/node.yaml"
-go run ./cmd/orama-node --config node2.yaml
+@echo "Generate it with: dbn config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
+go run ./cmd/node --config node2.yaml

-# Run third node - requires join address
+# Run third node (regular) - requires join address of bootstrap node
+# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
run-node3:
-@echo "Starting third node..."
+@echo "Starting regular node (node2.yaml)..."
-@echo "Config: ~/.orama/node3.yaml"
+@echo "Config: ~/.debros/node2.yaml"
-go run ./cmd/orama-node --config node3.yaml
+@echo "Generate it with: dbn config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
+go run ./cmd/node --config node3.yaml

# Run gateway HTTP server
+# Usage examples:
+# make run-gateway # uses ~/.debros/gateway.yaml
+# Config generated with: dbn config init --type gateway
run-gateway:
@echo "Starting gateway HTTP server..."
-@echo "Note: Config must be in ~/.orama/data/gateway.yaml"
+@echo "Note: Config must be in ~/.debros/gateway.yaml"
-go run ./cmd/orama-gateway
+@echo "Generate it with: dbn config init --type gateway"
+go run ./cmd/gateway

-# Setup local domain names for development
-setup-domains:
-@echo "Setting up local domains..."
-@sudo bash scripts/setup-local-domains.sh

# Development environment target
-# Uses orama dev up to start full stack with dependency and port checking
+# Uses dbn dev up to start full stack with dependency and port checking
-dev: build setup-domains
+dev: build
-@./bin/orama dev up
+@./bin/dbn dev up

-# Graceful shutdown of all dev services
+# Kill all processes (graceful shutdown + force kill stray processes)
-stop:
-@if [ -f ./bin/orama ]; then \
-./bin/orama dev down || true; \
-fi
-@bash scripts/dev-kill-all.sh

-# Force kill all processes (immediate termination)
kill:
@bash scripts/dev-kill-all.sh

+stop:
+@./bin/dbn dev down

# Help
help:
@echo "Available targets:"

@@ -102,17 +102,19 @@ help:

@echo "Local Development (Recommended):"
@echo " make dev - Start full development stack with one command"
@echo " - Checks dependencies and available ports"
-@echo " - Generates configs and starts all services"
+@echo " - Generates configs (2 bootstraps + 3 nodes + gateway)"
-@echo " - Validates cluster health"
+@echo " - Starts IPFS, RQLite, Olric, all nodes, and gateway"
-@echo " make stop - Gracefully stop all development services"
+@echo " - Validates cluster health (IPFS peers, RQLite, LibP2P)"
-@echo " make kill - Force kill all development services (use if stop fails)"
+@echo " - Stops all services if health checks fail"
+@echo " - Includes comprehensive logging"
+@echo " make kill - Stop all development services"
@echo ""
-@echo "Development Management (via orama):"
+@echo "Development Management (via dbn):"
-@echo " ./bin/orama dev status - Show status of all dev services"
+@echo " ./bin/dbn dev status - Show status of all dev services"
-@echo " ./bin/orama dev logs <component> [--follow]"
+@echo " ./bin/dbn dev logs <component> [--follow]"
@echo ""
@echo "Individual Node Targets (advanced):"
-@echo " run-node - Start first node directly"
+@echo " run-node - Start bootstrap node directly"
@echo " run-node2 - Start second node directly"
@echo " run-node3 - Start third node directly"
@echo " run-gateway - Start HTTP gateway directly"
158 PRODUCTION_INSTALL.md (new file)

@@ -0,0 +1,158 @@

# Production Installation Guide - DeBros Network

This guide covers production deployment of the DeBros Network using the `dbn prod` command suite.

## System Requirements

- **OS**: Ubuntu 20.04 LTS or later, Debian 11+, or other Linux distributions
- **Architecture**: x86_64 (amd64) or ARM64 (aarch64)
- **RAM**: Minimum 4GB, recommended 8GB+
- **Storage**: Minimum 50GB SSD recommended
- **Ports**:
  - 4001 (P2P networking)
  - 4501 (IPFS HTTP API - bootstrap), 4502/4503 (node2/node3)
  - 5001-5003 (RQLite HTTP - one per node)
  - 6001 (Gateway)
  - 7001-7003 (RQLite Raft - one per node)
  - 9094 (IPFS Cluster API - bootstrap), 9104/9114 (node2/node3)
  - 3320/3322 (Olric)
  - 80, 443 (for HTTPS with Let's Encrypt)

## Installation

### Prerequisites

1. **Root access required**: All production operations require sudo/root privileges
2. **Supported distros**: Ubuntu, Debian, Fedora (via package manager)
3. **Basic tools**: `curl`, `git`, `make`, `build-essential`, `wget`

### Single-Node Bootstrap Installation

Deploy the first node (bootstrap node) on a VPS:

```bash
sudo dbn prod install --bootstrap
```

This will:

1. Check system prerequisites (OS, arch, root privileges, basic tools)
2. Provision the `debros` system user and filesystem structure at `~/.debros`
3. Download and install all required binaries (Go, RQLite, IPFS, IPFS Cluster, Olric, DeBros)
4. Generate secrets (cluster secret, swarm key, node identity)
5. Initialize repositories (IPFS, IPFS Cluster, RQLite)
6. Generate configurations for bootstrap node
7. Create and start systemd services

All files will be under `/home/debros/.debros`:

```
~/.debros/
├── bin/              # Compiled binaries
├── configs/          # YAML configurations
├── data/
│   ├── ipfs/         # IPFS repository
│   ├── ipfs-cluster/ # IPFS Cluster state
│   └── rqlite/       # RQLite database
├── logs/             # Service logs
└── secrets/          # Keys and certificates
```

## Service Management

### Check Service Status

```bash
sudo systemctl status debros-node-bootstrap
sudo systemctl status debros-gateway
sudo systemctl status debros-rqlite-bootstrap
```

### View Service Logs

```bash
# Bootstrap node logs
sudo journalctl -u debros-node-bootstrap -f

# Gateway logs
sudo journalctl -u debros-gateway -f

# All services
sudo journalctl -u "debros-*" -f
```

## Health Checks

After installation, verify services are running:

```bash
# Check IPFS
curl http://localhost:4501/api/v0/id

# Check RQLite cluster
curl http://localhost:5001/status

# Check Gateway
curl http://localhost:6001/health

# Check Olric
curl http://localhost:3320/ping
```

## Port Reference

### Development Environment (via `make dev`)

- IPFS API: 4501 (bootstrap), 4502 (node2), 4503 (node3)
- RQLite HTTP: 5001, 5002, 5003
- RQLite Raft: 7001, 7002, 7003
- IPFS Cluster: 9094, 9104, 9114
- P2P: 4001, 4002, 4003
- Gateway: 6001
- Olric: 3320, 3322

### Production Environment (via `sudo dbn prod install`)

- Same port assignments as development for consistency

## Configuration Files

Key configuration files are located in `~/.debros/configs/`:

- **bootstrap.yaml**: Bootstrap node configuration
- **node.yaml**: Regular node configuration
- **gateway.yaml**: HTTP gateway configuration
- **olric.yaml**: In-memory cache configuration

Edit these files directly for advanced configuration, then restart services:

```bash
sudo systemctl restart debros-node-bootstrap
```

## Troubleshooting

### Port already in use

Check which process is using the port:

```bash
sudo lsof -i :4501
sudo lsof -i :5001
sudo lsof -i :7001
```

Kill conflicting processes or change ports in config.

### RQLite cluster not forming

Ensure:

1. Bootstrap node is running: `systemctl status debros-rqlite-bootstrap`
2. Network connectivity between nodes on ports 5001+ (HTTP) and 7001+ (Raft)
3. Check logs: `journalctl -u debros-rqlite-bootstrap -f`
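If the checks above pass but the cluster still fails to form, RQLite's HTTP API can report membership directly; this sketch assumes the stock rqlite `/nodes` and `/status` endpoints are reachable on the bootstrap node's HTTP port:

```bash
# List the nodes the cluster knows about, including reachability and the current leader
curl -s "http://localhost:5001/nodes?pretty"

# Raft details (leader address, term, last log index) appear under the "store" key
curl -s "http://localhost:5001/status?pretty"
```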
---

**Last Updated**: November 2024
**Compatible with**: Network v1.0.0+
610 README.md

@@ -1,273 +1,437 @@

-# Orama Network - Distributed P2P Database System
+# DeBros Network - Distributed P2P Database System

-A decentralized peer-to-peer data platform built in Go. Combines distributed SQL (RQLite), pub/sub messaging, and resilient peer discovery so applications can share state without central infrastructure.
+DeBros Network is a decentralized peer-to-peer data platform built in Go. It combines distributed SQL (RQLite), pub/sub messaging, and resilient peer discovery so applications can share state without central infrastructure.

+## Table of Contents
+
+- [At a Glance](#at-a-glance)
+- [Quick Start](#quick-start)
+- [Production Deployment](#production-deployment)
+- [Components & Ports](#components--ports)
+- [Configuration Cheatsheet](#configuration-cheatsheet)
+- [CLI Highlights](#cli-highlights)
+- [HTTP Gateway](#http-gateway)
+- [Troubleshooting](#troubleshooting)
+- [Resources](#resources)
+
+## At a Glance
+
+- Distributed SQL backed by RQLite and Raft consensus
+- Topic-based pub/sub with automatic cleanup
+- Namespace isolation for multi-tenant apps
+- Secure transport using libp2p plus Noise/TLS
+- Lightweight Go client and CLI tooling
## Quick Start
|
## Quick Start
|
||||||
|
|
||||||
### Local Development
|
1. Clone and build the project:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Build the project
|
git clone https://github.com/DeBrosOfficial/network.git
|
||||||
make build
|
cd network
|
||||||
|
|
||||||
# Start 5-node development cluster
|
|
||||||
make dev
|
|
||||||
```
|
|
||||||
|
|
||||||
The cluster automatically performs health checks before declaring success.
|
|
||||||
|
|
||||||
### Stop Development Environment
|
|
||||||
|
|
||||||
```bash
|
|
||||||
make stop
|
|
||||||
```
|
|
||||||
|
|
||||||
## Testing Services
|
|
||||||
|
|
||||||
After running `make dev`, test service health using these curl requests:
|
|
||||||
|
|
||||||
> **Note:** Local domains (node-1.local, etc.) require running `sudo make setup-domains` first. Alternatively, use `localhost` with port numbers.
|
|
||||||
|
|
||||||
### Node Unified Gateways
|
|
||||||
|
|
||||||
Each node is accessible via a single unified gateway port:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Node-1 (port 6001)
|
|
||||||
curl http://node-1.local:6001/health
|
|
||||||
|
|
||||||
# Node-2 (port 6002)
|
|
||||||
curl http://node-2.local:6002/health
|
|
||||||
|
|
||||||
# Node-3 (port 6003)
|
|
||||||
curl http://node-3.local:6003/health
|
|
||||||
|
|
||||||
# Node-4 (port 6004)
|
|
||||||
curl http://node-4.local:6004/health
|
|
||||||
|
|
||||||
# Node-5 (port 6005)
|
|
||||||
curl http://node-5.local:6005/health
|
|
||||||
```
|
|
||||||
|
|
||||||
## Network Architecture
|
|
||||||
|
|
||||||
### Unified Gateway Ports
|
|
||||||
|
|
||||||
```
|
|
||||||
Node-1: localhost:6001 → /rqlite/http, /rqlite/raft, /cluster, /ipfs/api
|
|
||||||
Node-2: localhost:6002 → Same routes
|
|
||||||
Node-3: localhost:6003 → Same routes
|
|
||||||
Node-4: localhost:6004 → Same routes
|
|
||||||
Node-5: localhost:6005 → Same routes
|
|
||||||
```
|
|
||||||
|
|
||||||
### Direct Service Ports (for debugging)
|
|
||||||
|
|
||||||
```
|
|
||||||
RQLite HTTP: 5001, 5002, 5003, 5004, 5005 (one per node)
|
|
||||||
RQLite Raft: 7001, 7002, 7003, 7004, 7005
|
|
||||||
IPFS API: 4501, 4502, 4503, 4504, 4505
|
|
||||||
IPFS Swarm: 4101, 4102, 4103, 4104, 4105
|
|
||||||
Cluster API: 9094, 9104, 9114, 9124, 9134
|
|
||||||
Internal Gateway: 6000
|
|
||||||
Olric Cache: 3320
|
|
||||||
Anon SOCKS: 9050
|
|
||||||
```
|
|
||||||
|
|
||||||
## Development Commands
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Start full cluster (5 nodes + gateway)
|
|
||||||
make dev
|
|
||||||
|
|
||||||
# Check service status
|
|
||||||
orama dev status
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
orama dev logs node-1 # Node-1 logs
|
|
||||||
orama dev logs node-1 --follow # Follow logs in real-time
|
|
||||||
orama dev logs gateway --follow # Gateway logs
|
|
||||||
|
|
||||||
# Stop all services
|
|
||||||
orama stop
|
|
||||||
|
|
||||||
# Build binaries
|
|
||||||
make build
|
make build
|
||||||
```
|
```
|
||||||
|
|
||||||
## CLI Commands
|
2. Generate local configuration (bootstrap, node2, node3, gateway):
|
||||||
|
|
||||||
### Network Status
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./bin/orama health # Cluster health check
|
./bin/dbn config init
|
||||||
./bin/orama peers # List connected peers
|
|
||||||
./bin/orama status # Network status
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Database Operations
|
3. Launch the full development stack:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./bin/orama query "SELECT * FROM users"
|
make dev
|
||||||
./bin/orama query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
|
|
||||||
./bin/orama transaction --file ops.json
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Pub/Sub
|
This starts three nodes and the HTTP gateway. **The command will not complete successfully until all services pass health checks** (IPFS peer connectivity, RQLite cluster formation, and LibP2P connectivity). If health checks fail, all services are stopped automatically. Stop with `Ctrl+C`.
|
||||||
|
|
||||||
|
4. Validate the network from another terminal:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./bin/orama pubsub publish <topic> <message>
|
./bin/dbn health
|
||||||
./bin/orama pubsub subscribe <topic> 30s
|
./bin/dbn peers
|
||||||
./bin/orama pubsub topics
|
./bin/dbn pubsub publish notifications "Hello World"
|
||||||
```
|
./bin/dbn pubsub subscribe notifications 10s
|
||||||
|
|
||||||
### Authentication
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./bin/orama auth login
|
|
||||||
./bin/orama auth status
|
|
||||||
./bin/orama auth logout
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Production Deployment
|
## Production Deployment
|
||||||
|
|
||||||
|
DeBros Network can be deployed as production systemd services on Linux servers. The production installer handles all dependencies, configuration, and service management automatically.
|
||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
|
|
||||||
- Ubuntu 22.04+ or Debian 12+
|
- **OS**: Ubuntu 20.04+, Debian 11+, or compatible Linux distribution
|
||||||
- `amd64` or `arm64` architecture
|
- **Architecture**: `amd64` (x86_64) or `arm64` (aarch64)
|
||||||
- 4GB RAM, 50GB SSD, 2 CPU cores
|
- **Permissions**: Root access (use `sudo`)
|
||||||
|
- **Resources**: Minimum 2GB RAM, 10GB disk space, 2 CPU cores
|
||||||
### Required Ports
|
|
||||||
|
|
||||||
**External (must be open in firewall):**
|
|
||||||
|
|
||||||
- **80** - HTTP (ACME/Let's Encrypt certificate challenges)
|
|
||||||
- **443** - HTTPS (Main gateway API endpoint)
|
|
||||||
- **4101** - IPFS Swarm (peer connections)
|
|
||||||
- **7001** - RQLite Raft (cluster consensus)
|
|
||||||
|
|
||||||
**Internal (bound to localhost, no firewall needed):**
|
|
||||||
|
|
||||||
- 4501 - IPFS API
|
|
||||||
- 5001 - RQLite HTTP API
|
|
||||||
- 6001 - Unified Gateway
|
|
||||||
- 8080 - IPFS Gateway
|
|
||||||
- 9050 - Anyone Client SOCKS5 proxy
|
|
||||||
- 9094 - IPFS Cluster API
|
|
||||||
- 3320/3322 - Olric Cache
|
|
||||||
|
|
||||||
### Installation
|
### Installation
|
||||||
|
|
||||||
|
#### Quick Install
|
||||||
|
|
||||||
|
Install the CLI tool first:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install via APT
|
curl -fsSL https://install.debros.network | sudo bash
|
||||||
echo "deb https://debrosofficial.github.io/network/apt stable main" | sudo tee /etc/apt/sources.list.d/debros.list
|
|
||||||
|
|
||||||
sudo apt update && sudo apt install orama
|
|
||||||
|
|
||||||
sudo orama install --interactive
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Service Management
|
Or download manually from [GitHub Releases](https://github.com/DeBrosOfficial/network/releases).
|
||||||
|
|
||||||
|
#### Bootstrap Node (First Node)
|
||||||
|
|
||||||
|
Install the first node in your cluster:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Status
|
# Main branch (stable releases)
|
||||||
orama status
|
sudo dbn prod install --bootstrap
|
||||||
|
|
||||||
# Control services
|
# Nightly branch (latest development)
|
||||||
sudo orama start
|
sudo dbn prod install --bootstrap --branch nightly
|
||||||
sudo orama stop
|
```
|
||||||
sudo orama restart
|
|
||||||
|
|
||||||
# View logs
|
The bootstrap node initializes the cluster and serves as the primary peer for other nodes to join.
|
||||||
orama logs node --follow
|
|
||||||
orama logs gateway --follow
|
#### Secondary Node (Join Existing Cluster)
|
||||||
orama logs ipfs --follow
|
|
||||||
|
Join an existing cluster by providing the bootstrap node's IP and peer multiaddr:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo dbn prod install \
|
||||||
|
--vps-ip <your_public_ip> \
|
||||||
|
--peers /ip4/<bootstrap_ip>/tcp/4001/p2p/<peer_id> \
|
||||||
|
--branch nightly
|
||||||
|
```
|
||||||
|
|
||||||
|
**Required flags for secondary nodes:**
|
||||||
|
|
||||||
|
- `--vps-ip`: Your server's public IP address
|
||||||
|
- `--peers`: Comma-separated list of bootstrap peer multiaddrs
|
||||||
|
|
||||||
|
**Optional flags:**
|
||||||
|
|
||||||
|
- `--branch`: Git branch to use (`main` or `nightly`, default: `main`)
|
||||||
|
- `--domain`: Domain name for HTTPS (enables ACME/Let's Encrypt)
|
||||||
|
- `--bootstrap-join`: Raft join address for secondary bootstrap nodes
|
||||||
|
|
||||||
|
#### Secondary Bootstrap Node
|
||||||
|
|
||||||
|
Create a secondary bootstrap node that joins an existing Raft cluster:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo dbn prod install \
|
||||||
|
--bootstrap \
|
||||||
|
--vps-ip <your_public_ip> \
|
||||||
|
--bootstrap-join <primary_bootstrap_ip>:7001 \
|
||||||
|
--branch nightly
|
||||||
|
```
|
||||||
|
|
||||||
|
### Branch Selection
|
||||||
|
|
||||||
|
DeBros Network supports two branches:
|
||||||
|
|
||||||
|
- **`main`**: Stable releases (default). Recommended for production.
|
||||||
|
- **`nightly`**: Latest development builds. Use for testing new features.
|
||||||
|
|
||||||
|
**Branch preference is saved automatically** during installation. Future upgrades will use the same branch unless you override it with `--branch`.
|
||||||
|
|
||||||
|
**Examples:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install with nightly branch
|
||||||
|
sudo dbn prod install --bootstrap --branch nightly
|
||||||
|
|
||||||
|
# Upgrade using saved branch preference
|
||||||
|
sudo dbn prod upgrade --restart
|
||||||
|
|
||||||
|
# Upgrade and switch to main branch
|
||||||
|
sudo dbn prod upgrade --restart --branch main
|
||||||
```
|
```
|
||||||
|
|
||||||
### Upgrade
|
### Upgrade
|
||||||
|
|
||||||
|
Upgrade an existing installation to the latest version:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Upgrade to latest version
|
# Upgrade using saved branch preference
|
||||||
sudo orama upgrade --interactive
|
sudo dbn prod upgrade --restart
|
||||||
|
|
||||||
|
# Upgrade and switch branches
|
||||||
|
sudo dbn prod upgrade --restart --branch nightly
|
||||||
|
|
||||||
|
# Upgrade without restarting services
|
||||||
|
sudo dbn prod upgrade
|
||||||
```
|
```
|
||||||
|
|
||||||
## Configuration
|
The upgrade process:
|
||||||
|
|
||||||
All configuration lives in `~/.orama/`:
|
1. ✅ Checks prerequisites
|
||||||
|
2. ✅ Updates binaries (fetches latest from selected branch)
|
||||||
|
3. ✅ Preserves existing configurations and data
|
||||||
|
4. ✅ Updates configurations to latest format
|
||||||
|
5. ✅ Updates systemd service files
|
||||||
|
6. ✅ Optionally restarts services (`--restart` flag)
|
||||||
|
|
||||||
- `configs/node.yaml` - Node configuration
|
**Note**: The upgrade automatically detects your node type (bootstrap vs. regular node) and preserves all secrets, data, and configurations.
|
||||||
- `configs/gateway.yaml` - Gateway configuration
|
|
||||||
- `configs/olric.yaml` - Cache configuration
|
### Service Management
|
||||||
- `secrets/` - Keys and certificates
|
|
||||||
- `data/` - Service data directories
|
All services run as systemd units under the `debros` user.
|
||||||
|
|
||||||
|
#### Check Status
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# View status of all services
|
||||||
|
dbn prod status
|
||||||
|
|
||||||
|
# Or use systemctl directly
|
||||||
|
systemctl status debros-node-bootstrap
|
||||||
|
systemctl status debros-ipfs-bootstrap
|
||||||
|
systemctl status debros-gateway
|
||||||
|
```
|
||||||
|
|
||||||
|
#### View Logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# View recent logs
|
||||||
|
dbn prod logs node
|
||||||
|
|
||||||
|
# Follow logs in real-time
|
||||||
|
dbn prod logs node --follow
|
||||||
|
|
||||||
|
# View specific service logs
|
||||||
|
dbn prod logs ipfs --follow
|
||||||
|
dbn prod logs gateway --follow
|
||||||
|
```
|
||||||
|
|
||||||
|
Available log targets: `node`, `ipfs`, `ipfs-cluster`, `rqlite`, `olric`, `gateway`
|
||||||
|
|
||||||
|
#### Service Control Commands
|
||||||
|
|
||||||
|
Use `dbn prod` commands for convenient service management:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start all services
|
||||||
|
sudo dbn prod start
|
||||||
|
|
||||||
|
# Stop all services
|
||||||
|
sudo dbn prod stop
|
||||||
|
|
||||||
|
# Restart all services
|
||||||
|
sudo dbn prod restart
|
||||||
|
```
|
||||||
|
|
||||||
|
Or use `systemctl` directly for more control:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Restart all services
|
||||||
|
sudo systemctl restart debros-*
|
||||||
|
|
||||||
|
# Restart specific service
|
||||||
|
sudo systemctl restart debros-node-bootstrap
|
||||||
|
|
||||||
|
# Stop services
|
||||||
|
sudo systemctl stop debros-*
|
||||||
|
|
||||||
|
# Start services
|
||||||
|
sudo systemctl start debros-*
|
||||||
|
|
||||||
|
# Enable services (start on boot)
|
||||||
|
sudo systemctl enable debros-*
|
||||||
|
```
|
||||||
|
|
||||||
|
### Directory Structure
|
||||||
|
|
||||||
|
Production installations use `/home/debros/.debros/`:
|
||||||
|
|
||||||
|
```
|
||||||
|
/home/debros/.debros/
|
||||||
|
├── configs/ # Configuration files
|
||||||
|
│ ├── bootstrap.yaml # Bootstrap node config
|
||||||
|
│ ├── node.yaml # Regular node config
|
||||||
|
│ ├── gateway.yaml # Gateway config
|
||||||
|
│ └── olric/ # Olric cache config
|
||||||
|
├── data/ # Runtime data
|
||||||
|
│ ├── bootstrap/ # Bootstrap node data
|
||||||
|
│ │ ├── ipfs/ # IPFS repository
|
||||||
|
│ │ ├── ipfs-cluster/ # IPFS Cluster data
|
||||||
|
│ │ └── rqlite/ # RQLite database
|
||||||
|
│ └── node/ # Regular node data
|
||||||
|
├── secrets/ # Secrets and keys
|
||||||
|
│ ├── cluster-secret # IPFS Cluster secret
|
||||||
|
│ └── swarm.key # IPFS swarm key
|
||||||
|
├── logs/ # Service logs
|
||||||
|
│ ├── node-bootstrap.log
|
||||||
|
│ ├── ipfs-bootstrap.log
|
||||||
|
│ └── gateway.log
|
||||||
|
└── .branch # Saved branch preference
|
||||||
|
```
|
||||||
|
|
||||||
|
### Uninstall
|
||||||
|
|
||||||
|
Remove all production services (preserves data and configs):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo dbn prod uninstall
|
||||||
|
```
|
||||||
|
|
||||||
|
This stops and removes all systemd services but keeps `/home/debros/.debros/` intact. To completely remove:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo dbn prod uninstall
|
||||||
|
sudo rm -rf /home/debros/.debros
|
||||||
|
```
|
||||||
|
|
||||||
|
### Production Troubleshooting
|
||||||
|
|
||||||
|
#### Services Not Starting
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check service status
|
||||||
|
systemctl status debros-node-bootstrap
|
||||||
|
|
||||||
|
# View detailed logs
|
||||||
|
journalctl -u debros-node-bootstrap -n 100
|
||||||
|
|
||||||
|
# Check log files
|
||||||
|
tail -f /home/debros/.debros/logs/node-bootstrap.log
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Configuration Issues
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Verify configs exist
|
||||||
|
ls -la /home/debros/.debros/configs/
|
||||||
|
|
||||||
|
# Regenerate configs (preserves secrets)
|
||||||
|
sudo dbn prod upgrade --restart
|
||||||
|
```
|
||||||
|
|
||||||
|
#### IPFS AutoConf Errors
|
||||||
|
|
||||||
|
If you see "AutoConf.Enabled=false but 'auto' placeholder is used" errors, the upgrade process should fix this automatically. If not:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Re-run upgrade to fix IPFS config
|
||||||
|
sudo dbn prod upgrade --restart
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Port Conflicts
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check what's using ports
|
||||||
|
sudo lsof -i :4001 # P2P port
|
||||||
|
sudo lsof -i :5001 # RQLite HTTP
|
||||||
|
sudo lsof -i :6001 # Gateway
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Reset Installation
|
||||||
|
|
||||||
|
To start fresh (⚠️ **destroys all data**):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo dbn prod uninstall
|
||||||
|
sudo rm -rf /home/debros/.debros
|
||||||
|
sudo dbn prod install --bootstrap --branch nightly
|
||||||
|
```
|
||||||
|
|
||||||
|
+## Components & Ports
+
+- **Bootstrap node**: P2P `4001`, RQLite HTTP `5001`, Raft `7001`
+- **Additional nodes** (`node2`, `node3`): Incrementing ports (`400{2,3}`, `500{2,3}`, `700{2,3}`)
+- **Gateway**: HTTP `6001` exposes REST/WebSocket APIs
+- **Data directory**: `~/.debros/` stores configs, identities, and RQLite data
+
+Use `make dev` for the complete stack or run binaries individually with `go run ./cmd/node --config <file>` and `go run ./cmd/gateway --config gateway.yaml`.
+
+## Configuration Cheatsheet
+
+All runtime configuration lives in `~/.debros/`.
+
+- `bootstrap.yaml`: `type: bootstrap`, optionally set `database.rqlite_join_address` to join another bootstrap's cluster
+- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. `localhost:7001`) and include the bootstrap `discovery.bootstrap_peers`
+- `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags
+
+Validation reminders:
+
+- HTTP and Raft ports must differ
+- Non-bootstrap nodes require a join address and bootstrap peers
+- Bootstrap nodes can optionally define a join address to synchronize with another bootstrap
+- Multiaddrs must end with `/p2p/<peerID>`
+
+Regenerate configs any time with `./bin/dbn config init --force`.
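To make the cheatsheet above concrete, here is a hypothetical minimal node config; the field names are the ones listed in the cheatsheet, but the exact YAML nesting is an assumption, so generate the real file with `./bin/dbn config init --type node` rather than copying this sketch:

```bash
# Hypothetical sketch only - prefer: ./bin/dbn config init --type node
cat > ~/.debros/node2.yaml <<'EOF'
type: node
database:
  rqlite_join_address: "localhost:7001"        # HTTP and Raft ports must differ
discovery:
  bootstrap_peers:
    - "/ip4/127.0.0.1/tcp/4001/p2p/<peerID>"   # multiaddrs must end with /p2p/<peerID>
EOF
```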
+## CLI Highlights
+
+All commands accept `--format json`, `--timeout <duration>`, and `--bootstrap <multiaddr>`.
+
+- **Auth**
+
+```bash
+./bin/dbn auth login
+./bin/dbn auth status
+./bin/dbn auth logout
+```
+
+- **Network**
+
+```bash
+./bin/dbn health
+./bin/dbn status
+./bin/dbn peers
+```
+
+- **Database**
+
+```bash
+./bin/dbn query "SELECT * FROM users"
+./bin/dbn query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
+./bin/dbn transaction --file ops.json
+```
+
+- **Pub/Sub**
+
+```bash
+./bin/dbn pubsub publish <topic> <message>
+./bin/dbn pubsub subscribe <topic> 30s
+./bin/dbn pubsub topics
+```
+
+Credentials live at `~/.debros/credentials.json` with user-only permissions.
+## HTTP Gateway
+
+Start locally with `make run-gateway` or `go run ./cmd/gateway --config gateway.yaml`.
+
+Environment overrides:
+
+```bash
+export GATEWAY_ADDR="0.0.0.0:6001"
+export GATEWAY_NAMESPACE="my-app"
+export GATEWAY_BOOTSTRAP_PEERS="/ip4/localhost/tcp/4001/p2p/<peerID>"
+export GATEWAY_REQUIRE_AUTH=true
+export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2"
+```
+
+Common endpoints (see `openapi/gateway.yaml` for the full spec):
+
+- `GET /health`, `GET /v1/status`, `GET /v1/version`
+- `POST /v1/auth/challenge`, `POST /v1/auth/verify`, `POST /v1/auth/refresh`
+- `POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction`
+- `GET /v1/rqlite/schema`
+- `POST /v1/pubsub/publish`, `GET /v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=<topic>`
+- `POST /v1/storage/upload`, `POST /v1/storage/pin`, `GET /v1/storage/status/:cid`, `GET /v1/storage/get/:cid`, `DELETE /v1/storage/unpin/:cid`
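As a quick smoke test of the endpoints listed above, the calls below assume a local gateway on port 6001 with auth disabled; the request body for `/v1/rqlite/exec` is a guess at the shape, so check `openapi/gateway.yaml` for the authoritative schema:

```bash
# Unauthenticated probes
curl -s http://localhost:6001/health
curl -s http://localhost:6001/v1/status

# Hypothetical exec payload - verify the exact field names in openapi/gateway.yaml
curl -s -X POST http://localhost:6001/v1/rqlite/exec \
  -H "Content-Type: application/json" \
  -d '{"sql": "CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)"}'
```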
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
### Services Not Starting
|
- **Config directory errors**: Ensure `~/.debros/` exists, is writable, and has free disk space (`touch ~/.debros/test && rm ~/.debros/test`).
|
||||||
|
- **Port conflicts**: Inspect with `lsof -i :4001` (or other ports) and stop conflicting processes or regenerate configs with new ports.
|
||||||
```bash
|
- **Missing configs**: Run `./bin/dbn config init` before starting nodes.
|
||||||
# Check status
|
- **Cluster join issues**: Confirm the bootstrap node is running, `peer.info` multiaddr matches `bootstrap_peers`, and firewall rules allow the P2P ports.
|
||||||
systemctl status debros-node
|
|
||||||
|
|
||||||
# View logs
|
|
||||||
journalctl -u debros-node -f
|
|
||||||
|
|
||||||
# Check log files
|
|
||||||
tail -f /home/debros/.orama/logs/node.log
|
|
||||||
```
|
|
||||||
|
|
||||||
### Port Conflicts
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check what's using specific ports
|
|
||||||
sudo lsof -i :443 # HTTPS Gateway
|
|
||||||
sudo lsof -i :7001 # TCP/SNI Gateway
|
|
||||||
sudo lsof -i :6001 # Internal Gateway
|
|
||||||
```
|
|
||||||
|
|
||||||
### RQLite Cluster Issues
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Connect to RQLite CLI
|
|
||||||
rqlite -H localhost -p 5001
|
|
||||||
|
|
||||||
# Check cluster status
|
|
||||||
.nodes
|
|
||||||
.status
|
|
||||||
.ready
|
|
||||||
|
|
||||||
# Check consistency level
|
|
||||||
.consistency
|
|
||||||
```
|
|
||||||
|
|
||||||
### Reset Installation
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Production reset (⚠️ DESTROYS DATA)
|
|
||||||
sudo orama uninstall
|
|
||||||
sudo rm -rf /home/debros/.orama
|
|
||||||
sudo orama install
|
|
||||||
```
|
|
||||||
|
|
||||||
## HTTP Gateway API
|
|
||||||
|
|
||||||
### Main Gateway Endpoints
|
|
||||||
|
|
||||||
- `GET /health` - Health status
|
|
||||||
- `GET /v1/status` - Full status
|
|
||||||
- `GET /v1/version` - Version info
|
|
||||||
- `POST /v1/rqlite/exec` - Execute SQL
|
|
||||||
- `POST /v1/rqlite/query` - Query database
|
|
||||||
- `GET /v1/rqlite/schema` - Get schema
|
|
||||||
- `POST /v1/pubsub/publish` - Publish message
|
|
||||||
- `GET /v1/pubsub/topics` - List topics
|
|
||||||
- `GET /v1/pubsub/ws?topic=<name>` - WebSocket subscribe
|
|
||||||
|
|
||||||
See `openapi/gateway.yaml` for complete API specification.
|
|
||||||
|
|
||||||
## Resources
|
## Resources
|
||||||
|
|
||||||
- [RQLite Documentation](https://rqlite.io/docs/)
|
- Go modules: `go mod tidy`, `go test ./...`
|
||||||
- [LibP2P Documentation](https://docs.libp2p.io/)
|
- Automation: `make build`, `make dev`, `make run-gateway`, `make lint`
|
||||||
- [GitHub Repository](https://github.com/DeBrosOfficial/network)
|
- API reference: `openapi/gateway.yaml`
|
||||||
- [Issue Tracker](https://github.com/DeBrosOfficial/network/issues)
|
- Code of Conduct: [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md)
|
||||||
|
|||||||
143 cmd/cli/main.go

@@ -34,7 +34,7 @@ func main() {

switch command {
case "version":
-fmt.Printf("orama %s", version)
+fmt.Printf("dbn %s", version)
if commit != "" {
fmt.Printf(" (commit %s)", commit)
}
@ -44,38 +44,68 @@ func main() {
|
|||||||
fmt.Println()
|
fmt.Println()
|
||||||
return
|
return
|
||||||
|
|
||||||
|
// Environment commands
|
||||||
|
case "env":
|
||||||
|
cli.HandleEnvCommand(args)
|
||||||
|
case "devnet", "testnet", "local":
|
||||||
|
// Shorthand for switching environments
|
||||||
|
if len(args) > 0 && (args[0] == "enable" || args[0] == "switch") {
|
||||||
|
if err := cli.SwitchEnvironment(command); err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "❌ Failed to switch environment: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
env, _ := cli.GetActiveEnvironment()
|
||||||
|
fmt.Printf("✅ Switched to %s environment\n", command)
|
||||||
|
if env != nil {
|
||||||
|
fmt.Printf(" Gateway URL: %s\n", env.GatewayURL)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(os.Stderr, "Usage: dbn %s enable\n", command)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
// Development environment commands
|
// Development environment commands
|
||||||
case "dev":
|
case "dev":
|
||||||
cli.HandleDevCommand(args)
|
cli.HandleDevCommand(args)
|
||||||
|
|
||||||
// Production environment commands (legacy with 'prod' prefix)
|
// Production environment commands
|
||||||
case "prod":
|
case "prod":
|
||||||
cli.HandleProdCommand(args)
|
cli.HandleProdCommand(args)
|
||||||
|
|
||||||
// Direct production commands (new simplified interface)
|
|
||||||
case "install":
|
|
||||||
cli.HandleProdCommand(append([]string{"install"}, args...))
|
|
||||||
case "upgrade":
|
|
||||||
cli.HandleProdCommand(append([]string{"upgrade"}, args...))
|
|
||||||
case "migrate":
|
|
||||||
cli.HandleProdCommand(append([]string{"migrate"}, args...))
|
|
||||||
case "status":
|
|
||||||
cli.HandleProdCommand(append([]string{"status"}, args...))
|
|
||||||
case "start":
|
|
||||||
cli.HandleProdCommand(append([]string{"start"}, args...))
|
|
||||||
case "stop":
|
|
||||||
cli.HandleProdCommand(append([]string{"stop"}, args...))
|
|
||||||
case "restart":
|
|
||||||
cli.HandleProdCommand(append([]string{"restart"}, args...))
|
|
||||||
case "logs":
|
|
||||||
cli.HandleProdCommand(append([]string{"logs"}, args...))
|
|
||||||
case "uninstall":
|
|
||||||
cli.HandleProdCommand(append([]string{"uninstall"}, args...))
|
|
||||||
|
|
||||||
// Authentication commands
|
// Authentication commands
|
||||||
case "auth":
|
case "auth":
|
||||||
cli.HandleAuthCommand(args)
|
cli.HandleAuthCommand(args)
|
||||||
|
|
||||||
|
// Basic network commands
|
||||||
|
case "health":
|
||||||
|
cli.HandleHealthCommand(format, timeout)
|
||||||
|
case "peers":
|
||||||
|
cli.HandlePeersCommand(format, timeout)
|
||||||
|
case "status":
|
||||||
|
cli.HandleStatusCommand(format, timeout)
|
||||||
|
case "peer-id":
|
||||||
|
cli.HandlePeerIDCommand(format, timeout)
|
||||||
|
|
||||||
|
// Query command
|
||||||
|
case "query":
|
||||||
|
if len(args) == 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, "Usage: dbn query <sql>\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
cli.HandleQueryCommand(args[0], format, timeout)
|
||||||
|
|
||||||
|
// PubSub commands
|
||||||
|
case "pubsub":
|
||||||
|
cli.HandlePubSubCommand(args, format, timeout)
|
||||||
|
|
||||||
|
// Connect command
|
||||||
|
case "connect":
|
||||||
|
if len(args) == 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, "Usage: dbn connect <peer_address>\n")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
cli.HandleConnectCommand(args[0], timeout)
|
||||||
|
|
||||||
// Help
|
// Help
|
||||||
case "help", "--help", "-h":
|
case "help", "--help", "-h":
|
||||||
showHelp()
|
showHelp()
|
||||||
@ -105,47 +135,68 @@ func parseGlobalFlags(args []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func showHelp() {
|
func showHelp() {
|
||||||
fmt.Printf("Orama CLI - Distributed P2P Network Management Tool\n\n")
|
fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n")
|
||||||
fmt.Printf("Usage: orama <command> [args...]\n\n")
|
fmt.Printf("Usage: dbn <command> [args...]\n\n")
|
||||||
|
|
||||||
|
fmt.Printf("🌍 Environment Management:\n")
|
||||||
|
fmt.Printf(" env list - List available environments\n")
|
||||||
|
fmt.Printf(" env current - Show current environment\n")
|
||||||
|
fmt.Printf(" env switch <env> - Switch to environment (local, devnet, testnet)\n")
|
||||||
|
fmt.Printf(" devnet enable - Shorthand for switching to devnet\n")
|
||||||
|
fmt.Printf(" testnet enable - Shorthand for switching to testnet\n\n")
|
||||||
|
|
||||||
fmt.Printf("💻 Local Development:\n")
|
fmt.Printf("💻 Local Development:\n")
|
||||||
fmt.Printf(" dev up - Start full local dev environment\n")
|
fmt.Printf(" dev up - Start full local dev environment\n")
|
||||||
fmt.Printf(" dev down - Stop all dev services\n")
|
fmt.Printf(" dev down - Stop all dev services\n")
|
||||||
fmt.Printf(" dev status - Show status of dev services\n")
|
fmt.Printf(" dev status - Show status of dev services\n")
|
||||||
fmt.Printf(" dev logs <component> - View dev component logs\n")
|
fmt.Printf(" dev logs <component> - View dev component logs\n\n")
|
||||||
fmt.Printf(" dev help - Show dev command help\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🚀 Production Deployment:\n")
|
fmt.Printf("🚀 Production Deployment:\n")
|
||||||
fmt.Printf(" install - Install production node (requires root/sudo)\n")
|
fmt.Printf(" prod install [--bootstrap] - Full production bootstrap (requires root)\n")
|
||||||
fmt.Printf(" upgrade - Upgrade existing installation\n")
|
fmt.Printf(" prod upgrade - Upgrade existing installation\n")
|
||||||
fmt.Printf(" status - Show production service status\n")
|
fmt.Printf(" prod status - Show production service status\n")
|
||||||
fmt.Printf(" start - Start all production services (requires root/sudo)\n")
|
fmt.Printf(" prod logs <service> - View production service logs\n")
|
||||||
fmt.Printf(" stop - Stop all production services (requires root/sudo)\n")
|
fmt.Printf(" prod uninstall - Remove production services (preserves data)\n\n")
|
||||||
fmt.Printf(" restart - Restart all production services (requires root/sudo)\n")
|
|
||||||
fmt.Printf(" logs <service> - View production service logs\n")
|
|
||||||
fmt.Printf(" uninstall - Remove production services (requires root/sudo)\n\n")
|
|
||||||
|
|
||||||
fmt.Printf("🔐 Authentication:\n")
|
fmt.Printf("🔐 Authentication:\n")
|
||||||
fmt.Printf(" auth login - Authenticate with wallet\n")
|
fmt.Printf(" auth login - Authenticate with wallet\n")
|
||||||
fmt.Printf(" auth logout - Clear stored credentials\n")
|
fmt.Printf(" auth logout - Clear stored credentials\n")
|
||||||
fmt.Printf(" auth whoami - Show current authentication\n")
|
fmt.Printf(" auth whoami - Show current authentication\n")
|
||||||
fmt.Printf(" auth status - Show detailed auth info\n")
|
fmt.Printf(" auth status - Show detailed auth info\n\n")
|
||||||
fmt.Printf(" auth help - Show auth command help\n\n")
|
|
||||||
|
fmt.Printf("🌐 Network Commands:\n")
|
||||||
|
fmt.Printf(" health - Check network health\n")
|
||||||
|
fmt.Printf(" peers - List connected peers\n")
|
||||||
|
fmt.Printf(" status - Show network status\n")
|
||||||
|
fmt.Printf(" peer-id - Show this node's peer ID\n")
|
||||||
|
fmt.Printf(" connect <peer_address> - Connect to peer\n\n")
|
||||||
|
|
||||||
|
fmt.Printf("🗄️ Database:\n")
|
||||||
|
fmt.Printf(" query <sql> 🔐 Execute database query\n\n")
|
||||||
|
|
||||||
|
fmt.Printf("📡 PubSub:\n")
|
||||||
|
fmt.Printf(" pubsub publish <topic> <msg> 🔐 Publish message\n")
|
||||||
|
fmt.Printf(" pubsub subscribe <topic> 🔐 Subscribe to topic\n")
|
||||||
|
fmt.Printf(" pubsub topics 🔐 List topics\n\n")
|
||||||
|
|
||||||
fmt.Printf("Global Flags:\n")
|
fmt.Printf("Global Flags:\n")
|
||||||
fmt.Printf(" -f, --format <format> - Output format: table, json (default: table)\n")
|
fmt.Printf(" -f, --format <format> - Output format: table, json (default: table)\n")
|
||||||
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n")
|
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n\n")
|
||||||
fmt.Printf(" --help, -h - Show this help message\n\n")
|
|
||||||
|
fmt.Printf("🔐 = Requires authentication (auto-prompts if needed)\n\n")
|
||||||
|
|
||||||
fmt.Printf("Examples:\n")
|
fmt.Printf("Examples:\n")
|
||||||
fmt.Printf(" # First node (creates new cluster)\n")
|
fmt.Printf(" # Switch to devnet\n")
|
||||||
fmt.Printf(" sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
|
fmt.Printf(" dbn devnet enable\n\n")
|
||||||
|
|
||||||
fmt.Printf(" # Join existing cluster\n")
|
fmt.Printf(" # Authenticate and query\n")
|
||||||
fmt.Printf(" sudo orama install --vps-ip 203.0.113.2 --domain node-2.orama.network \\\n")
|
fmt.Printf(" dbn auth login\n")
|
||||||
fmt.Printf(" --peers /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW... --cluster-secret <hex>\n\n")
|
fmt.Printf(" dbn query \"SELECT * FROM users LIMIT 10\"\n\n")
|
||||||
|
|
||||||
fmt.Printf(" # Service management\n")
|
fmt.Printf(" # Setup VPS (Linux only)\n")
|
||||||
fmt.Printf(" orama status\n")
|
fmt.Printf(" sudo dbn setup\n\n")
|
||||||
fmt.Printf(" orama logs node --follow\n")
|
|
||||||
|
fmt.Printf(" # Manage services\n")
|
||||||
|
fmt.Printf(" sudo dbn service status all\n")
|
||||||
|
fmt.Printf(" sudo dbn service logs node --follow\n")
|
||||||
}
|
}
|
||||||
|
|||||||
@ -40,11 +40,11 @@ func getEnvBoolDefault(key string, def bool) bool {
}
}

-// parseGatewayConfig loads gateway.yaml from ~/.orama exclusively.
+// parseGatewayConfig loads gateway.yaml from ~/.debros exclusively.
// It accepts an optional --config flag for absolute paths (used by systemd services).
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Parse --config flag (optional, for systemd services that pass absolute paths)
-configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
+configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.debros)")
flag.Parse()

// Determine config path
@ -63,7 +63,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
}
}
} else {
-// Default behavior: look for gateway.yaml in ~/.orama/data/, ~/.orama/configs/, or ~/.orama/
+// Default behavior: look for gateway.yaml in ~/.debros/configs/ or ~/.debros/
configPath, err = config.DefaultPath("gateway.yaml")
if err != nil {
logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
@ -77,7 +77,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"`
RQLiteDSN string `yaml:"rqlite_dsn"`
-Peers []string `yaml:"bootstrap_peers"`
+BootstrapPeers []string `yaml:"bootstrap_peers"`
EnableHTTPS bool `yaml:"enable_https"`
DomainName string `yaml:"domain_name"`
TLSCacheDir string `yaml:"tls_cache_dir"`
@ -133,16 +133,16 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
if v := strings.TrimSpace(y.RQLiteDSN); v != "" {
cfg.RQLiteDSN = v
}
-if len(y.Peers) > 0 {
+if len(y.BootstrapPeers) > 0 {
-var peers []string
+var bp []string
-for _, p := range y.Peers {
+for _, p := range y.BootstrapPeers {
p = strings.TrimSpace(p)
if p != "" {
-peers = append(peers, p)
+bp = append(bp, p)
}
}
-if len(peers) > 0 {
+if len(bp) > 0 {
-cfg.BootstrapPeers = peers
+cfg.BootstrapPeers = bp
}
}

@ -157,7 +157,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Default TLS cache directory if HTTPS is enabled but not specified
homeDir, err := os.UserHomeDir()
if err == nil {
-cfg.TLSCacheDir = filepath.Join(homeDir, ".orama", "tls-cache")
+cfg.TLSCacheDir = filepath.Join(homeDir, ".debros", "tls-cache")
}
}

@ -205,7 +205,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
zap.String("path", configPath),
zap.String("addr", cfg.ListenAddr),
zap.String("namespace", cfg.ClientNamespace),
-zap.Int("peer_count", len(cfg.BootstrapPeers)),
+zap.Int("bootstrap_peer_count", len(cfg.BootstrapPeers)),
)

return cfg
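For reference, a minimal gateway.yaml that would satisfy the struct above might look as follows. Only the key names come from the yaml tags in the diff; every value is a placeholder chosen for illustration, not a default shipped by either branch:

    listen_addr: ":6001"
    client_namespace: "default"
    rqlite_dsn: "http://localhost:4001"
    bootstrap_peers:
      - "/ip4/203.0.113.1/tcp/4001/p2p/12D3KooW..."
    enable_https: true
    domain_name: "node-1.orama.network"
    tls_cache_dir: ""   # left empty so the code falls back to <home>/.orama/tls-cache (or .debros on v0.68.1-nightly)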
@ -4,7 +4,6 @@ import (
"context"
"flag"
"fmt"
-"net"
"os"
"os/signal"
"path/filepath"
@ -33,7 +32,7 @@ func setup_logger(component logging.Component) (logger *logging.ColoredLogger) {

// parse_flags parses command-line flags and returns them.
func parse_flags() (configName *string, help *bool) {
-configName = flag.String("config", "node.yaml", "Config filename in ~/.orama (default: node.yaml)")
+configName = flag.String("config", "node.yaml", "Config filename in ~/.debros (default: node.yaml)")
help = flag.Bool("help", false, "Show help")
flag.Parse()

@ -63,7 +62,7 @@ func check_if_should_open_help(help *bool) {
}
}

-// select_data_dir validates that we can load the config from ~/.orama
+// select_data_dir validates that we can load the config from ~/.debros
func select_data_dir_check(configName *string) {
logger := setup_logger(logging.ComponentNode)

@ -102,8 +101,8 @@ func select_data_dir_check(configName *string) {
fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath)
fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n")
-fmt.Fprintf(os.Stderr, " orama config init --type node\n")
+fmt.Fprintf(os.Stderr, " dbn config init --type bootstrap\n")
-fmt.Fprintf(os.Stderr, " orama config init --type node --peers '<peer_multiaddr>'\n")
+fmt.Fprintf(os.Stderr, " dbn config init --type node --bootstrap-peers '<peer_multiaddr>'\n")
os.Exit(1)
}
}
@ -135,35 +134,16 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error {
}
}

-// Save the peer ID to a file for CLI access
+// Save the peer ID to a file for CLI access (especially useful for bootstrap)
peerID := n.GetPeerID()
peerInfoFile := filepath.Join(dataDir, "peer.info")
+peerMultiaddr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/p2p/%s", port, peerID)
-// Extract advertise IP from config (prefer http_adv_address, fallback to raft_adv_address)
-advertiseIP := "0.0.0.0" // Default fallback
-if cfg.Discovery.HttpAdvAddress != "" {
-if host, _, err := net.SplitHostPort(cfg.Discovery.HttpAdvAddress); err == nil && host != "" && host != "localhost" {
-advertiseIP = host
-}
-} else if cfg.Discovery.RaftAdvAddress != "" {
-if host, _, err := net.SplitHostPort(cfg.Discovery.RaftAdvAddress); err == nil && host != "" && host != "localhost" {
-advertiseIP = host
-}
-}
-
-// Determine IP protocol (IPv4 or IPv6) for multiaddr
-ipProtocol := "ip4"
-if ip := net.ParseIP(advertiseIP); ip != nil && ip.To4() == nil {
-ipProtocol = "ip6"
-}
-
-peerMultiaddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, advertiseIP, port, peerID)
-
if err := os.WriteFile(peerInfoFile, []byte(peerMultiaddr), 0644); err != nil {
logger.Error("Failed to save peer info: %v", zap.Error(err))
} else {
logger.Info("Peer info saved to: %s", zap.String("path", peerInfoFile))
-logger.Info("Peer multiaddr: %s", zap.String("path", peerMultiaddr))
+logger.Info("Bootstrap multiaddr: %s", zap.String("path", peerMultiaddr))
}

logger.Info("Node started successfully")
@ -272,7 +252,7 @@ func main() {
// Absolute path passed directly (e.g., from systemd service)
configPath = *configName
} else {
-// Relative path - use DefaultPath which checks both ~/.orama/configs/ and ~/.orama/
+// Relative path - use DefaultPath which checks both ~/.debros/configs/ and ~/.debros/
configPath, err = config.DefaultPath(*configName)
if err != nil {
logger.Error("Failed to determine config path", zap.Error(err))
@ -316,7 +296,7 @@ func main() {
zap.Strings("listen_addresses", cfg.Node.ListenAddresses),
zap.Int("rqlite_http_port", cfg.Database.RQLitePort),
zap.Int("rqlite_raft_port", cfg.Database.RQLiteRaftPort),
-zap.Strings("peers", cfg.Discovery.BootstrapPeers),
+zap.Strings("bootstrap_peers", cfg.Discovery.BootstrapPeers),
zap.String("rqlite_join_address", cfg.Database.RQLiteJoinAddress),
zap.String("data_directory", cfg.Node.DataDir))
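To make the peer.info change concrete: if a main-branch node is configured with http_adv_address 203.0.113.1:5001 (the port here is only illustrative) and LibP2P listens on 4001, the file would contain a multiaddr of the form

    /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW...

whereas the v0.68.1-nightly code always writes /ip4/0.0.0.0/... regardless of any advertise address in the config.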
19 debian/control (vendored)
@ -1,19 +0,0 @@
-Package: orama
-Version: 0.69.20
-Section: net
-Priority: optional
-Architecture: amd64
-Depends: libc6
-Maintainer: DeBros Team <dev@debros.io>
-Description: Orama Network - Distributed P2P Database System
- Orama is a distributed peer-to-peer network that combines
- RQLite for distributed SQL, IPFS for content-addressed storage,
- and LibP2P for peer discovery and communication.
- .
- Features:
-  - Distributed SQLite database with Raft consensus
-  - IPFS-based file storage with encryption
-  - LibP2P peer-to-peer networking
-  - Olric distributed cache
-  - Unified HTTP/HTTPS gateway
-
18 debian/postinst (vendored)
@ -1,18 +0,0 @@
-#!/bin/bash
-set -e
-
-# Post-installation script for orama package
-
-echo "Orama installed successfully!"
-echo ""
-echo "To set up your node, run:"
-echo "  sudo orama install"
-echo ""
-echo "This will launch the interactive installer."
-echo ""
-echo "For command-line installation:"
-echo "  sudo orama install --vps-ip <your-ip> --domain <your-domain>"
-echo ""
-echo "For help:"
-echo "  orama --help"
-
30 e2e/env.go
@ -35,7 +35,7 @@ var (
cacheMutex sync.RWMutex
)

-// loadGatewayConfig loads gateway configuration from ~/.orama/gateway.yaml
+// loadGatewayConfig loads gateway configuration from ~/.debros/gateway.yaml
func loadGatewayConfig() (map[string]interface{}, error) {
configPath, err := config.DefaultPath("gateway.yaml")
if err != nil {
@ -55,7 +55,7 @@ func loadGatewayConfig() (map[string]interface{}, error) {
return cfg, nil
}

-// loadNodeConfig loads node configuration from ~/.orama/node-*.yaml
+// loadNodeConfig loads node configuration from ~/.debros/node.yaml or bootstrap.yaml
func loadNodeConfig(filename string) (map[string]interface{}, error) {
configPath, err := config.DefaultPath(filename)
if err != nil {
@ -111,8 +111,8 @@ func GetRQLiteNodes() []string {
}
cacheMutex.RUnlock()

-// Try all node config files
+// Try bootstrap.yaml first, then all node variants
-for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
+for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -141,13 +141,13 @@ func queryAPIKeyFromRQLite() (string, error) {
return "", fmt.Errorf("failed to get home directory: %w", err)
}

-// Try all node data directories
+// Try bootstrap first, then all nodes
dbPaths := []string{
-filepath.Join(homeDir, ".orama", "data", "node-1", "rqlite", "db.sqlite"),
+filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"),
-filepath.Join(homeDir, ".orama", "data", "node-2", "rqlite", "db.sqlite"),
+filepath.Join(homeDir, ".debros", "bootstrap2", "rqlite", "db.sqlite"),
-filepath.Join(homeDir, ".orama", "data", "node-3", "rqlite", "db.sqlite"),
+filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"),
-filepath.Join(homeDir, ".orama", "data", "node-4", "rqlite", "db.sqlite"),
+filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"),
-filepath.Join(homeDir, ".orama", "data", "node-5", "rqlite", "db.sqlite"),
+filepath.Join(homeDir, ".debros", "node4", "rqlite", "db.sqlite"),
}

for _, dbPath := range dbPaths {
@ -221,7 +221,7 @@ func GetBootstrapPeers() []string {
}
cacheMutex.RUnlock()

-configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"}
+configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}
seen := make(map[string]struct{})
var peers []string

@ -272,7 +272,7 @@ func GetIPFSClusterURL() string {
cacheMutex.RUnlock()

// Try to load from node config
-for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
+for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -304,7 +304,7 @@ func GetIPFSAPIURL() string {
cacheMutex.RUnlock()

// Try to load from node config
-for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
+for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -329,7 +329,7 @@ func GetIPFSAPIURL() string {
// GetClientNamespace returns the test client namespace from config
func GetClientNamespace() string {
// Try to load from node config
-for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
+for _, cfgFile := range []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"} {
nodeCfg, err := loadNodeConfig(cfgFile)
if err != nil {
continue
@ -562,7 +562,7 @@ func CleanupDatabaseTable(t *testing.T, tableName string) {
return
}

-dbPath := filepath.Join(homeDir, ".orama", "data", "node-1", "rqlite", "db.sqlite")
+dbPath := filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite")
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
t.Logf("warning: failed to open database for cleanup: %v", err)
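As a sketch of how an end-to-end test might consume the discovery helpers above: the package name matches the e2e/ directory and the helper functions come from this file, but the test name, skip condition, and logging are illustrative only.

    package e2e

    import "testing"

    // TestClusterDiscovery logs whatever local cluster the on-disk configs describe.
    func TestClusterDiscovery(t *testing.T) {
        nodes := GetRQLiteNodes()
        peers := GetBootstrapPeers()
        if len(nodes) == 0 || len(peers) == 0 {
            t.Skip("no node configs found in the home config directory")
        }
        t.Logf("rqlite nodes: %v", nodes)
        t.Logf("bootstrap peers: %v", peers)
        t.Logf("ipfs api: %s, cluster: %s, namespace: %s",
            GetIPFSAPIURL(), GetIPFSClusterURL(), GetClientNamespace())
    }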
20 go.mod
@ -5,39 +5,29 @@ go 1.23.8
toolchain go1.24.1

require (
-github.com/charmbracelet/bubbles v0.20.0
-github.com/charmbracelet/bubbletea v1.2.4
-github.com/charmbracelet/lipgloss v1.0.0
github.com/ethereum/go-ethereum v1.13.14
-github.com/go-chi/chi/v5 v5.2.3
github.com/gorilla/websocket v1.5.3
github.com/libp2p/go-libp2p v0.41.1
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/mackerelio/go-osstat v0.2.6
-github.com/mattn/go-sqlite3 v1.14.32
github.com/multiformats/go-multiaddr v0.15.0
github.com/olric-data/olric v0.7.0
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.40.0
golang.org/x/net v0.42.0
-gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)

require (
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
-github.com/atotto/clipboard v0.1.4 // indirect
-github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buraksezer/consistent v0.10.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
-github.com/charmbracelet/x/ansi v0.4.5 // indirect
-github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
@ -45,7 +35,6 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
@ -81,20 +70,15 @@ require (
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
-github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
-github.com/mattn/go-localereader v0.0.1 // indirect
+github.com/mattn/go-sqlite3 v1.14.32 // indirect
-github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
-github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
-github.com/muesli/cancelreader v0.2.2 // indirect
-github.com/muesli/termenv v0.15.2 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
@ -137,7 +121,6 @@ require (
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/redis/go-redis/v9 v9.8.0 // indirect
-github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
@ -158,5 +141,6 @@ require (
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
+gopkg.in/yaml.v2 v2.4.0 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)
34 go.sum
@ -19,10 +19,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
-github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
-github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
-github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@ -48,16 +44,6 @@ github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0ma
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
-github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
-github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE=
-github.com/charmbracelet/bubbletea v1.2.4/go.mod h1:Qr6fVQw+wX7JkWWkVyXYk/ZUQ92a6XNekLXa3rR18MM=
-github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
-github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
-github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSexTdRM=
-github.com/charmbracelet/x/ansi v0.4.5/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw=
-github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
-github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
@ -89,8 +75,6 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -101,8 +85,6 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
-github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -256,8 +238,6 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
-github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
-github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
@ -266,10 +246,6 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
-github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
-github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
-github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -295,12 +271,6 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
-github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
-github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
-github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
-github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
-github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
-github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
@ -429,9 +399,6 @@ github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtB
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
-github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
-github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
@ -618,7 +585,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -34,15 +34,15 @@ func GetCredentialsPath() (string, error) {
return "", fmt.Errorf("failed to get home directory: %w", err)
}

-oramaDir := filepath.Join(homeDir, ".orama")
+debrosDir := filepath.Join(homeDir, ".debros")
-if err := os.MkdirAll(oramaDir, 0700); err != nil {
+if err := os.MkdirAll(debrosDir, 0700); err != nil {
-return "", fmt.Errorf("failed to create .orama directory: %w", err)
+return "", fmt.Errorf("failed to create .debros directory: %w", err)
}

-return filepath.Join(oramaDir, "credentials.json"), nil
+return filepath.Join(debrosDir, "credentials.json"), nil
}

-// LoadCredentials loads credentials from ~/.orama/credentials.json
+// LoadCredentials loads credentials from ~/.debros/credentials.json
func LoadCredentials() (*CredentialStore, error) {
credPath, err := GetCredentialsPath()
if err != nil {
@ -80,7 +80,7 @@ func LoadCredentials() (*CredentialStore, error) {
return &store, nil
}

-// SaveCredentials saves credentials to ~/.orama/credentials.json
+// SaveCredentials saves credentials to ~/.debros/credentials.json
func (store *CredentialStore) SaveCredentials() error {
credPath, err := GetCredentialsPath()
if err != nil {
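A minimal usage sketch for these helpers, assuming they live in pkg/auth as the CLI import elsewhere in this diff suggests. CredentialStore's fields are not shown in the diff, so the sketch only round-trips the store without touching them:

    package main

    import (
        "log"

        "github.com/DeBrosOfficial/network/pkg/auth" // import path assumed
    )

    func main() {
        // Loads ~/.orama/credentials.json on main (~/.debros on v0.68.1-nightly).
        store, err := auth.LoadCredentials()
        if err != nil {
            log.Fatalf("load credentials: %v", err)
        }
        // Writes the store back to the path returned by GetCredentialsPath.
        if err := store.SaveCredentials(); err != nil {
            log.Fatalf("save credentials: %v", err)
        }
    }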
@ -10,8 +10,6 @@ import (
"os"
"strings"
"time"

-"github.com/DeBrosOfficial/network/pkg/tlsutil"
)

// PerformSimpleAuthentication performs a simple authentication flow where the user
@ -93,13 +91,7 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
}

endpoint := gatewayURL + "/v1/auth/simple-key"
+resp, err := http.Post(endpoint, "application/json", bytes.NewReader(payload))
-// Extract domain from URL for TLS configuration
-// This uses tlsutil which handles Let's Encrypt staging certificates for *.debros.network
-domain := extractDomainFromURL(gatewayURL)
-client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
-
-resp, err := client.Post(endpoint, "application/json", bytes.NewReader(payload))
if err != nil {
return "", fmt.Errorf("failed to call gateway: %w", err)
}
@ -122,23 +114,3 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err

return apiKey, nil
}
-
-// extractDomainFromURL extracts the domain from a URL
-// Removes protocol (https://, http://), path, and port components
-func extractDomainFromURL(url string) string {
-// Remove protocol prefixes
-url = strings.TrimPrefix(url, "https://")
-url = strings.TrimPrefix(url, "http://")
-
-// Remove path component
-if idx := strings.Index(url, "/"); idx != -1 {
-url = url[:idx]
-}
-
-// Remove port component
-if idx := strings.Index(url, ":"); idx != -1 {
-url = url[:idx]
-}
-
-return url
-}
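As a quick check of the removed helper's behaviour: extractDomainFromURL("https://node-1.orama.network/v1/auth") returns "node-1.orama.network", and extractDomainFromURL("http://localhost:6001") returns "localhost", since the protocol is stripped first, then everything after the first "/", then any ":port" suffix. That bare domain is what the main branch passes to tlsutil.NewHTTPClientForDomain.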
@ -199,7 +199,7 @@ func (as *AuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
%s
</div>

-<p>Your credentials have been saved securely to <code>~/.orama/credentials.json</code></p>
+<p>Your credentials have been saved securely to <code>~/.debros/credentials.json</code></p>
<p><strong>You can now close this browser window and return to your terminal.</strong></p>
</div>
</body>
@ -1,257 +0,0 @@
-// Package certutil provides utilities for managing self-signed certificates
-package certutil
-
-import (
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/tls"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/pem"
-	"fmt"
-	"math/big"
-	"net"
-	"os"
-	"path/filepath"
-	"time"
-)
-
-// CertificateManager manages self-signed certificates for the network
-type CertificateManager struct {
-	baseDir string
-}
-
-// NewCertificateManager creates a new certificate manager
-func NewCertificateManager(baseDir string) *CertificateManager {
-	return &CertificateManager{
-		baseDir: baseDir,
-	}
-}
-
-// EnsureCACertificate creates or loads the CA certificate
-func (cm *CertificateManager) EnsureCACertificate() ([]byte, []byte, error) {
-	caCertPath := filepath.Join(cm.baseDir, "ca.crt")
-	caKeyPath := filepath.Join(cm.baseDir, "ca.key")
-
-	// Check if CA already exists
-	if _, err := os.Stat(caCertPath); err == nil {
-		certPEM, err := os.ReadFile(caCertPath)
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed to read CA certificate: %w", err)
-		}
-		keyPEM, err := os.ReadFile(caKeyPath)
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed to read CA key: %w", err)
-		}
-		return certPEM, keyPEM, nil
-	}
-
-	// Create new CA certificate
-	certPEM, keyPEM, err := cm.generateCACertificate()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Ensure directory exists
-	if err := os.MkdirAll(cm.baseDir, 0700); err != nil {
-		return nil, nil, fmt.Errorf("failed to create cert directory: %w", err)
-	}
-
-	// Write to files
-	if err := os.WriteFile(caCertPath, certPEM, 0644); err != nil {
-		return nil, nil, fmt.Errorf("failed to write CA certificate: %w", err)
-	}
-	if err := os.WriteFile(caKeyPath, keyPEM, 0600); err != nil {
-		return nil, nil, fmt.Errorf("failed to write CA key: %w", err)
-	}
-
-	return certPEM, keyPEM, nil
-}
-
-// EnsureNodeCertificate creates or loads a node certificate signed by the CA
-func (cm *CertificateManager) EnsureNodeCertificate(hostname string, caCertPEM, caKeyPEM []byte) ([]byte, []byte, error) {
-	certPath := filepath.Join(cm.baseDir, fmt.Sprintf("%s.crt", hostname))
-	keyPath := filepath.Join(cm.baseDir, fmt.Sprintf("%s.key", hostname))
-
-	// Check if certificate already exists
-	if _, err := os.Stat(certPath); err == nil {
-		certData, err := os.ReadFile(certPath)
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed to read certificate: %w", err)
-		}
-		keyData, err := os.ReadFile(keyPath)
-		if err != nil {
-			return nil, nil, fmt.Errorf("failed to read key: %w", err)
-		}
-		return certData, keyData, nil
-	}
-
-	// Create new certificate
-	certPEM, keyPEM, err := cm.generateNodeCertificate(hostname, caCertPEM, caKeyPEM)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Write to files
-	if err := os.WriteFile(certPath, certPEM, 0644); err != nil {
-		return nil, nil, fmt.Errorf("failed to write certificate: %w", err)
-	}
-	if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil {
-		return nil, nil, fmt.Errorf("failed to write key: %w", err)
-	}
-
-	return certPEM, keyPEM, nil
-}
-
-// generateCACertificate generates a self-signed CA certificate
-func (cm *CertificateManager) generateCACertificate() ([]byte, []byte, error) {
-	// Generate private key
-	privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
-	}
-
-	// Create certificate template
-	template := x509.Certificate{
-		SerialNumber: big.NewInt(1),
-		Subject: pkix.Name{
-			CommonName: "DeBros Network Root CA",
-			Organization: []string{"DeBros"},
-		},
-		NotBefore: time.Now(),
-		NotAfter: time.Now().AddDate(10, 0, 0), // 10 year validity
-		KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
-		ExtKeyUsage: []x509.ExtKeyUsage{},
-		BasicConstraintsValid: true,
-		IsCA: true,
-	}
-
-	// Self-sign the certificate
-	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
-	}
-
-	// Encode certificate to PEM
-	certPEM := pem.EncodeToMemory(&pem.Block{
-		Type: "CERTIFICATE",
-		Bytes: certDER,
-	})
-
-	// Encode private key to PEM
-	keyDER, err := x509.MarshalPKCS8PrivateKey(privateKey)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
-	}
-
-	keyPEM := pem.EncodeToMemory(&pem.Block{
-		Type: "PRIVATE KEY",
-		Bytes: keyDER,
-	})
-
-	return certPEM, keyPEM, nil
-}
-
-// generateNodeCertificate generates a certificate signed by the CA
-func (cm *CertificateManager) generateNodeCertificate(hostname string, caCertPEM, caKeyPEM []byte) ([]byte, []byte, error) {
-	// Parse CA certificate and key
-	caCert, caKey, err := cm.parseCACertificate(caCertPEM, caKeyPEM)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Generate node private key
-	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
-	}
-
-	// Create certificate template
-	template := x509.Certificate{
-		SerialNumber: big.NewInt(time.Now().UnixNano()),
-		Subject: pkix.Name{
-			CommonName: hostname,
-		},
-		NotBefore: time.Now(),
-		NotAfter: time.Now().AddDate(5, 0, 0), // 5 year validity
-		KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
-		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		DNSNames: []string{hostname},
-	}
-
-	// Add wildcard support if hostname contains *.debros.network
-	if hostname == "*.debros.network" {
-		template.DNSNames = []string{"*.debros.network", "debros.network"}
-	} else if hostname == "debros.network" {
-		template.DNSNames = []string{"*.debros.network", "debros.network"}
-	}
-
-	// Try to parse as IP address for IP-based certificates
-	if ip := net.ParseIP(hostname); ip != nil {
-		template.IPAddresses = []net.IP{ip}
-		template.DNSNames = nil
-	}
-
-	// Sign certificate with CA
-	certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &privateKey.PublicKey, caKey)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
-	}
-
-	// Encode certificate to PEM
-	certPEM := pem.EncodeToMemory(&pem.Block{
-		Type: "CERTIFICATE",
-		Bytes: certDER,
-	})
-
-	// Encode private key to PEM
-	keyDER, err := x509.MarshalPKCS8PrivateKey(privateKey)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
-	}
-
-	keyPEM := pem.EncodeToMemory(&pem.Block{
-		Type: "PRIVATE KEY",
-		Bytes: keyDER,
-	})
-
-	return certPEM, keyPEM, nil
-}
-
-// parseCACertificate parses CA certificate and key from PEM
-func (cm *CertificateManager) parseCACertificate(caCertPEM, caKeyPEM []byte) (*x509.Certificate, *rsa.PrivateKey, error) {
-	// Parse CA certificate
-	certBlock, _ := pem.Decode(caCertPEM)
-	if certBlock == nil {
-		return nil, nil, fmt.Errorf("failed to parse CA certificate PEM")
-	}
-
-	caCert, err := x509.ParseCertificate(certBlock.Bytes)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to parse CA certificate: %w", err)
-	}
-
-	// Parse CA private key
-	keyBlock, _ := pem.Decode(caKeyPEM)
-	if keyBlock == nil {
-		return nil, nil, fmt.Errorf("failed to parse CA key PEM")
-	}
-
-	caKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to parse CA key: %w", err)
-	}
-
-	rsaKey, ok := caKey.(*rsa.PrivateKey)
-	if !ok {
-		return nil, nil, fmt.Errorf("CA key is not RSA")
-	}
-
-	return caCert, rsaKey, nil
-}
-
-// LoadTLSCertificate loads a TLS certificate from PEM files
-func LoadTLSCertificate(certPEM, keyPEM []byte) (tls.Certificate, error) {
-	return tls.X509KeyPair(certPEM, keyPEM)
-}
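An illustrative sketch of how the removed certutil API fits together. The import path is assumed from the repository's pkg/ layout, and the base directory and hostname are placeholders; only the function signatures come from the file above.

    package main

    import (
        "crypto/tls"
        "log"

        "github.com/DeBrosOfficial/network/pkg/certutil" // import path assumed
    )

    func main() {
        // Placeholder certificate directory for illustration.
        cm := certutil.NewCertificateManager("/home/debros/.orama/certs")

        // Create (or reuse) the self-signed root CA.
        caCert, caKey, err := cm.EnsureCACertificate()
        if err != nil {
            log.Fatal(err)
        }
        // Issue (or reuse) a node certificate signed by that CA.
        certPEM, keyPEM, err := cm.EnsureNodeCertificate("node-1.orama.network", caCert, caKey)
        if err != nil {
            log.Fatal(err)
        }
        // Turn the PEM pair into a tls.Certificate for a server config.
        tlsCert, err := certutil.LoadTLSCertificate(certPEM, keyPEM)
        if err != nil {
            log.Fatal(err)
        }
        _ = tls.Config{Certificates: []tls.Certificate{tlsCert}}
    }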
@@ -1,10 +1,8 @@
 package cli

 import (
-	"bufio"
 	"fmt"
 	"os"
-	"strings"

 	"github.com/DeBrosOfficial/network/pkg/auth"
 )
@@ -52,14 +50,13 @@ func showAuthHelp() {
 	fmt.Printf(" 1. Run 'dbn auth login'\n")
 	fmt.Printf(" 2. Enter your wallet address when prompted\n")
 	fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n")
-	fmt.Printf(" 4. An API key will be generated and saved to ~/.orama/credentials.json\n\n")
+	fmt.Printf(" 4. An API key will be generated and saved to ~/.debros/credentials.json\n\n")
 	fmt.Printf("Note: Authentication uses the currently active environment.\n")
 	fmt.Printf(" Use 'dbn env current' to see your active environment.\n")
 }

 func handleAuthLogin() {
-	// Prompt for node selection
-	gatewayURL := promptForGatewayURL()
+	gatewayURL := getGatewayURL()
 	fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL)

 	// Use the simple authentication flow
@@ -164,55 +161,7 @@ func handleAuthStatus() {
 	}
 }

-// promptForGatewayURL interactively prompts for the gateway URL
-// Allows user to choose between local node or remote node by domain
-func promptForGatewayURL() string {
-	// Check environment variable first (allows override without prompting)
-	if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
-		return url
-	}
-
-	reader := bufio.NewReader(os.Stdin)
-
-	fmt.Println("\n🌐 Node Connection")
-	fmt.Println("==================")
-	fmt.Println("1. Local node (localhost:6001)")
-	fmt.Println("2. Remote node (enter domain)")
-	fmt.Print("\nSelect option [1/2]: ")
-
-	choice, _ := reader.ReadString('\n')
-	choice = strings.TrimSpace(choice)
-
-	if choice == "1" || choice == "" {
-		return "http://localhost:6001"
-	}
-
-	if choice != "2" {
-		fmt.Println("⚠️ Invalid option, using localhost")
-		return "http://localhost:6001"
-	}
-
-	fmt.Print("Enter node domain (e.g., node-hk19de.debros.network): ")
-	domain, _ := reader.ReadString('\n')
-	domain = strings.TrimSpace(domain)
-
-	if domain == "" {
-		fmt.Println("⚠️ No domain entered, using localhost")
-		return "http://localhost:6001"
-	}
-
-	// Remove any protocol prefix if user included it
-	domain = strings.TrimPrefix(domain, "https://")
-	domain = strings.TrimPrefix(domain, "http://")
-	// Remove trailing slash
-	domain = strings.TrimSuffix(domain, "/")
-
-	// Use HTTPS for remote domains
-	return fmt.Sprintf("https://%s", domain)
-}
-
 // getGatewayURL returns the gateway URL based on environment or env var
-// Used by other commands that don't need interactive node selection
 func getGatewayURL() string {
 	// Check environment variable first (for backwards compatibility)
 	if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
@@ -225,6 +174,6 @@ func getGatewayURL() string {
 		return env.GatewayURL
 	}

-	// Fallback to default (node-1)
+	// Fallback to default
 	return "http://localhost:6001"
 }
@@ -245,21 +245,12 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
 func createClient() (client.NetworkClient, error) {
 	config := client.DefaultClientConfig("dbn")

-	// Use active environment's gateway URL
-	gatewayURL := getGatewayURL()
-	config.GatewayURL = gatewayURL
-
-	// Try to get peer configuration from active environment
-	env, err := GetActiveEnvironment()
-	if err == nil && env != nil {
-		// Environment loaded successfully - gateway URL already set above
-		_ = env // Reserve for future peer configuration
-	}
-
 	// Check for existing credentials using enhanced authentication
 	creds, err := auth.GetValidEnhancedCredentials()
 	if err != nil {
 		// No valid credentials found, use the enhanced authentication flow
+		gatewayURL := getGatewayURL()
+
 		newCreds, authErr := auth.GetOrPromptForCredentials(gatewayURL)
 		if authErr != nil {
 			return nil, fmt.Errorf("authentication failed: %w", authErr)
@@ -40,30 +40,30 @@ func HandleDevCommand(args []string) {

 func showDevHelp() {
 	fmt.Printf("🚀 Development Environment Commands\n\n")
-	fmt.Printf("Usage: orama dev <subcommand> [options]\n\n")
+	fmt.Printf("Usage: dbn dev <subcommand> [options]\n\n")
 	fmt.Printf("Subcommands:\n")
-	fmt.Printf(" up - Start development environment (5 nodes + gateway)\n")
+	fmt.Printf(" up - Start development environment (2 bootstraps + 3 nodes + gateway)\n")
 	fmt.Printf(" down - Stop all development services\n")
 	fmt.Printf(" status - Show status of running services\n")
 	fmt.Printf(" logs <component> - Tail logs for a component\n")
 	fmt.Printf(" help - Show this help\n\n")
 	fmt.Printf("Examples:\n")
-	fmt.Printf(" orama dev up\n")
-	fmt.Printf(" orama dev down\n")
-	fmt.Printf(" orama dev status\n")
-	fmt.Printf(" orama dev logs node-1 --follow\n")
+	fmt.Printf(" dbn dev up\n")
+	fmt.Printf(" dbn dev down\n")
+	fmt.Printf(" dbn dev status\n")
+	fmt.Printf(" dbn dev logs bootstrap --follow\n")
 }

 func handleDevUp(args []string) {
 	ctx := context.Background()

-	// Get home directory and .orama path
+	// Get home directory and .debros path
 	homeDir, err := os.UserHomeDir()
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
 		os.Exit(1)
 	}
-	oramaDir := filepath.Join(homeDir, ".orama")
+	debrosDir := filepath.Join(homeDir, ".debros")

 	// Step 1: Check dependencies
 	fmt.Printf("📋 Checking dependencies...\n\n")
@@ -90,7 +90,7 @@ func handleDevUp(args []string) {

 	// Step 3: Ensure configs
 	fmt.Printf("⚙️ Preparing configuration files...\n\n")
-	ensurer := development.NewConfigEnsurer(oramaDir)
+	ensurer := development.NewConfigEnsurer(debrosDir)
 	if err := ensurer.EnsureAll(); err != nil {
 		fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
 		os.Exit(1)
@@ -98,7 +98,7 @@ func handleDevUp(args []string) {
 	fmt.Printf("\n")

 	// Step 4: Start services
-	pm := development.NewProcessManager(oramaDir, os.Stdout)
+	pm := development.NewProcessManager(debrosDir, os.Stdout)
 	if err := pm.StartAll(ctx); err != nil {
 		fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
 		os.Exit(1)
@@ -108,19 +108,19 @@ func handleDevUp(args []string) {
 	fmt.Printf("🎉 Development environment is running!\n\n")
 	fmt.Printf("Key endpoints:\n")
 	fmt.Printf(" Gateway: http://localhost:6001\n")
-	fmt.Printf(" Node-1 IPFS: http://localhost:4501\n")
-	fmt.Printf(" Node-2 IPFS: http://localhost:4502\n")
-	fmt.Printf(" Node-3 IPFS: http://localhost:4503\n")
-	fmt.Printf(" Node-4 IPFS: http://localhost:4504\n")
-	fmt.Printf(" Node-5 IPFS: http://localhost:4505\n")
+	fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
+	fmt.Printf(" Bootstrap2 IPFS: http://localhost:4511\n")
+	fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
+	fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
+	fmt.Printf(" Node4 IPFS: http://localhost:4504\n")
 	fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
 	fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
 	fmt.Printf("Useful commands:\n")
-	fmt.Printf(" orama dev status - Show status\n")
-	fmt.Printf(" orama dev logs node-1 - Node-1 logs\n")
-	fmt.Printf(" orama dev logs node-2 - Node-2 logs\n")
-	fmt.Printf(" orama dev down - Stop all services\n\n")
-	fmt.Printf("Logs directory: %s/logs\n\n", oramaDir)
+	fmt.Printf(" dbn dev status - Show status\n")
+	fmt.Printf(" dbn dev logs bootstrap - Bootstrap logs\n")
+	fmt.Printf(" dbn dev logs bootstrap2 - Bootstrap2 logs\n")
+	fmt.Printf(" dbn dev down - Stop all services\n\n")
+	fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
 }

 func handleDevDown(args []string) {
@@ -129,17 +129,14 @@ func handleDevDown(args []string) {
 		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
 		os.Exit(1)
 	}
-	oramaDir := filepath.Join(homeDir, ".orama")
+	debrosDir := filepath.Join(homeDir, ".debros")

-	pm := development.NewProcessManager(oramaDir, os.Stdout)
+	pm := development.NewProcessManager(debrosDir, os.Stdout)
 	ctx := context.Background()

 	if err := pm.StopAll(ctx); err != nil {
 		fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", err)
-		os.Exit(1)
 	}
-
-	fmt.Printf("✅ All services have been stopped\n\n")
 }

 func handleDevStatus(args []string) {
@@ -148,9 +145,9 @@ func handleDevStatus(args []string) {
 		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
 		os.Exit(1)
 	}
-	oramaDir := filepath.Join(homeDir, ".orama")
+	debrosDir := filepath.Join(homeDir, ".debros")

-	pm := development.NewProcessManager(oramaDir, os.Stdout)
+	pm := development.NewProcessManager(debrosDir, os.Stdout)
 	ctx := context.Background()

 	pm.Status(ctx)
@@ -159,7 +156,7 @@ func handleDevStatus(args []string) {
 func handleDevLogs(args []string) {
 	if len(args) == 0 {
 		fmt.Fprintf(os.Stderr, "Usage: dbn dev logs <component> [--follow]\n")
-		fmt.Fprintf(os.Stderr, "\nComponents: node-1, node-2, node-3, node-4, node-5, gateway, ipfs-node-1, ipfs-node-2, ipfs-node-3, ipfs-node-4, ipfs-node-5, olric, anon\n")
+		fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, bootstrap2, node2, node3, node4, gateway, ipfs-bootstrap, ipfs-bootstrap2, ipfs-node2, ipfs-node3, ipfs-node4, olric, anon\n")
 		os.Exit(1)
 	}

@@ -171,9 +168,9 @@ func handleDevLogs(args []string) {
 		fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
 		os.Exit(1)
 	}
-	oramaDir := filepath.Join(homeDir, ".orama")
+	debrosDir := filepath.Join(homeDir, ".debros")

-	logPath := filepath.Join(oramaDir, "logs", fmt.Sprintf("%s.log", component))
+	logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))
 	if _, err := os.Stat(logPath); os.IsNotExist(err) {
 		fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
 		os.Exit(1)
@@ -43,8 +43,8 @@ func showEnvHelp() {
 	fmt.Printf(" enable - Alias for 'switch' (e.g., 'devnet enable')\n\n")
 	fmt.Printf("Available Environments:\n")
 	fmt.Printf(" local - Local development (http://localhost:6001)\n")
-	fmt.Printf(" devnet - Development network (https://devnet.orama.network)\n")
-	fmt.Printf(" testnet - Test network (https://testnet.orama.network)\n\n")
+	fmt.Printf(" devnet - Development network (https://devnet.debros.network)\n")
+	fmt.Printf(" testnet - Test network (https://testnet.debros.network)\n\n")
 	fmt.Printf("Examples:\n")
 	fmt.Printf(" dbn env list\n")
 	fmt.Printf(" dbn env current\n")
@@ -28,18 +28,18 @@ var DefaultEnvironments = []Environment{
 	{
 		Name:        "local",
 		GatewayURL:  "http://localhost:6001",
-		Description: "Local development environment (node-1)",
+		Description: "Local development environment",
 		IsActive:    true,
 	},
 	{
 		Name:        "devnet",
-		GatewayURL:  "https://devnet.orama.network",
+		GatewayURL:  "https://devnet.debros.network",
 		Description: "Development network (testnet)",
 		IsActive:    false,
 	},
 	{
 		Name:        "testnet",
-		GatewayURL:  "https://testnet.orama.network",
+		GatewayURL:  "https://testnet.debros.network",
 		Description: "Test network (staging)",
 		IsActive:    false,
 	},
File diff suppressed because it is too large
@@ -5,167 +5,76 @@ import (
 )

 // TestProdCommandFlagParsing verifies that prod command flags are parsed correctly
-// Note: The installer now uses --vps-ip presence to determine if it's a first node (no --bootstrap flag)
-// First node: has --vps-ip but no --peers or --join
-// Joining node: has --vps-ip, --peers, and --cluster-secret
 func TestProdCommandFlagParsing(t *testing.T) {
 	tests := []struct {
 		name string
 		args []string
+		expectBootstrap bool
 		expectVPSIP string
-		expectDomain string
+		expectBootstrapJoin string
 		expectPeers string
-		expectJoin string
-		expectSecret string
-		expectBranch string
-		isFirstNode bool // first node = no peers and no join address
 	}{
 		{
-			name: "first node (creates new cluster)",
-			args: []string{"install", "--vps-ip", "10.0.0.1", "--domain", "node-1.example.com"},
-			expectVPSIP: "10.0.0.1",
-			expectDomain: "node-1.example.com",
-			isFirstNode: true,
+			name: "bootstrap node",
+			args: []string{"install", "--bootstrap"},
+			expectBootstrap: true,
 		},
 		{
-			name: "joining node with peers",
-			args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "/ip4/10.0.0.1/tcp/4001/p2p/Qm123", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
+			name: "non-bootstrap with vps-ip",
+			args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "multiaddr1,multiaddr2"},
 			expectVPSIP: "10.0.0.2",
-			expectPeers: "/ip4/10.0.0.1/tcp/4001/p2p/Qm123",
-			expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-			isFirstNode: false,
+			expectPeers: "multiaddr1,multiaddr2",
 		},
 		{
-			name: "joining node with join address",
-			args: []string{"install", "--vps-ip", "10.0.0.3", "--join", "10.0.0.1:7001", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
+			name: "secondary bootstrap",
+			args: []string{"install", "--bootstrap", "--vps-ip", "10.0.0.3", "--bootstrap-join", "10.0.0.1:7001"},
+			expectBootstrap: true,
 			expectVPSIP: "10.0.0.3",
-			expectJoin: "10.0.0.1:7001",
-			expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-			isFirstNode: false,
+			expectBootstrapJoin: "10.0.0.1:7001",
 		},
 		{
-			name: "with nightly branch",
-			args: []string{"install", "--vps-ip", "10.0.0.4", "--branch", "nightly"},
-			expectVPSIP: "10.0.0.4",
-			expectBranch: "nightly",
-			isFirstNode: true,
+			name: "with domain",
+			args: []string{"install", "--bootstrap", "--domain", "example.com"},
+			expectBootstrap: true,
 		},
 	}

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			// Extract flags manually to verify parsing logic
-			var vpsIP, domain, peersStr, joinAddr, clusterSecret, branch string
+			isBootstrap := false
+			var vpsIP, peersStr, bootstrapJoin string

 			for i, arg := range tt.args {
 				switch arg {
-				case "--vps-ip":
-					if i+1 < len(tt.args) {
-						vpsIP = tt.args[i+1]
-					}
-				case "--domain":
-					if i+1 < len(tt.args) {
-						domain = tt.args[i+1]
-					}
+				case "--bootstrap":
+					isBootstrap = true
 				case "--peers":
 					if i+1 < len(tt.args) {
 						peersStr = tt.args[i+1]
 					}
-				case "--join":
+				case "--vps-ip":
 					if i+1 < len(tt.args) {
-						joinAddr = tt.args[i+1]
+						vpsIP = tt.args[i+1]
 					}
-				case "--cluster-secret":
+				case "--bootstrap-join":
 					if i+1 < len(tt.args) {
-						clusterSecret = tt.args[i+1]
+						bootstrapJoin = tt.args[i+1]
 					}
-				case "--branch":
-					if i+1 < len(tt.args) {
-						branch = tt.args[i+1]
-					}
 				}
 			}

-			// First node detection: no peers and no join address
-			isFirstNode := peersStr == "" && joinAddr == ""
+			if isBootstrap != tt.expectBootstrap {
+				t.Errorf("expected bootstrap=%v, got %v", tt.expectBootstrap, isBootstrap)
+			}
 			if vpsIP != tt.expectVPSIP {
 				t.Errorf("expected vpsIP=%q, got %q", tt.expectVPSIP, vpsIP)
 			}
-			if domain != tt.expectDomain {
-				t.Errorf("expected domain=%q, got %q", tt.expectDomain, domain)
-			}
 			if peersStr != tt.expectPeers {
 				t.Errorf("expected peers=%q, got %q", tt.expectPeers, peersStr)
 			}
-			if joinAddr != tt.expectJoin {
-				t.Errorf("expected join=%q, got %q", tt.expectJoin, joinAddr)
+			if bootstrapJoin != tt.expectBootstrapJoin {
+				t.Errorf("expected bootstrapJoin=%q, got %q", tt.expectBootstrapJoin, bootstrapJoin)
 			}
-			if clusterSecret != tt.expectSecret {
-				t.Errorf("expected clusterSecret=%q, got %q", tt.expectSecret, clusterSecret)
-			}
-			if branch != tt.expectBranch {
-				t.Errorf("expected branch=%q, got %q", tt.expectBranch, branch)
-			}
-			if isFirstNode != tt.isFirstNode {
-				t.Errorf("expected isFirstNode=%v, got %v", tt.isFirstNode, isFirstNode)
-			}
-		})
-	}
-}
-
-// TestNormalizePeers tests the peer multiaddr normalization
-func TestNormalizePeers(t *testing.T) {
-	tests := []struct {
-		name string
-		input string
-		expectCount int
-		expectError bool
-	}{
-		{
-			name: "empty string",
-			input: "",
-			expectCount: 0,
-			expectError: false,
-		},
-		{
-			name: "single peer",
-			input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj",
-			expectCount: 1,
-			expectError: false,
-		},
-		{
-			name: "multiple peers",
-			input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj,/ip4/10.0.0.2/tcp/4001/p2p/12D3KooWJzL4SHW3o7sZpzjfEPJzC6Ky7gKvJxY8vQVDR2jHc8F1",
-			expectCount: 2,
-			expectError: false,
-		},
-		{
-			name: "duplicate peers deduplicated",
-			input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj,/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj",
-			expectCount: 1,
-			expectError: false,
-		},
-		{
-			name: "invalid multiaddr",
-			input: "not-a-multiaddr",
-			expectCount: 0,
-			expectError: true,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			peers, err := normalizePeers(tt.input)
-
-			if tt.expectError && err == nil {
-				t.Errorf("expected error but got none")
-			}
-			if !tt.expectError && err != nil {
-				t.Errorf("unexpected error: %v", err)
-			}
-			if len(peers) != tt.expectCount {
-				t.Errorf("expected %d peers, got %d", tt.expectCount, len(peers))
 			}
 		})
 	}
 }
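Illustration (not part of the comparison): the TestNormalizePeers cases above exercise a normalizePeers helper whose body is not shown in this excerpt. A minimal sketch of the behavior those cases imply — split a comma-separated list, validate each multiaddr, drop duplicates — assuming github.com/multiformats/go-multiaddr for validation; the repository's actual helper may differ.

package cli

import (
	"strings"

	"github.com/multiformats/go-multiaddr"
)

// normalizePeers splits a comma-separated multiaddr list, validates each entry,
// and removes duplicates while preserving order. Sketch only.
func normalizePeers(input string) ([]string, error) {
	if strings.TrimSpace(input) == "" {
		return nil, nil
	}
	seen := make(map[string]struct{})
	var out []string
	for _, raw := range strings.Split(input, ",") {
		p := strings.TrimSpace(raw)
		if p == "" {
			continue
		}
		// Reject anything that is not a parseable multiaddr.
		if _, err := multiaddr.NewMultiaddr(p); err != nil {
			return nil, err
		}
		if _, ok := seen[p]; ok {
			continue
		}
		seen[p] = struct{}{}
		out = append(out, p)
	}
	return out, nil
}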
@@ -195,49 +195,49 @@ func (c *Client) Connect() error {
 	c.pubsub = &pubSubBridge{client: c, adapter: adapter}
 	c.logger.Info("Pubsub bridge created successfully")

-	c.logger.Info("Starting peer connections...")
+	c.logger.Info("Starting bootstrap peer connections...")

-	// Connect to peers FIRST
+	// Connect to bootstrap peers FIRST
 	ctx, cancel := context.WithTimeout(context.Background(), c.config.ConnectTimeout)
 	defer cancel()

-	peersConnected := 0
-	for _, peerAddr := range c.config.BootstrapPeers {
-		c.logger.Info("Attempting to connect to peer", zap.String("addr", peerAddr))
-		if err := c.connectToPeer(ctx, peerAddr); err != nil {
-			c.logger.Warn("Failed to connect to peer",
-				zap.String("addr", peerAddr),
+	bootstrapPeersConnected := 0
+	for _, bootstrapAddr := range c.config.BootstrapPeers {
+		c.logger.Info("Attempting to connect to bootstrap peer", zap.String("addr", bootstrapAddr))
+		if err := c.connectToBootstrap(ctx, bootstrapAddr); err != nil {
+			c.logger.Warn("Failed to connect to bootstrap peer",
+				zap.String("addr", bootstrapAddr),
 				zap.Error(err))
 			continue
 		}
-		peersConnected++
-		c.logger.Info("Successfully connected to peer", zap.String("addr", peerAddr))
+		bootstrapPeersConnected++
+		c.logger.Info("Successfully connected to bootstrap peer", zap.String("addr", bootstrapAddr))
 	}

-	if peersConnected == 0 {
-		c.logger.Warn("No peers connected, continuing anyway")
+	if bootstrapPeersConnected == 0 {
+		c.logger.Warn("No bootstrap peers connected, continuing anyway")
 	} else {
-		c.logger.Info("Peer connections completed", zap.Int("connected_count", peersConnected))
+		c.logger.Info("Bootstrap peer connections completed", zap.Int("connected_count", bootstrapPeersConnected))
 	}

-	c.logger.Info("Adding peers to peerstore...")
+	c.logger.Info("Adding bootstrap peers to peerstore...")

-	// Add peers to peerstore so we can connect to them later
-	for _, peerAddr := range c.config.BootstrapPeers {
-		if ma, err := multiaddr.NewMultiaddr(peerAddr); err == nil {
+	// Add bootstrap peers to peerstore so we can connect to them later
+	for _, bootstrapAddr := range c.config.BootstrapPeers {
+		if ma, err := multiaddr.NewMultiaddr(bootstrapAddr); err == nil {
 			if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
 				c.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
-				c.logger.Debug("Added peer to peerstore",
+				c.logger.Debug("Added bootstrap peer to peerstore",
 					zap.String("peer", peerInfo.ID.String()))
 			}
 		}
 	}
-	c.logger.Info("Peers added to peerstore")
+	c.logger.Info("Bootstrap peers added to peerstore")

 	c.logger.Info("Starting connection monitoring...")

 	// Client is a lightweight P2P participant - no discovery needed
-	// We only connect to known peers and let nodes handle discovery
+	// We only connect to known bootstrap peers and let nodes handle discovery
 	c.logger.Debug("Client configured as lightweight P2P participant (no discovery)")

 	// Start minimal connection monitoring
@@ -9,8 +9,8 @@ import (
 	"go.uber.org/zap"
 )

-// connectToPeer connects to a peer address
-func (c *Client) connectToPeer(ctx context.Context, addr string) error {
+// connectToBootstrap connects to a bootstrap peer
+func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
 	ma, err := multiaddr.NewMultiaddr(addr)
 	if err != nil {
 		return fmt.Errorf("invalid multiaddr: %w", err)
@@ -20,14 +20,14 @@ func (c *Client) connectToPeer(ctx context.Context, addr string) error {
 	peerInfo, err := peer.AddrInfoFromP2pAddr(ma)
 	if err != nil {
 		// If there's no peer ID, we can't connect
-		c.logger.Warn("Peer address missing peer ID, skipping",
+		c.logger.Warn("Bootstrap address missing peer ID, skipping",
 			zap.String("addr", addr))
 		return nil
 	}

-	// Avoid dialing ourselves: if the peer address resolves to our own peer ID, skip.
+	// Avoid dialing ourselves: if the bootstrap address resolves to our own peer ID, skip.
 	if c.host != nil && peerInfo.ID == c.host.ID() {
-		c.logger.Debug("Skipping peer address because it resolves to self",
+		c.logger.Debug("Skipping bootstrap address because it resolves to self",
 			zap.String("addr", addr),
 			zap.String("peer_id", peerInfo.ID.String()))
 		return nil
@@ -38,7 +38,7 @@ func (c *Client) connectToPeer(ctx context.Context, addr string) error {
 		return fmt.Errorf("failed to connect to peer: %w", err)
 	}

-	c.logger.Debug("Connected to peer",
+	c.logger.Debug("Connected to bootstrap peer",
 		zap.String("peer_id", peerInfo.ID.String()),
 		zap.String("addr", addr))

@@ -9,7 +9,7 @@ import (
 	"github.com/multiformats/go-multiaddr"
 )

-// DefaultBootstrapPeers returns the default peer multiaddrs.
+// DefaultBootstrapPeers returns the library's default bootstrap peer multiaddrs.
 // These can be overridden by environment variables or config.
 func DefaultBootstrapPeers() []string {
 	// Check environment variable first
@@ -48,7 +48,7 @@ func DefaultDatabaseEndpoints() []string {
 		}
 	}

-	// Try to derive from configured peers if available
+	// Try to derive from bootstrap peers if available
 	peers := DefaultBootstrapPeers()
 	if len(peers) > 0 {
 		endpoints := make([]string, 0, len(peers))
@@ -10,15 +10,15 @@ import (
 func TestDefaultBootstrapPeersNonEmpty(t *testing.T) {
 	old := os.Getenv("DEBROS_BOOTSTRAP_PEERS")
 	t.Cleanup(func() { os.Setenv("DEBROS_BOOTSTRAP_PEERS", old) })
-	// Set a valid peer
+	// Set a valid bootstrap peer
 	validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
 	_ = os.Setenv("DEBROS_BOOTSTRAP_PEERS", validPeer)
 	peers := DefaultBootstrapPeers()
 	if len(peers) == 0 {
-		t.Fatalf("expected non-empty default peers")
+		t.Fatalf("expected non-empty default bootstrap peers")
 	}
 	if peers[0] != validPeer {
-		t.Fatalf("expected peer %s, got %s", validPeer, peers[0])
+		t.Fatalf("expected bootstrap peer %s, got %s", validPeer, peers[0])
 	}
 }

@@ -2,9 +2,7 @@ package client

 import (
 	"context"
-	"encoding/json"
 	"fmt"
-	"net/http"
 	"strings"
 	"sync"
 	"time"
@@ -162,31 +160,17 @@ func (d *DatabaseClientImpl) isWriteOperation(sql string) bool {
 func (d *DatabaseClientImpl) clearConnection() {
 	d.mu.Lock()
 	defer d.mu.Unlock()
-	if d.connection != nil {
-		d.connection.Close()
 		d.connection = nil
 	}
-}

 // getRQLiteConnection returns a connection to RQLite, creating one if needed
 func (d *DatabaseClientImpl) getRQLiteConnection() (*gorqlite.Connection, error) {
-	d.mu.RLock()
-	conn := d.connection
-	d.mu.RUnlock()
-
-	if conn != nil {
-		return conn, nil
-	}
-
-	newConn, err := d.connectToAvailableNode()
-	if err != nil {
-		return nil, err
-	}
-
 	d.mu.Lock()
-	d.connection = newConn
-	d.mu.Unlock()
-	return newConn, nil
+	defer d.mu.Unlock()
+
+	// Always try to get a fresh connection to handle leadership changes
+	// and node failures gracefully
+	return d.connectToAvailableNode()
 }

 // getRQLiteNodes returns a list of RQLite node URLs with precedence:
@@ -243,6 +227,7 @@ func (d *DatabaseClientImpl) connectToAvailableNode() (*gorqlite.Connection, err
 			continue
 		}

+		d.connection = conn
 		return conn, nil
 	}

@@ -506,100 +491,15 @@ func (n *NetworkInfoImpl) GetStatus(ctx context.Context) (*NetworkStatus, error)
 		}
 	}

-	// Try to get IPFS peer info (optional - don't fail if unavailable)
-	ipfsInfo := queryIPFSPeerInfo()
-
-	// Try to get IPFS Cluster peer info (optional - don't fail if unavailable)
-	ipfsClusterInfo := queryIPFSClusterPeerInfo()
-
 	return &NetworkStatus{
 		NodeID:       host.ID().String(),
-		PeerID:       host.ID().String(),
 		Connected:    true,
 		PeerCount:    len(connectedPeers),
 		DatabaseSize: dbSize,
 		Uptime:       time.Since(n.client.startTime),
-		IPFS:         ipfsInfo,
-		IPFSCluster:  ipfsClusterInfo,
 	}, nil
 }

-// queryIPFSPeerInfo queries the local IPFS API for peer information
-// Returns nil if IPFS is not running or unavailable
-func queryIPFSPeerInfo() *IPFSPeerInfo {
-	// IPFS API typically runs on port 4501 in our setup
-	client := &http.Client{Timeout: 2 * time.Second}
-	resp, err := client.Post("http://localhost:4501/api/v0/id", "", nil)
-	if err != nil {
-		return nil // IPFS not available
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		return nil
-	}
-
-	var result struct {
-		ID        string   `json:"ID"`
-		Addresses []string `json:"Addresses"`
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-		return nil
-	}
-
-	// Filter addresses to only include public/routable ones
-	var swarmAddrs []string
-	for _, addr := range result.Addresses {
-		// Skip loopback and private addresses for external discovery
-		if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
-			swarmAddrs = append(swarmAddrs, addr)
-		}
-	}
-
-	return &IPFSPeerInfo{
-		PeerID:         result.ID,
-		SwarmAddresses: swarmAddrs,
-	}
-}
-
-// queryIPFSClusterPeerInfo queries the local IPFS Cluster API for peer information
-// Returns nil if IPFS Cluster is not running or unavailable
-func queryIPFSClusterPeerInfo() *IPFSClusterPeerInfo {
-	// IPFS Cluster API typically runs on port 9094 in our setup
-	client := &http.Client{Timeout: 2 * time.Second}
-	resp, err := client.Get("http://localhost:9094/id")
-	if err != nil {
-		return nil // IPFS Cluster not available
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		return nil
-	}
-
-	var result struct {
-		ID        string   `json:"id"`
-		Addresses []string `json:"addresses"`
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-		return nil
-	}
-
-	// Filter addresses to only include public/routable ones for cluster discovery
-	var clusterAddrs []string
-	for _, addr := range result.Addresses {
-		// Skip loopback addresses - only keep routable addresses
-		if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
-			clusterAddrs = append(clusterAddrs, addr)
-		}
-	}
-
-	return &IPFSClusterPeerInfo{
-		PeerID:    result.ID,
-		Addresses: clusterAddrs,
-	}
-}
-
 // ConnectToPeer connects to a specific peer
 func (n *NetworkInfoImpl) ConnectToPeer(ctx context.Context, peerAddr string) error {
 	if !n.client.isConnected() {
@@ -115,25 +115,10 @@ type PeerInfo struct {
 // NetworkStatus contains overall network status
 type NetworkStatus struct {
 	NodeID       string        `json:"node_id"`
-	PeerID       string        `json:"peer_id"`
 	Connected    bool          `json:"connected"`
 	PeerCount    int           `json:"peer_count"`
 	DatabaseSize int64         `json:"database_size"`
 	Uptime       time.Duration `json:"uptime"`
-	IPFS         *IPFSPeerInfo        `json:"ipfs,omitempty"`
-	IPFSCluster  *IPFSClusterPeerInfo `json:"ipfs_cluster,omitempty"`
-}
-
-// IPFSPeerInfo contains IPFS peer information for discovery
-type IPFSPeerInfo struct {
-	PeerID         string   `json:"peer_id"`
-	SwarmAddresses []string `json:"swarm_addresses"`
-}
-
-// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster discovery
-type IPFSClusterPeerInfo struct {
-	PeerID    string   `json:"peer_id"`   // Cluster peer ID (different from IPFS peer ID)
-	Addresses []string `json:"addresses"` // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
 }

 // HealthStatus contains health check information
@@ -173,7 +158,7 @@ type StorageStatus struct {
 type ClientConfig struct {
 	AppName           string        `json:"app_name"`
 	DatabaseName      string        `json:"database_name"`
-	BootstrapPeers    []string      `json:"peers"`
+	BootstrapPeers    []string      `json:"bootstrap_peers"`
 	DatabaseEndpoints []string      `json:"database_endpoints"`
 	GatewayURL        string        `json:"gateway_url"` // Gateway URL for HTTP API access (e.g., "http://localhost:6001")
 	ConnectTimeout    time.Duration `json:"connect_timeout"`
@@ -13,16 +13,15 @@ type Config struct {
 	Discovery   DiscoveryConfig   `yaml:"discovery"`
 	Security    SecurityConfig    `yaml:"security"`
 	Logging     LoggingConfig     `yaml:"logging"`
-	HTTPGateway HTTPGatewayConfig `yaml:"http_gateway"`
 }

 // NodeConfig contains node-specific configuration
 type NodeConfig struct {
 	ID              string   `yaml:"id"` // Auto-generated if empty
+	Type            string   `yaml:"type"` // "bootstrap" or "node"
 	ListenAddresses []string `yaml:"listen_addresses"` // LibP2P listen addresses
 	DataDir         string   `yaml:"data_dir"` // Data directory
 	MaxConnections  int      `yaml:"max_connections"` // Maximum peer connections
-	Domain          string   `yaml:"domain"` // Domain for this node (e.g., node-1.orama.network)
 }

 // DatabaseConfig contains database-related configuration
@@ -38,13 +37,6 @@ type DatabaseConfig struct {
 	RQLiteRaftPort    int    `yaml:"rqlite_raft_port"` // RQLite Raft consensus port
 	RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster

-	// RQLite node-to-node TLS encryption (for inter-node Raft communication)
-	// See: https://rqlite.io/docs/guides/security/#encrypting-node-to-node-communication
-	NodeCert     string `yaml:"node_cert"` // Path to X.509 certificate for node-to-node communication
-	NodeKey      string `yaml:"node_key"` // Path to X.509 private key for node-to-node communication
-	NodeCACert   string `yaml:"node_ca_cert"` // Path to CA certificate (optional, uses system CA if not set)
-	NodeNoVerify bool   `yaml:"node_no_verify"` // Skip certificate verification (for testing/self-signed certs)
-
 	// Dynamic discovery configuration (always enabled)
 	ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s
 	PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h
@@ -83,9 +75,9 @@ type IPFSConfig struct {

 // DiscoveryConfig contains peer discovery configuration
 type DiscoveryConfig struct {
-	BootstrapPeers    []string      `yaml:"bootstrap_peers"` // Peer addresses to connect to
+	BootstrapPeers    []string      `yaml:"bootstrap_peers"` // Bootstrap peer addresses
 	DiscoveryInterval time.Duration `yaml:"discovery_interval"` // Discovery announcement interval
-	BootstrapPort     int           `yaml:"bootstrap_port"` // Default port for peer discovery
+	BootstrapPort     int           `yaml:"bootstrap_port"` // Default port for bootstrap nodes
 	HttpAdvAddress    string        `yaml:"http_adv_address"` // HTTP advertisement address
 	RaftAdvAddress    string        `yaml:"raft_adv_address"` // Raft advertisement
 	NodeNamespace     string        `yaml:"node_namespace"` // Namespace for node identifiers
@@ -105,56 +97,6 @@ type LoggingConfig struct {
 	OutputFile string `yaml:"output_file"` // Empty for stdout
 }

-// HTTPGatewayConfig contains HTTP reverse proxy gateway configuration
-type HTTPGatewayConfig struct {
-	Enabled    bool                   `yaml:"enabled"` // Enable HTTP gateway
-	ListenAddr string                 `yaml:"listen_addr"` // Address to listen on (e.g., ":8080")
-	NodeName   string                 `yaml:"node_name"` // Node name for routing
-	Routes     map[string]RouteConfig `yaml:"routes"` // Service routes
-	HTTPS      HTTPSConfig            `yaml:"https"` // HTTPS/TLS configuration
-	SNI        SNIConfig              `yaml:"sni"` // SNI-based TCP routing configuration
-
-	// Full gateway configuration (for API, auth, pubsub)
-	ClientNamespace   string        `yaml:"client_namespace"` // Namespace for network client
-	RQLiteDSN         string        `yaml:"rqlite_dsn"` // RQLite database DSN
-	OlricServers      []string      `yaml:"olric_servers"` // List of Olric server addresses
-	OlricTimeout      time.Duration `yaml:"olric_timeout"` // Timeout for Olric operations
-	IPFSClusterAPIURL string        `yaml:"ipfs_cluster_api_url"` // IPFS Cluster API URL
-	IPFSAPIURL        string        `yaml:"ipfs_api_url"` // IPFS API URL
-	IPFSTimeout       time.Duration `yaml:"ipfs_timeout"` // Timeout for IPFS operations
-}
-
-// HTTPSConfig contains HTTPS/TLS configuration for the gateway
-type HTTPSConfig struct {
-	Enabled       bool   `yaml:"enabled"` // Enable HTTPS (port 443)
-	Domain        string `yaml:"domain"` // Primary domain (e.g., node-123.orama.network)
-	AutoCert      bool   `yaml:"auto_cert"` // Use Let's Encrypt for automatic certificate
-	UseSelfSigned bool   `yaml:"use_self_signed"` // Use self-signed certificates (pre-generated)
-	CertFile      string `yaml:"cert_file"` // Path to certificate file (if not using auto_cert)
-	KeyFile       string `yaml:"key_file"` // Path to key file (if not using auto_cert)
-	CacheDir      string `yaml:"cache_dir"` // Directory for Let's Encrypt certificate cache
-	HTTPPort      int    `yaml:"http_port"` // HTTP port for ACME challenge (default: 80)
-	HTTPSPort     int    `yaml:"https_port"` // HTTPS port (default: 443)
-	Email         string `yaml:"email"` // Email for Let's Encrypt account
-}
-
-// SNIConfig contains SNI-based TCP routing configuration for port 7001
-type SNIConfig struct {
-	Enabled    bool              `yaml:"enabled"` // Enable SNI-based TCP routing
-	ListenAddr string            `yaml:"listen_addr"` // Address to listen on (e.g., ":7001")
-	Routes     map[string]string `yaml:"routes"` // SNI hostname -> backend address mapping
-	CertFile   string            `yaml:"cert_file"` // Path to certificate file
-	KeyFile    string            `yaml:"key_file"` // Path to key file
-}
-
-// RouteConfig defines a single reverse proxy route
-type RouteConfig struct {
-	PathPrefix string        `yaml:"path_prefix"` // URL path prefix (e.g., "/rqlite/http")
-	BackendURL string        `yaml:"backend_url"` // Backend service URL
-	Timeout    time.Duration `yaml:"timeout"` // Request timeout
-	WebSocket  bool          `yaml:"websocket"` // Support WebSocket upgrades
-}
-
 // ClientConfig represents configuration for network clients
 type ClientConfig struct {
 	AppName string `yaml:"app_name"`
@@ -181,6 +123,7 @@ func (c *Config) ParseMultiaddrs() ([]multiaddr.Multiaddr, error) {
 func DefaultConfig() *Config {
 	return &Config{
 		Node: NodeConfig{
+			Type: "node",
 			ListenAddresses: []string{
 				"/ip4/0.0.0.0/tcp/4001", // TCP only - compatible with Anyone proxy/SOCKS5
 			},
@@ -197,7 +140,7 @@ func DefaultConfig() *Config {
 			// RQLite-specific configuration
 			RQLitePort:        5001,
 			RQLiteRaftPort:    7001,
-			RQLiteJoinAddress: "", // Empty for first node (creates cluster)
+			RQLiteJoinAddress: "", // Empty for bootstrap node

 			// Dynamic discovery (always enabled)
 			ClusterSyncInterval: 30 * time.Second,
@@ -232,18 +175,5 @@ func DefaultConfig() *Config {
 			Level:  "info",
 			Format: "console",
 		},
-		HTTPGateway: HTTPGatewayConfig{
-			Enabled:           true,
-			ListenAddr:        ":8080",
-			NodeName:          "default",
-			Routes:            make(map[string]RouteConfig),
-			ClientNamespace:   "default",
-			RQLiteDSN:         "http://localhost:5001",
-			OlricServers:      []string{"localhost:3320"},
-			OlricTimeout:      10 * time.Second,
-			IPFSClusterAPIURL: "http://localhost:9094",
-			IPFSAPIURL:        "http://localhost:5001",
-			IPFSTimeout:       60 * time.Second,
-		},
 	}
 }
@@ -6,13 +6,13 @@ import (
 	"path/filepath"
 )

-// ConfigDir returns the path to the DeBros config directory (~/.orama).
+// ConfigDir returns the path to the DeBros config directory (~/.debros).
 func ConfigDir() (string, error) {
 	home, err := os.UserHomeDir()
 	if err != nil {
 		return "", fmt.Errorf("failed to determine home directory: %w", err)
 	}
-	return filepath.Join(home, ".orama"), nil
+	return filepath.Join(home, ".debros"), nil
 }

 // EnsureConfigDir creates the config directory if it does not exist.
@@ -28,8 +28,8 @@ func EnsureConfigDir() (string, error) {
 }

 // DefaultPath returns the path to the config file for the given component name.
-// component should be e.g., "node.yaml", "gateway.yaml"
-// It checks ~/.orama/data/, ~/.orama/configs/, and ~/.orama/ for backward compatibility.
+// component should be e.g., "node.yaml", "bootstrap.yaml", "gateway.yaml"
+// It checks both ~/.debros/ and ~/.debros/configs/ for backward compatibility.
 // If component is already an absolute path, it returns it as-is.
 func DefaultPath(component string) (string, error) {
 	// If component is already an absolute path, return it directly
@@ -42,35 +42,18 @@ func DefaultPath(component string) (string, error) {
 		return "", err
 	}

-	var gatewayDefault string
-	// For gateway.yaml, check data/ directory first (production location)
-	if component == "gateway.yaml" {
-		dataPath := filepath.Join(dir, "data", component)
-		if _, err := os.Stat(dataPath); err == nil {
-			return dataPath, nil
-		}
-		// Remember the preferred default so we can still fall back to legacy paths
-		gatewayDefault = dataPath
-	}
-
-	// First check in ~/.orama/configs/ (production installer location)
+	// First check in ~/.debros/configs/ (production installer location)
 	configsPath := filepath.Join(dir, "configs", component)
 	if _, err := os.Stat(configsPath); err == nil {
 		return configsPath, nil
 	}

-	// Fallback to ~/.orama/ (legacy/development location)
+	// Fallback to ~/.debros/ (legacy/development location)
 	legacyPath := filepath.Join(dir, component)
 	if _, err := os.Stat(legacyPath); err == nil {
 		return legacyPath, nil
 	}

-	if gatewayDefault != "" {
-		// If we preferred the data path (gateway.yaml) but didn't find it anywhere else,
-		// return the data path so error messages point to the production location.
-		return gatewayDefault, nil
-	}
-
 	// Return configs path as default (even if it doesn't exist yet)
 	// This allows the error message to show the expected production location
 	return configsPath, nil
@ -15,7 +15,7 @@ import (
 
 // ValidationError represents a single validation error with context.
 type ValidationError struct {
-    Path    string // e.g., "discovery.bootstrap_peers[0]" or "discovery.peers[0]"
+    Path    string // e.g., "discovery.bootstrap_peers[0]"
     Message string // e.g., "invalid multiaddr"
     Hint    string // e.g., "expected /ip{4,6}/.../tcp/<port>/p2p/<peerID>"
 }
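Reviewer note: ValidationError is returned as an error on both sides, so it presumably carries an Error() method along these lines (a sketch; the concrete wording in the repository may differ).

// Sketch only: one plausible Error() for the struct above. Uses fmt.
func (e ValidationError) Error() string {
    if e.Hint != "" {
        return fmt.Sprintf("%s: %s (hint: %s)", e.Path, e.Message, e.Hint)
    }
    return fmt.Sprintf("%s: %s", e.Path, e.Message)
}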
@ -61,6 +61,14 @@ func (c *Config) validateNode() []error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate type
|
||||||
|
if nc.Type != "bootstrap" && nc.Type != "node" {
|
||||||
|
errs = append(errs, ValidationError{
|
||||||
|
Path: "node.type",
|
||||||
|
Message: fmt.Sprintf("must be one of [bootstrap node]; got %q", nc.Type),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Validate listen_addresses
|
// Validate listen_addresses
|
||||||
if len(nc.ListenAddresses) == 0 {
|
if len(nc.ListenAddresses) == 0 {
|
||||||
errs = append(errs, ValidationError{
|
errs = append(errs, ValidationError{
|
||||||
@ -210,8 +218,25 @@ func (c *Config) validateDatabase() []error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate rqlite_join_address format if provided (optional for all nodes)
|
// Validate rqlite_join_address context-dependently
|
||||||
// The first node in a cluster won't have a join address; subsequent nodes will
|
if c.Node.Type == "node" {
|
||||||
|
if dc.RQLiteJoinAddress == "" {
|
||||||
|
errs = append(errs, ValidationError{
|
||||||
|
Path: "database.rqlite_join_address",
|
||||||
|
Message: "required for node type (non-bootstrap)",
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
|
||||||
|
errs = append(errs, ValidationError{
|
||||||
|
Path: "database.rqlite_join_address",
|
||||||
|
Message: err.Error(),
|
||||||
|
Hint: "expected format: host:port",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if c.Node.Type == "bootstrap" {
|
||||||
|
// Bootstrap nodes can optionally join another bootstrap's RQLite cluster
|
||||||
|
// This allows secondary bootstraps to synchronize with the primary
|
||||||
if dc.RQLiteJoinAddress != "" {
|
if dc.RQLiteJoinAddress != "" {
|
||||||
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
|
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
|
||||||
errs = append(errs, ValidationError{
|
errs = append(errs, ValidationError{
|
||||||
@ -221,6 +246,7 @@ func (c *Config) validateDatabase() []error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Validate cluster_sync_interval
|
// Validate cluster_sync_interval
|
||||||
if dc.ClusterSyncInterval != 0 && dc.ClusterSyncInterval < 10*time.Second {
|
if dc.ClusterSyncInterval != 0 && dc.ClusterSyncInterval < 10*time.Second {
|
||||||
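Reviewer note: validateHostPort is referenced on both sides but not shown in this hunk. A standard-library sketch that matches the behaviour the tests expect (reject a bare host, reject ports outside 1-65535):

// Sketch only: the repository's validateHostPort may differ.
// Uses net, strconv, and fmt from the standard library.
func validateHostPort(addr string) error {
    host, port, err := net.SplitHostPort(addr)
    if err != nil {
        return fmt.Errorf("invalid host:port %q: %w", addr, err)
    }
    if host == "" {
        return fmt.Errorf("missing host in %q", addr)
    }
    if p, err := strconv.Atoi(port); err != nil || p < 1 || p > 65535 {
        return fmt.Errorf("invalid port %q (must be 1-65535)", port)
    }
    return nil
}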
@ -271,7 +297,7 @@ func (c *Config) validateDiscovery() []error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate peer discovery port
|
// Validate bootstrap_port
|
||||||
if disc.BootstrapPort < 1 || disc.BootstrapPort > 65535 {
|
if disc.BootstrapPort < 1 || disc.BootstrapPort > 65535 {
|
||||||
errs = append(errs, ValidationError{
|
errs = append(errs, ValidationError{
|
||||||
Path: "discovery.bootstrap_port",
|
Path: "discovery.bootstrap_port",
|
||||||
@ -279,8 +305,17 @@ func (c *Config) validateDiscovery() []error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validate peer addresses (optional - all nodes are unified peers now)
|
// Validate bootstrap_peers context-dependently
|
||||||
// Validate each peer multiaddr
|
if c.Node.Type == "node" {
|
||||||
|
if len(disc.BootstrapPeers) == 0 {
|
||||||
|
errs = append(errs, ValidationError{
|
||||||
|
Path: "discovery.bootstrap_peers",
|
||||||
|
Message: "required for node type (must not be empty)",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate each bootstrap peer multiaddr
|
||||||
seenPeers := make(map[string]bool)
|
seenPeers := make(map[string]bool)
|
||||||
for i, peer := range disc.BootstrapPeers {
|
for i, peer := range disc.BootstrapPeers {
|
||||||
path := fmt.Sprintf("discovery.bootstrap_peers[%d]", i)
|
path := fmt.Sprintf("discovery.bootstrap_peers[%d]", i)
|
||||||
@ -328,7 +363,7 @@ func (c *Config) validateDiscovery() []error {
|
|||||||
if seenPeers[peer] {
|
if seenPeers[peer] {
|
||||||
errs = append(errs, ValidationError{
|
errs = append(errs, ValidationError{
|
||||||
Path: path,
|
Path: path,
|
||||||
Message: "duplicate peer",
|
Message: "duplicate bootstrap peer",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
seenPeers[peer] = true
|
seenPeers[peer] = true
|
||||||
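Reviewer note: the per-peer loop above boils down to three checks (parseable multiaddr, a /p2p/<peerID> component, a sane TCP port). With github.com/multiformats/go-multiaddr that is roughly the sketch below, not the repository's exact code.

// Sketch only: mirrors the checks above. Uses go-multiaddr, strconv, fmt.
func checkPeerMultiaddr(s string) error {
    ma, err := multiaddr.NewMultiaddr(s)
    if err != nil {
        return fmt.Errorf("invalid multiaddr: %w", err)
    }
    if _, err := ma.ValueForProtocol(multiaddr.P_P2P); err != nil {
        return fmt.Errorf("missing /p2p/<peerID> component")
    }
    if port, err := ma.ValueForProtocol(multiaddr.P_TCP); err == nil {
        if p, convErr := strconv.Atoi(port); convErr != nil || p < 1 || p > 65535 {
            return fmt.Errorf("invalid TCP port %q", port)
        }
    }
    return nil
}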
@ -451,6 +486,22 @@ func (c *Config) validateLogging() []error {
|
|||||||
|
|
||||||
func (c *Config) validateCrossFields() []error {
|
func (c *Config) validateCrossFields() []error {
|
||||||
var errs []error
|
var errs []error
|
||||||
|
|
||||||
|
// If node.type is invalid, don't run cross-checks
|
||||||
|
if c.Node.Type != "bootstrap" && c.Node.Type != "node" {
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cross-check rqlite_join_address vs node type
|
||||||
|
// Note: Bootstrap nodes can optionally join another bootstrap's cluster
|
||||||
|
|
||||||
|
if c.Node.Type == "node" && c.Database.RQLiteJoinAddress == "" {
|
||||||
|
errs = append(errs, ValidationError{
|
||||||
|
Path: "database.rqlite_join_address",
|
||||||
|
Message: "required for non-bootstrap node type",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
return errs
|
return errs
|
||||||
}
|
}
|
||||||
|
|
||||||
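Reviewer note: both branches funnel these per-section validators through Config.Validate(), which returns a slice of errors rather than failing fast. A sketch of a typical call site; the loader name is hypothetical.

// Sketch only: config.Load is a placeholder for however the repo parses YAML.
cfg, err := config.Load("node.yaml")
if err != nil {
    log.Fatalf("load config: %v", err)
}
if errs := cfg.Validate(); len(errs) > 0 {
    for _, e := range errs {
        log.Printf("config error: %v", e)
    }
    log.Fatal("configuration is invalid")
}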
|
|||||||
@ -5,11 +5,12 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// validConfigForNode returns a valid config
|
// validConfigForType returns a valid config for the given node type
|
||||||
func validConfigForNode() *Config {
|
func validConfigForType(nodeType string) *Config {
|
||||||
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
||||||
cfg := &Config{
|
cfg := &Config{
|
||||||
Node: NodeConfig{
|
Node: NodeConfig{
|
||||||
|
Type: nodeType,
|
||||||
ID: "test-node-id",
|
ID: "test-node-id",
|
||||||
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4001"},
|
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4001"},
|
||||||
DataDir: ".",
|
DataDir: ".",
|
||||||
@ -24,7 +25,6 @@ func validConfigForNode() *Config {
|
|||||||
RQLitePort: 5001,
|
RQLitePort: 5001,
|
||||||
RQLiteRaftPort: 7001,
|
RQLiteRaftPort: 7001,
|
||||||
MinClusterSize: 1,
|
MinClusterSize: 1,
|
||||||
RQLiteJoinAddress: "", // Optional - first node creates cluster, others join
|
|
||||||
},
|
},
|
||||||
Discovery: DiscoveryConfig{
|
Discovery: DiscoveryConfig{
|
||||||
BootstrapPeers: []string{validPeer},
|
BootstrapPeers: []string{validPeer},
|
||||||
@ -40,9 +40,51 @@ func validConfigForNode() *Config {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set rqlite_join_address based on node type
|
||||||
|
if nodeType == "node" {
|
||||||
|
cfg.Database.RQLiteJoinAddress = "localhost:5001"
|
||||||
|
// Node type requires bootstrap peers
|
||||||
|
cfg.Discovery.BootstrapPeers = []string{validPeer}
|
||||||
|
} else {
|
||||||
|
// Bootstrap type: empty join address and peers optional
|
||||||
|
cfg.Database.RQLiteJoinAddress = ""
|
||||||
|
cfg.Discovery.BootstrapPeers = []string{}
|
||||||
|
}
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestValidateNodeType(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
nodeType string
|
||||||
|
shouldError bool
|
||||||
|
}{
|
||||||
|
{"bootstrap", "bootstrap", false},
|
||||||
|
{"node", "node", false},
|
||||||
|
{"invalid", "invalid-type", true},
|
||||||
|
{"empty", "", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
cfg := validConfigForType("bootstrap") // Start with valid bootstrap
|
||||||
|
if tt.nodeType == "node" {
|
||||||
|
cfg = validConfigForType("node")
|
||||||
|
} else {
|
||||||
|
cfg.Node.Type = tt.nodeType
|
||||||
|
}
|
||||||
|
errs := cfg.Validate()
|
||||||
|
if tt.shouldError && len(errs) == 0 {
|
||||||
|
t.Errorf("expected error, got none")
|
||||||
|
}
|
||||||
|
if !tt.shouldError && len(errs) > 0 {
|
||||||
|
t.Errorf("unexpected errors: %v", errs)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestValidateListenAddresses(t *testing.T) {
|
func TestValidateListenAddresses(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@ -60,7 +102,7 @@ func TestValidateListenAddresses(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Node.ListenAddresses = tt.addresses
|
cfg.Node.ListenAddresses = tt.addresses
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -88,7 +130,7 @@ func TestValidateReplicationFactor(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Database.ReplicationFactor = tt.replication
|
cfg.Database.ReplicationFactor = tt.replication
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -118,7 +160,7 @@ func TestValidateRQLitePorts(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Database.RQLitePort = tt.httpPort
|
cfg.Database.RQLitePort = tt.httpPort
|
||||||
cfg.Database.RQLiteRaftPort = tt.raftPort
|
cfg.Database.RQLiteRaftPort = tt.raftPort
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
@ -135,18 +177,21 @@ func TestValidateRQLitePorts(t *testing.T) {
|
|||||||
func TestValidateRQLiteJoinAddress(t *testing.T) {
|
func TestValidateRQLiteJoinAddress(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
|
nodeType string
|
||||||
joinAddr string
|
joinAddr string
|
||||||
shouldError bool
|
shouldError bool
|
||||||
}{
|
}{
|
||||||
{"node with join", "localhost:5001", false},
|
{"node with join", "node", "localhost:5001", false},
|
||||||
{"node without join", "", false}, // Join address is optional (first node creates cluster)
|
{"node without join", "node", "", true},
|
||||||
{"invalid join format", "localhost", true},
|
{"bootstrap with join", "bootstrap", "localhost:5001", false},
|
||||||
{"invalid join port", "localhost:99999", true},
|
{"bootstrap without join", "bootstrap", "", false},
|
||||||
|
{"invalid join format", "node", "localhost", true},
|
||||||
|
{"invalid join port", "node", "localhost:99999", true},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType(tt.nodeType)
|
||||||
cfg.Database.RQLiteJoinAddress = tt.joinAddr
|
cfg.Database.RQLiteJoinAddress = tt.joinAddr
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -159,24 +204,27 @@ func TestValidateRQLiteJoinAddress(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidatePeerAddresses(t *testing.T) {
|
func TestValidateBootstrapPeers(t *testing.T) {
|
||||||
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
|
nodeType string
|
||||||
peers []string
|
peers []string
|
||||||
shouldError bool
|
shouldError bool
|
||||||
}{
|
}{
|
||||||
{"node with peer", []string{validPeer}, false},
|
{"node with peer", "node", []string{validPeer}, false},
|
||||||
{"node without peer", []string{}, false}, // All nodes are unified peers - bootstrap peers optional
|
{"node without peer", "node", []string{}, true},
|
||||||
{"invalid multiaddr", []string{"invalid"}, true},
|
{"bootstrap with peer", "bootstrap", []string{validPeer}, false},
|
||||||
{"missing p2p", []string{"/ip4/127.0.0.1/tcp/4001"}, true},
|
{"bootstrap without peer", "bootstrap", []string{}, false},
|
||||||
{"duplicate peer", []string{validPeer, validPeer}, true},
|
{"invalid multiaddr", "node", []string{"invalid"}, true},
|
||||||
{"invalid port", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true},
|
{"missing p2p", "node", []string{"/ip4/127.0.0.1/tcp/4001"}, true},
|
||||||
|
{"duplicate peer", "node", []string{validPeer, validPeer}, true},
|
||||||
|
{"invalid port", "node", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType(tt.nodeType)
|
||||||
cfg.Discovery.BootstrapPeers = tt.peers
|
cfg.Discovery.BootstrapPeers = tt.peers
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -205,7 +253,7 @@ func TestValidateLoggingLevel(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Logging.Level = tt.level
|
cfg.Logging.Level = tt.level
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -232,7 +280,7 @@ func TestValidateLoggingFormat(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Logging.Format = tt.format
|
cfg.Logging.Format = tt.format
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -259,7 +307,7 @@ func TestValidateMaxConnections(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Node.MaxConnections = tt.maxConn
|
cfg.Node.MaxConnections = tt.maxConn
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -286,7 +334,7 @@ func TestValidateDiscoveryInterval(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Discovery.DiscoveryInterval = tt.interval
|
cfg.Discovery.DiscoveryInterval = tt.interval
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -299,7 +347,7 @@ func TestValidateDiscoveryInterval(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestValidatePeerDiscoveryPort(t *testing.T) {
|
func TestValidateBootstrapPort(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
port int
|
port int
|
||||||
@ -313,7 +361,7 @@ func TestValidatePeerDiscoveryPort(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
cfg := validConfigForNode()
|
cfg := validConfigForType("node")
|
||||||
cfg.Discovery.BootstrapPort = tt.port
|
cfg.Discovery.BootstrapPort = tt.port
|
||||||
errs := cfg.Validate()
|
errs := cfg.Validate()
|
||||||
if tt.shouldError && len(errs) == 0 {
|
if tt.shouldError && len(errs) == 0 {
|
||||||
@ -330,6 +378,7 @@ func TestValidateCompleteConfig(t *testing.T) {
|
|||||||
// Test a complete valid config
|
// Test a complete valid config
|
||||||
validCfg := &Config{
|
validCfg := &Config{
|
||||||
Node: NodeConfig{
|
Node: NodeConfig{
|
||||||
|
Type: "node",
|
||||||
ID: "node1",
|
ID: "node1",
|
||||||
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4002"},
|
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4002"},
|
||||||
DataDir: ".",
|
DataDir: ".",
|
||||||
|
|||||||
@ -6,7 +6,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/libp2p/go-libp2p/core/host"
|
"github.com/libp2p/go-libp2p/core/host"
|
||||||
@ -116,34 +115,35 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter addresses to only include port 4001 (standard libp2p port)
|
// Include all addresses with valid TCP ports
|
||||||
// This prevents including non-libp2p service ports (like RQLite ports) in peer exchange
|
// This allows test clients and dynamic allocations to participate in peer discovery
|
||||||
const libp2pPort = 4001
|
|
||||||
filteredAddrs := make([]multiaddr.Multiaddr, 0)
|
filteredAddrs := make([]multiaddr.Multiaddr, 0)
|
||||||
filteredCount := 0
|
|
||||||
for _, addr := range addrs {
|
for _, addr := range addrs {
|
||||||
// Extract TCP port from multiaddr
|
// Extract TCP port from multiaddr
|
||||||
port, err := addr.ValueForProtocol(multiaddr.P_TCP)
|
port, err := addr.ValueForProtocol(multiaddr.P_TCP)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
portNum, err := strconv.Atoi(port)
|
portNum, err := strconv.Atoi(port)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
// Only include addresses with port 4001
|
// Accept all valid TCP ports > 0, including ephemeral ports
|
||||||
if portNum == libp2pPort {
|
// Test clients and dynamic allocations may use high ports (> 32768)
|
||||||
|
if portNum > 0 {
|
||||||
filteredAddrs = append(filteredAddrs, addr)
|
filteredAddrs = append(filteredAddrs, addr)
|
||||||
} else {
|
|
||||||
filteredCount++
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
// Skip addresses with unparseable ports
|
|
||||||
} else {
|
} else {
|
||||||
// Skip non-TCP addresses (libp2p uses TCP)
|
// If we can't parse port, include it anyway (might be non-TCP)
|
||||||
filteredCount++
|
filteredAddrs = append(filteredAddrs, addr)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If no TCP port found, include it anyway (might be non-TCP)
|
||||||
|
filteredAddrs = append(filteredAddrs, addr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no addresses remain after filtering, skip this peer
|
// If no addresses remain after filtering, skip this peer
|
||||||
// (Filtering is routine - no need to log every occurrence)
|
|
||||||
if len(filteredAddrs) == 0 {
|
if len(filteredAddrs) == 0 {
|
||||||
|
d.logger.Debug("No valid addresses after filtering",
|
||||||
|
zap.String("peer_id", pid.String()[:8]+"..."),
|
||||||
|
zap.Int("original_count", len(addrs)))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
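Reviewer note: both versions of this filter hinge on reading the TCP port out of a multiaddr; isolated, that step looks like the sketch below.

// Sketch only: port extraction as used by the filtering logic above.
// Uses go-multiaddr and strconv.
func tcpPort(addr multiaddr.Multiaddr) (int, bool) {
    port, err := addr.ValueForProtocol(multiaddr.P_TCP)
    if err != nil {
        return 0, false // not a TCP multiaddr
    }
    p, err := strconv.Atoi(port)
    if err != nil {
        return 0, false
    }
    return p, true
}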
@ -177,7 +177,9 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Response sent - routine operation, no need to log
|
d.logger.Debug("Sent peer exchange response",
|
||||||
|
zap.Int("peer_count", len(resp.Peers)),
|
||||||
|
zap.Bool("has_rqlite_metadata", resp.RQLiteMetadata != nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start begins periodic peer discovery
|
// Start begins periodic peer discovery
|
||||||
@ -214,12 +216,15 @@ func (d *Manager) Stop() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// discoverPeers discovers and connects to new peers using non-DHT strategies:
|
// discoverPeers discovers and connects to new peers using non-DHT strategies:
|
||||||
// - Peerstore entries (peers added to peerstore by the caller)
|
// - Peerstore entries (bootstrap peers added to peerstore by the caller)
|
||||||
// - Peer exchange: query currently connected peers' peerstore entries
|
// - Peer exchange: query currently connected peers' peerstore entries
|
||||||
func (d *Manager) discoverPeers(ctx context.Context, config Config) {
|
func (d *Manager) discoverPeers(ctx context.Context, config Config) {
|
||||||
connectedPeers := d.host.Network().Peers()
|
connectedPeers := d.host.Network().Peers()
|
||||||
initialCount := len(connectedPeers)
|
initialCount := len(connectedPeers)
|
||||||
|
|
||||||
|
d.logger.Debug("Starting peer discovery",
|
||||||
|
zap.Int("current_peers", initialCount))
|
||||||
|
|
||||||
newConnections := 0
|
newConnections := 0
|
||||||
|
|
||||||
// Strategy 1: Try to connect to peers learned from the host's peerstore
|
// Strategy 1: Try to connect to peers learned from the host's peerstore
|
||||||
@ -232,17 +237,16 @@ func (d *Manager) discoverPeers(ctx context.Context, config Config) {
|
|||||||
|
|
||||||
finalPeerCount := len(d.host.Network().Peers())
|
finalPeerCount := len(d.host.Network().Peers())
|
||||||
|
|
||||||
// Summary log: only log if there were changes or new connections
|
|
||||||
if newConnections > 0 || finalPeerCount != initialCount {
|
if newConnections > 0 || finalPeerCount != initialCount {
|
||||||
d.logger.Debug("Discovery summary",
|
d.logger.Debug("Peer discovery completed",
|
||||||
zap.Int("connected", finalPeerCount),
|
zap.Int("new_connections", newConnections),
|
||||||
zap.Int("new", newConnections),
|
zap.Int("initial_peers", initialCount),
|
||||||
zap.Int("was", initialCount))
|
zap.Int("final_peers", finalPeerCount))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// discoverViaPeerstore attempts to connect to peers found in the host's peerstore.
|
// discoverViaPeerstore attempts to connect to peers found in the host's peerstore.
|
||||||
// This is useful for peers that have been pre-populated into the peerstore.
|
// This is useful for bootstrap peers that have been pre-populated into the peerstore.
|
||||||
func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int) int {
|
func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int) int {
|
||||||
if maxConnections <= 0 {
|
if maxConnections <= 0 {
|
||||||
return 0
|
return 0
|
||||||
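Reviewer note: connectToPeer is used by both discovery strategies but its body only appears further down. With go-libp2p, a bounded dial from peerstore addresses generally looks like this sketch, which is not necessarily the repository's helper.

// Sketch only: a timeout-bounded dial to a peer already in the peerstore.
// Uses context, time, go-libp2p host and peer packages.
func connectWithTimeout(ctx context.Context, h host.Host, pid peer.ID) error {
    dialCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()
    info := h.Peerstore().PeerInfo(pid) // peer.AddrInfo with known addresses
    return h.Connect(dialCtx, info)
}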
@ -252,10 +256,7 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
|
|||||||
|
|
||||||
// Iterate over peerstore known peers
|
// Iterate over peerstore known peers
|
||||||
peers := d.host.Peerstore().Peers()
|
peers := d.host.Peerstore().Peers()
|
||||||
|
d.logger.Debug("Peerstore contains peers", zap.Int("count", len(peers)))
|
||||||
// Only connect to peers on our standard LibP2P port to avoid cross-connecting
|
|
||||||
// with IPFS/IPFS Cluster instances that use different ports
|
|
||||||
const libp2pPort = 4001
|
|
||||||
|
|
||||||
for _, pid := range peers {
|
for _, pid := range peers {
|
||||||
if connected >= maxConnections {
|
if connected >= maxConnections {
|
||||||
@ -270,24 +271,6 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filter peers to only include those with addresses on our port (4001)
|
|
||||||
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096/9098)
|
|
||||||
peerInfo := d.host.Peerstore().PeerInfo(pid)
|
|
||||||
hasValidPort := false
|
|
||||||
for _, addr := range peerInfo.Addrs {
|
|
||||||
if port, err := addr.ValueForProtocol(multiaddr.P_TCP); err == nil {
|
|
||||||
if portNum, err := strconv.Atoi(port); err == nil && portNum == libp2pPort {
|
|
||||||
hasValidPort = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip peers without valid port 4001 addresses
|
|
||||||
if !hasValidPort {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to connect
|
// Try to connect
|
||||||
if err := d.connectToPeer(ctx, pid); err == nil {
|
if err := d.connectToPeer(ctx, pid); err == nil {
|
||||||
connected++
|
connected++
|
||||||
@ -310,8 +293,8 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
|||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
exchangedPeers := 0
|
d.logger.Debug("Starting peer exchange with connected peers",
|
||||||
metadataCollected := 0
|
zap.Int("num_peers", len(connectedPeers)))
|
||||||
|
|
||||||
for _, peerID := range connectedPeers {
|
for _, peerID := range connectedPeers {
|
||||||
if connected >= maxConnections {
|
if connected >= maxConnections {
|
||||||
@ -324,13 +307,9 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
exchangedPeers++
|
d.logger.Debug("Received peer list from peer",
|
||||||
// Check if we got RQLite metadata
|
zap.String("from_peer", peerID.String()[:8]+"..."),
|
||||||
if val, err := d.host.Peerstore().Get(peerID, "rqlite_metadata"); err == nil {
|
zap.Int("peer_count", len(peers)))
|
||||||
if _, ok := val.([]byte); ok {
|
|
||||||
metadataCollected++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to connect to discovered peers
|
// Try to connect to discovered peers
|
||||||
for _, peerInfo := range peers {
|
for _, peerInfo := range peers {
|
||||||
@ -355,8 +334,7 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse and filter addresses to only include port 4001 (standard libp2p port)
|
// Parse addresses
|
||||||
const libp2pPort = 4001
|
|
||||||
addrs := make([]multiaddr.Multiaddr, 0, len(peerInfo.Addrs))
|
addrs := make([]multiaddr.Multiaddr, 0, len(peerInfo.Addrs))
|
||||||
for _, addrStr := range peerInfo.Addrs {
|
for _, addrStr := range peerInfo.Addrs {
|
||||||
ma, err := multiaddr.NewMultiaddr(addrStr)
|
ma, err := multiaddr.NewMultiaddr(addrStr)
|
||||||
@ -364,24 +342,14 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
|||||||
d.logger.Debug("Failed to parse multiaddr", zap.Error(err))
|
d.logger.Debug("Failed to parse multiaddr", zap.Error(err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Only include addresses with port 4001
|
|
||||||
port, err := ma.ValueForProtocol(multiaddr.P_TCP)
|
|
||||||
if err == nil {
|
|
||||||
portNum, err := strconv.Atoi(port)
|
|
||||||
if err == nil && portNum == libp2pPort {
|
|
||||||
addrs = append(addrs, ma)
|
addrs = append(addrs, ma)
|
||||||
}
|
}
|
||||||
// Skip addresses with wrong ports
|
|
||||||
}
|
|
||||||
// Skip non-TCP addresses
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(addrs) == 0 {
|
if len(addrs) == 0 {
|
||||||
// Skip peers without valid addresses - no need to log every occurrence
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add to peerstore (only valid addresses with port 4001)
|
// Add to peerstore
|
||||||
d.host.Peerstore().AddAddrs(parsedID, addrs, time.Hour*24)
|
d.host.Peerstore().AddAddrs(parsedID, addrs, time.Hour*24)
|
||||||
|
|
||||||
// Try to connect
|
// Try to connect
|
||||||
@ -390,29 +358,20 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
|||||||
|
|
||||||
if err := d.host.Connect(connectCtx, peerAddrInfo); err != nil {
|
if err := d.host.Connect(connectCtx, peerAddrInfo); err != nil {
|
||||||
cancel()
|
cancel()
|
||||||
// Only log connection failures for debugging - errors are still useful
|
d.logger.Debug("Failed to connect to discovered peer",
|
||||||
d.logger.Debug("Connect failed",
|
zap.String("peer_id", parsedID.String()[:8]+"..."),
|
||||||
zap.String("peer", parsedID.String()[:8]+"..."),
|
|
||||||
zap.Error(err))
|
zap.Error(err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
cancel()
|
cancel()
|
||||||
|
|
||||||
d.logger.Info("Connected",
|
d.logger.Info("Successfully connected to discovered peer",
|
||||||
zap.String("peer", parsedID.String()[:8]+"..."),
|
zap.String("peer_id", parsedID.String()[:8]+"..."),
|
||||||
zap.String("from", peerID.String()[:8]+"..."))
|
zap.String("discovered_from", peerID.String()[:8]+"..."))
|
||||||
connected++
|
connected++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Summary log for peer exchange
|
|
||||||
if exchangedPeers > 0 {
|
|
||||||
d.logger.Debug("Exchange summary",
|
|
||||||
zap.Int("exchanged_with", exchangedPeers),
|
|
||||||
zap.Int("metadata_collected", metadataCollected),
|
|
||||||
zap.Int("new_connections", connected))
|
|
||||||
}
|
|
||||||
|
|
||||||
return connected
|
return connected
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -421,20 +380,11 @@ func (d *Manager) requestPeersFromPeer(ctx context.Context, peerID peer.ID, limi
|
|||||||
// Open a stream to the peer
|
// Open a stream to the peer
|
||||||
stream, err := d.host.NewStream(ctx, peerID, PeerExchangeProtocol)
|
stream, err := d.host.NewStream(ctx, peerID, PeerExchangeProtocol)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Check if this is a "protocols not supported" error (expected for lightweight clients like gateway)
|
// Suppress repeated warnings for the same peer (log once per minute max)
|
||||||
if strings.Contains(err.Error(), "protocols not supported") {
|
|
||||||
// This is a lightweight client (gateway, etc.) that doesn't support peer exchange - expected behavior
|
|
||||||
// Track it to avoid repeated attempts, but don't log as it's not an error
|
|
||||||
d.failedPeerExchanges[peerID] = time.Now()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// For actual connection errors, log but suppress repeated warnings for the same peer
|
|
||||||
lastFailure, seen := d.failedPeerExchanges[peerID]
|
lastFailure, seen := d.failedPeerExchanges[peerID]
|
||||||
if !seen || time.Since(lastFailure) > time.Minute {
|
if !seen || time.Since(lastFailure) > time.Minute {
|
||||||
d.logger.Debug("Failed to open peer exchange stream with node",
|
d.logger.Debug("Failed to open peer exchange stream",
|
||||||
zap.String("peer_id", peerID.String()[:8]+"..."),
|
zap.String("peer_id", peerID.String()[:8]+"..."),
|
||||||
zap.String("reason", "peer does not support peer exchange protocol or connection failed"),
|
|
||||||
zap.Error(err))
|
zap.Error(err))
|
||||||
d.failedPeerExchanges[peerID] = time.Now()
|
d.failedPeerExchanges[peerID] = time.Now()
|
||||||
}
|
}
|
||||||
@ -474,10 +424,9 @@ func (d *Manager) requestPeersFromPeer(ctx context.Context, peerID peer.ID, limi
|
|||||||
metadataJSON, err := json.Marshal(resp.RQLiteMetadata)
|
metadataJSON, err := json.Marshal(resp.RQLiteMetadata)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
_ = d.host.Peerstore().Put(peerID, "rqlite_metadata", metadataJSON)
|
_ = d.host.Peerstore().Put(peerID, "rqlite_metadata", metadataJSON)
|
||||||
// Only log when new metadata is stored (useful for debugging)
|
d.logger.Debug("Stored RQLite metadata from peer",
|
||||||
d.logger.Debug("Metadata stored",
|
zap.String("peer_id", peerID.String()[:8]+"..."),
|
||||||
zap.String("peer", peerID.String()[:8]+"..."),
|
zap.String("node_id", resp.RQLiteMetadata.NodeID))
|
||||||
zap.String("node", resp.RQLiteMetadata.NodeID))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -493,6 +442,9 @@ func (d *Manager) TriggerPeerExchange(ctx context.Context) int {
|
|||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
d.logger.Info("Manually triggering peer exchange",
|
||||||
|
zap.Int("connected_peers", len(connectedPeers)))
|
||||||
|
|
||||||
metadataCollected := 0
|
metadataCollected := 0
|
||||||
for _, peerID := range connectedPeers {
|
for _, peerID := range connectedPeers {
|
||||||
// Request peer list from this peer (which includes their RQLite metadata)
|
// Request peer list from this peer (which includes their RQLite metadata)
|
||||||
@ -506,9 +458,9 @@ func (d *Manager) TriggerPeerExchange(ctx context.Context) int {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d.logger.Info("Exchange completed",
|
d.logger.Info("Peer exchange completed",
|
||||||
zap.Int("peers", len(connectedPeers)),
|
zap.Int("peers_with_metadata", metadataCollected),
|
||||||
zap.Int("with_metadata", metadataCollected))
|
zap.Int("total_peers", len(connectedPeers)))
|
||||||
|
|
||||||
return metadataCollected
|
return metadataCollected
|
||||||
}
|
}
|
||||||
@ -528,7 +480,8 @@ func (d *Manager) connectToPeer(ctx context.Context, peerID peer.ID) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Connection success logged at higher level - no need for duplicate DEBUG log
|
d.logger.Debug("Successfully connected to peer",
|
||||||
|
zap.String("peer_id", peerID.String()[:8]+"..."))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -9,7 +9,7 @@ type RQLiteNodeMetadata struct {
     NodeID         string    `json:"node_id"`         // RQLite node ID (from config)
     RaftAddress    string    `json:"raft_address"`    // Raft port address (e.g., "51.83.128.181:7001")
     HTTPAddress    string    `json:"http_address"`    // HTTP API address (e.g., "51.83.128.181:5001")
-    NodeType       string    `json:"node_type"`       // Node type identifier
+    NodeType       string    `json:"node_type"`       // "bootstrap" or "node"
     RaftLogIndex   uint64    `json:"raft_log_index"`  // Current Raft log index (for data comparison)
     LastSeen       time.Time `json:"last_seen"`       // Updated on every announcement
     ClusterVersion string    `json:"cluster_version"` // For compatibility checking
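Reviewer note: with the struct tags above, the announcement a peer stores for its neighbours marshals roughly as follows; the values are illustrative, not taken from a real cluster.

// Sketch only: illustrative values, using encoding/json and time.
meta := RQLiteNodeMetadata{
    NodeID:         "node-1",
    RaftAddress:    "51.83.128.181:7001",
    HTTPAddress:    "51.83.128.181:5001",
    NodeType:       "node",
    RaftLogIndex:   1024,
    LastSeen:       time.Now().UTC(),
    ClusterVersion: "1",
}
b, _ := json.Marshal(meta)
// b is roughly: {"node_id":"node-1","raft_address":"51.83.128.181:7001","http_address":"51.83.128.181:5001",...}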
|
|||||||
@ -17,8 +17,7 @@ func TestPortChecker(t *testing.T) {
     }
 
     // Check that required port counts match expectations
-    // 5 nodes × 9 ports per node + 4 shared ports = 49
-    expectedPortCount := 49 // Based on RequiredPorts
+    expectedPortCount := 44 // Based on RequiredPorts
     if len(checker.ports) != expectedPortCount {
         t.Errorf("Expected %d ports, got %d", expectedPortCount, len(checker.ports))
     }
|
|||||||
@ -14,24 +14,24 @@ import (
|
|||||||
|
|
||||||
// ConfigEnsurer handles all config file creation and validation
|
// ConfigEnsurer handles all config file creation and validation
|
||||||
type ConfigEnsurer struct {
|
type ConfigEnsurer struct {
|
||||||
oramaDir string
|
debrosDir string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConfigEnsurer creates a new config ensurer
|
// NewConfigEnsurer creates a new config ensurer
|
||||||
func NewConfigEnsurer(oramaDir string) *ConfigEnsurer {
|
func NewConfigEnsurer(debrosDir string) *ConfigEnsurer {
|
||||||
return &ConfigEnsurer{
|
return &ConfigEnsurer{
|
||||||
oramaDir: oramaDir,
|
debrosDir: debrosDir,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// EnsureAll ensures all necessary config files and secrets exist
|
// EnsureAll ensures all necessary config files and secrets exist
|
||||||
func (ce *ConfigEnsurer) EnsureAll() error {
|
func (ce *ConfigEnsurer) EnsureAll() error {
|
||||||
// Create directories
|
// Create directories
|
||||||
if err := os.MkdirAll(ce.oramaDir, 0755); err != nil {
|
if err := os.MkdirAll(ce.debrosDir, 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create .orama directory: %w", err)
|
return fmt.Errorf("failed to create .debros directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.MkdirAll(filepath.Join(ce.oramaDir, "logs"), 0755); err != nil {
|
if err := os.MkdirAll(filepath.Join(ce.debrosDir, "logs"), 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -43,27 +43,27 @@ func (ce *ConfigEnsurer) EnsureAll() error {
|
|||||||
// Load topology
|
// Load topology
|
||||||
topology := DefaultTopology()
|
topology := DefaultTopology()
|
||||||
|
|
||||||
// Generate identities for first two nodes and collect their multiaddrs as peer addresses
|
// Generate identities for all bootstrap nodes and collect multiaddrs
|
||||||
// All nodes use these addresses for initial peer discovery
|
bootstrapAddrs := []string{}
|
||||||
peerAddrs := []string{}
|
for _, nodeSpec := range topology.GetBootstrapNodes() {
|
||||||
for i := 0; i < 2 && i < len(topology.Nodes); i++ {
|
|
||||||
nodeSpec := topology.Nodes[i]
|
|
||||||
addr, err := ce.ensureNodeIdentity(nodeSpec)
|
addr, err := ce.ensureNodeIdentity(nodeSpec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to ensure identity for %s: %w", nodeSpec.Name, err)
|
return fmt.Errorf("failed to ensure identity for %s: %w", nodeSpec.Name, err)
|
||||||
}
|
}
|
||||||
peerAddrs = append(peerAddrs, addr)
|
bootstrapAddrs = append(bootstrapAddrs, addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure configs for all nodes
|
// Ensure configs for all bootstrap and regular nodes
|
||||||
for _, nodeSpec := range topology.Nodes {
|
for _, nodeSpec := range topology.Nodes {
|
||||||
if err := ce.ensureNodeConfig(nodeSpec, peerAddrs); err != nil {
|
if err := ce.ensureNodeConfig(nodeSpec, bootstrapAddrs); err != nil {
|
||||||
return fmt.Errorf("failed to ensure config for %s: %w", nodeSpec.Name, err)
|
return fmt.Errorf("failed to ensure config for %s: %w", nodeSpec.Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Gateway configuration is now embedded in each node's config
|
// Ensure gateway config
|
||||||
// No separate gateway.yaml needed anymore
|
if err := ce.ensureGateway(bootstrapAddrs); err != nil {
|
||||||
|
return fmt.Errorf("failed to ensure gateway: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
// Ensure Olric config
|
// Ensure Olric config
|
||||||
if err := ce.ensureOlric(); err != nil {
|
if err := ce.ensureOlric(); err != nil {
|
||||||
@ -75,7 +75,7 @@ func (ce *ConfigEnsurer) EnsureAll() error {
|
|||||||
|
|
||||||
// ensureSharedSecrets creates cluster secret and swarm key if they don't exist
|
// ensureSharedSecrets creates cluster secret and swarm key if they don't exist
|
||||||
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
||||||
secretPath := filepath.Join(ce.oramaDir, "cluster-secret")
|
secretPath := filepath.Join(ce.debrosDir, "cluster-secret")
|
||||||
if _, err := os.Stat(secretPath); os.IsNotExist(err) {
|
if _, err := os.Stat(secretPath); os.IsNotExist(err) {
|
||||||
secret := generateRandomHex(64) // 64 hex chars = 32 bytes
|
secret := generateRandomHex(64) // 64 hex chars = 32 bytes
|
||||||
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
|
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
|
||||||
@ -84,7 +84,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
|||||||
fmt.Printf("✓ Generated cluster secret\n")
|
fmt.Printf("✓ Generated cluster secret\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
swarmKeyPath := filepath.Join(ce.oramaDir, "swarm.key")
|
swarmKeyPath := filepath.Join(ce.debrosDir, "swarm.key")
|
||||||
if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
|
if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
|
||||||
keyHex := strings.ToUpper(generateRandomHex(64))
|
keyHex := strings.ToUpper(generateRandomHex(64))
|
||||||
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
|
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
|
||||||
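Reviewer note: generateRandomHex is not part of this diff; a standard-library version consistent with its use here (64 hex characters = 32 random bytes) would be:

// Sketch only: uses crypto/rand and encoding/hex; n is the hex length (must be even).
func generateRandomHex(n int) string {
    buf := make([]byte, n/2)
    if _, err := rand.Read(buf); err != nil {
        panic(err) // sketch; real code should propagate the error
    }
    return hex.EncodeToString(buf)
}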
@ -99,7 +99,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
|||||||
|
|
||||||
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr
|
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr
|
||||||
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
|
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
|
||||||
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
|
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
|
||||||
identityPath := filepath.Join(nodeDir, "identity.key")
|
identityPath := filepath.Join(nodeDir, "identity.key")
|
||||||
|
|
||||||
// Create identity if missing
|
// Create identity if missing
|
||||||
@ -133,29 +133,50 @@ func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ensureNodeConfig creates or updates a node configuration
|
// ensureNodeConfig creates or updates a node configuration
|
||||||
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, peerAddrs []string) error {
|
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, bootstrapAddrs []string) error {
|
||||||
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
|
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
|
||||||
configPath := filepath.Join(ce.oramaDir, nodeSpec.ConfigFilename)
|
configPath := filepath.Join(ce.debrosDir, nodeSpec.ConfigFilename)
|
||||||
|
|
||||||
if err := os.MkdirAll(nodeDir, 0755); err != nil {
|
if err := os.MkdirAll(nodeDir, 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create node directory: %w", err)
|
return fmt.Errorf("failed to create node directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate node config (all nodes are unified)
|
if nodeSpec.Role == "bootstrap" {
|
||||||
|
// Generate bootstrap config
|
||||||
|
data := templates.BootstrapConfigData{
|
||||||
|
NodeID: nodeSpec.Name,
|
||||||
|
P2PPort: nodeSpec.P2PPort,
|
||||||
|
DataDir: nodeDir,
|
||||||
|
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
|
||||||
|
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
|
||||||
|
ClusterAPIPort: nodeSpec.ClusterAPIPort,
|
||||||
|
IPFSAPIPort: nodeSpec.IPFSAPIPort,
|
||||||
|
BootstrapPeers: bootstrapAddrs,
|
||||||
|
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := templates.RenderBootstrapConfig(data)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to render bootstrap config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write bootstrap config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
|
||||||
|
} else {
|
||||||
|
// Generate regular node config
|
||||||
data := templates.NodeConfigData{
|
data := templates.NodeConfigData{
|
||||||
NodeID: nodeSpec.Name,
|
NodeID: nodeSpec.Name,
|
||||||
P2PPort: nodeSpec.P2PPort,
|
P2PPort: nodeSpec.P2PPort,
|
||||||
DataDir: nodeDir,
|
DataDir: nodeDir,
|
||||||
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
|
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
|
||||||
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
|
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
|
||||||
RQLiteRaftInternalPort: nodeSpec.RQLiteRaftPort,
|
|
||||||
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
|
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
|
||||||
BootstrapPeers: peerAddrs,
|
BootstrapPeers: bootstrapAddrs,
|
||||||
ClusterAPIPort: nodeSpec.ClusterAPIPort,
|
ClusterAPIPort: nodeSpec.ClusterAPIPort,
|
||||||
IPFSAPIPort: nodeSpec.IPFSAPIPort,
|
IPFSAPIPort: nodeSpec.IPFSAPIPort,
|
||||||
HTTPAdvAddress: fmt.Sprintf("localhost:%d", nodeSpec.RQLiteHTTPPort),
|
|
||||||
RaftAdvAddress: fmt.Sprintf("localhost:%d", nodeSpec.RQLiteRaftPort),
|
|
||||||
UnifiedGatewayPort: nodeSpec.UnifiedGatewayPort,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
config, err := templates.RenderNodeConfig(data)
|
config, err := templates.RenderNodeConfig(data)
|
||||||
@ -168,24 +189,49 @@ func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, peerAddrs []string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
|
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Gateway configuration is now embedded in each node's config
|
// ensureGateway creates gateway config
|
||||||
// ensureGateway is no longer needed - each node runs its own embedded gateway
|
func (ce *ConfigEnsurer) ensureGateway(bootstrapAddrs []string) error {
|
||||||
|
configPath := filepath.Join(ce.debrosDir, "gateway.yaml")
|
||||||
|
|
||||||
|
// Get first bootstrap's cluster API port for default
|
||||||
|
topology := DefaultTopology()
|
||||||
|
firstBootstrap := topology.GetBootstrapNodes()[0]
|
||||||
|
|
||||||
|
data := templates.GatewayConfigData{
|
||||||
|
ListenPort: topology.GatewayPort,
|
||||||
|
BootstrapPeers: bootstrapAddrs,
|
||||||
|
OlricServers: []string{fmt.Sprintf("127.0.0.1:%d", topology.OlricHTTPPort)},
|
||||||
|
ClusterAPIPort: firstBootstrap.ClusterAPIPort,
|
||||||
|
IPFSAPIPort: firstBootstrap.IPFSAPIPort,
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := templates.RenderGatewayConfig(data)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to render gateway config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write gateway config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("✓ Generated gateway.yaml\n")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// ensureOlric creates Olric config
|
// ensureOlric creates Olric config
|
||||||
func (ce *ConfigEnsurer) ensureOlric() error {
|
func (ce *ConfigEnsurer) ensureOlric() error {
|
||||||
configPath := filepath.Join(ce.oramaDir, "olric-config.yaml")
|
configPath := filepath.Join(ce.debrosDir, "olric-config.yaml")
|
||||||
|
|
||||||
topology := DefaultTopology()
|
topology := DefaultTopology()
|
||||||
data := templates.OlricConfigData{
|
data := templates.OlricConfigData{
|
||||||
ServerBindAddr: "127.0.0.1",
|
BindAddr: "127.0.0.1",
|
||||||
HTTPPort: topology.OlricHTTPPort,
|
HTTPPort: topology.OlricHTTPPort,
|
||||||
MemberlistBindAddr: "127.0.0.1", // localhost for development
|
|
||||||
MemberlistPort: topology.OlricMemberPort,
|
MemberlistPort: topology.OlricMemberPort,
|
||||||
MemberlistEnvironment: "local", // development environment
|
|
||||||
}
|
}
|
||||||
|
|
||||||
config, err := templates.RenderOlricConfig(data)
|
config, err := templates.RenderOlricConfig(data)
|
||||||
|
|||||||
@ -9,8 +9,6 @@ import (
     "os/exec"
     "strings"
     "time"
-
-    "github.com/DeBrosOfficial/network/pkg/tlsutil"
 )
 
 // HealthCheckResult represents the result of a health check
@ -81,7 +79,7 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
|
|||||||
result := HealthCheckResult{Name: fmt.Sprintf("RQLite-%s", name)}
|
result := HealthCheckResult{Name: fmt.Sprintf("RQLite-%s", name)}
|
||||||
|
|
||||||
urlStr := fmt.Sprintf("http://localhost:%d/status", httpPort)
|
urlStr := fmt.Sprintf("http://localhost:%d/status", httpPort)
|
||||||
client := tlsutil.NewHTTPClient(2 * time.Second)
|
client := &http.Client{Timeout: 2 * time.Second}
|
||||||
resp, err := client.Get(urlStr)
|
resp, err := client.Get(urlStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
result.Details = fmt.Sprintf("connection failed: %v", err)
|
result.Details = fmt.Sprintf("connection failed: %v", err)
|
||||||
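Reviewer note: whichever client is used (tlsutil.NewHTTPClient on main, a plain http.Client on the nightly), the probe itself is a GET against RQLite's /status endpoint with a short timeout. In isolation, a sketch of that probe:

// Sketch only: a minimal RQLite liveness probe; httpPort comes from the node config.
// Uses net/http, fmt, time.
func probeRQLite(httpPort int) error {
    client := &http.Client{Timeout: 2 * time.Second}
    resp, err := client.Get(fmt.Sprintf("http://localhost:%d/status", httpPort))
    if err != nil {
        return fmt.Errorf("connection failed: %w", err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("unexpected status: %s", resp.Status)
    }
    return nil
}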
@ -166,42 +164,43 @@ func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResu
|
|||||||
|
|
||||||
// HealthCheckWithRetry performs a health check with retry logic
|
// HealthCheckWithRetry performs a health check with retry logic
|
||||||
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
|
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
|
||||||
fmt.Fprintf(pm.logWriter, "⚕️ Validating cluster health...")
|
fmt.Fprintf(pm.logWriter, "\n⚕️ Validating cluster health...\n")
|
||||||
|
|
||||||
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
|
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
spinnerFrames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
|
|
||||||
spinnerIndex := 0
|
|
||||||
|
|
||||||
for attempt := 1; attempt <= retries; attempt++ {
|
for attempt := 1; attempt <= retries; attempt++ {
|
||||||
// Perform all checks
|
// Perform all checks
|
||||||
ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
|
ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
|
||||||
rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
|
rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
|
||||||
libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)
|
libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)
|
||||||
|
|
||||||
// All checks must pass
|
// Log results
|
||||||
if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
|
if attempt == 1 || attempt == retries || (attempt%3 == 0) {
|
||||||
fmt.Fprintf(pm.logWriter, "\r✓ Cluster health validated\n")
|
fmt.Fprintf(pm.logWriter, " Attempt %d/%d:\n", attempt, retries)
|
||||||
return true
|
pm.logHealthCheckResult(pm.logWriter, " ", ipfsResult)
|
||||||
|
pm.logHealthCheckResult(pm.logWriter, " ", rqliteResult)
|
||||||
|
pm.logHealthCheckResult(pm.logWriter, " ", libp2pResult)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Show spinner progress
|
// All checks must pass
|
||||||
fmt.Fprintf(pm.logWriter, "\r%s Validating cluster health... (%d/%d)", spinnerFrames[spinnerIndex%len(spinnerFrames)], attempt, retries)
|
if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
|
||||||
spinnerIndex++
|
fmt.Fprintf(pm.logWriter, "\n✓ All health checks passed!\n")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
if attempt < retries {
|
if attempt < retries {
|
||||||
select {
|
select {
|
||||||
case <-time.After(retryInterval):
|
case <-time.After(retryInterval):
|
||||||
continue
|
continue
|
||||||
case <-deadlineCtx.Done():
|
case <-deadlineCtx.Done():
|
||||||
fmt.Fprintf(pm.logWriter, "\r❌ Health check timeout reached\n")
|
fmt.Fprintf(pm.logWriter, "\n❌ Health check timeout reached\n")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(pm.logWriter, "\r❌ Health checks failed - services not ready\n")
|
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed after %d attempts\n", retries)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
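Reviewer note: both versions of HealthCheckWithRetry share the same retry-until-deadline shape; stripped of logging and spinner output it reduces to a helper like this sketch.

// Sketch only: run check() up to retries times, pausing interval between
// attempts, but never past the overall timeout. Uses context and time.
func retryUntil(ctx context.Context, retries int, interval, timeout time.Duration, check func(context.Context) bool) bool {
    ctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()
    for attempt := 1; attempt <= retries; attempt++ {
        if check(ctx) {
            return true
        }
        if attempt == retries {
            break
        }
        select {
        case <-time.After(interval):
        case <-ctx.Done():
            return false
        }
    }
    return false
}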
|
|||||||
@ -15,13 +15,11 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/DeBrosOfficial/network/pkg/tlsutil"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProcessManager manages all dev environment processes
|
// ProcessManager manages all dev environment processes
|
||||||
type ProcessManager struct {
|
type ProcessManager struct {
|
||||||
oramaDir string
|
debrosDir string
|
||||||
pidsDir string
|
pidsDir string
|
||||||
processes map[string]*ManagedProcess
|
processes map[string]*ManagedProcess
|
||||||
mutex sync.Mutex
|
mutex sync.Mutex
|
||||||
@ -37,12 +35,12 @@ type ManagedProcess struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewProcessManager creates a new process manager
|
// NewProcessManager creates a new process manager
|
||||||
func NewProcessManager(oramaDir string, logWriter io.Writer) *ProcessManager {
|
func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
|
||||||
pidsDir := filepath.Join(oramaDir, ".pids")
|
pidsDir := filepath.Join(debrosDir, ".pids")
|
||||||
os.MkdirAll(pidsDir, 0755)
|
os.MkdirAll(pidsDir, 0755)
|
||||||
|
|
||||||
return &ProcessManager{
|
return &ProcessManager{
|
||||||
oramaDir: oramaDir,
|
debrosDir: debrosDir,
|
||||||
pidsDir: pidsDir,
|
pidsDir: pidsDir,
|
||||||
processes: make(map[string]*ManagedProcess),
|
processes: make(map[string]*ManagedProcess),
|
||||||
logWriter: logWriter,
|
logWriter: logWriter,
|
||||||
@ -51,8 +49,7 @@ func NewProcessManager(oramaDir string, logWriter io.Writer) *ProcessManager {
|
|||||||
|
|
||||||
// StartAll starts all development services
|
// StartAll starts all development services
|
||||||
func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
||||||
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n")
|
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n\n")
|
||||||
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
|
|
||||||
|
|
||||||
topology := DefaultTopology()
|
topology := DefaultTopology()
|
||||||
|
|
||||||
@ -65,11 +62,12 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
|||||||
fn func(context.Context) error
|
fn func(context.Context) error
|
||||||
}{
|
}{
|
||||||
{"IPFS", pm.startIPFS},
|
{"IPFS", pm.startIPFS},
|
||||||
|
{"RQLite", pm.startRQLite},
|
||||||
{"IPFS Cluster", pm.startIPFSCluster},
|
{"IPFS Cluster", pm.startIPFSCluster},
|
||||||
{"Olric", pm.startOlric},
|
{"Olric", pm.startOlric},
|
||||||
{"Anon", pm.startAnon},
|
{"Anon", pm.startAnon},
|
||||||
{"Nodes (Network)", pm.startNodes},
|
{"Nodes (Network)", pm.startNodes},
|
||||||
// Gateway is now per-node (embedded in each node) - no separate main gateway needed
|
{"Gateway", pm.startGateway},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, svc := range services {
|
for _, svc := range services {
|
||||||
@@ -79,8 +77,6 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
 }
 }

-fmt.Fprintf(pm.logWriter, "\n")

 // Run health checks with retries before declaring success
 const (
 healthCheckRetries = 20
@@ -89,48 +85,18 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
 )

 if !pm.HealthCheckWithRetry(ctx, ipfsNodes, healthCheckRetries, healthCheckInterval, healthCheckTimeout) {
-fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed - stopping all services\n")
+fmt.Fprintf(pm.logWriter, "\n❌ Development environment failed health checks - stopping all services\n")
 pm.StopAll(ctx)
 return fmt.Errorf("cluster health checks failed - services stopped")
 }

-// Print success and key endpoints
-pm.printStartupSummary(topology)
+fmt.Fprintf(pm.logWriter, "\n✅ Development environment started!\n\n")
 return nil
 }

-// printStartupSummary prints the final startup summary with key endpoints
-func (pm *ProcessManager) printStartupSummary(topology *Topology) {
-fmt.Fprintf(pm.logWriter, "\n✅ Development environment ready!\n")
-fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
-
-fmt.Fprintf(pm.logWriter, "📡 Access your nodes via unified gateway ports:\n\n")
-for _, node := range topology.Nodes {
-fmt.Fprintf(pm.logWriter, " %s:\n", node.Name)
-fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/health\n", node.UnifiedGatewayPort)
-fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/rqlite/http/db/execute\n", node.UnifiedGatewayPort)
-fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/cluster/health\n\n", node.UnifiedGatewayPort)
-}
-
-fmt.Fprintf(pm.logWriter, "🌐 Main Gateway:\n")
-fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/v1/status\n\n", topology.GatewayPort)
-
-fmt.Fprintf(pm.logWriter, "📊 Other Services:\n")
-fmt.Fprintf(pm.logWriter, " Olric: http://localhost:%d\n", topology.OlricHTTPPort)
-fmt.Fprintf(pm.logWriter, " Anon SOCKS: 127.0.0.1:%d\n\n", topology.AnonSOCKSPort)
-
-fmt.Fprintf(pm.logWriter, "📝 Useful Commands:\n")
-fmt.Fprintf(pm.logWriter, " ./bin/orama dev status - Check service status\n")
-fmt.Fprintf(pm.logWriter, " ./bin/orama dev logs node-1 - View logs\n")
-fmt.Fprintf(pm.logWriter, " ./bin/orama dev down - Stop all services\n\n")
-
-fmt.Fprintf(pm.logWriter, "📂 Logs: %s/logs\n", pm.oramaDir)
-fmt.Fprintf(pm.logWriter, "⚙️ Config: %s\n\n", pm.oramaDir)
-}

 // StopAll stops all running processes
 func (pm *ProcessManager) StopAll(ctx context.Context) error {
-fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n\n")
+fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n")

 topology := DefaultTopology()
 var services []string
@@ -145,28 +111,21 @@ func (pm *ProcessManager) StopAll(ctx context.Context) error {
 node := topology.Nodes[i]
 services = append(services, fmt.Sprintf("ipfs-cluster-%s", node.Name))
 }
+for i := len(topology.Nodes) - 1; i >= 0; i-- {
+node := topology.Nodes[i]
+services = append(services, fmt.Sprintf("rqlite-%s", node.Name))
+}
 for i := len(topology.Nodes) - 1; i >= 0; i-- {
 node := topology.Nodes[i]
 services = append(services, fmt.Sprintf("ipfs-%s", node.Name))
 }
 services = append(services, "olric", "anon")

-fmt.Fprintf(pm.logWriter, "Stopping %d services...\n\n", len(services))
-
-// Stop all processes sequentially (in dependency order) and wait for each
-stoppedCount := 0
 for _, svc := range services {
-if err := pm.stopProcess(svc); err != nil {
-fmt.Fprintf(pm.logWriter, "⚠️ Error stopping %s: %v\n", svc, err)
-} else {
-stoppedCount++
+pm.stopProcess(svc)
 }

-// Show progress
-fmt.Fprintf(pm.logWriter, " [%d/%d] stopped\n", stoppedCount, len(services))
-}
-
-fmt.Fprintf(pm.logWriter, "\n✅ All %d services have been stopped\n\n", stoppedCount)
+fmt.Fprintf(pm.logWriter, "✓ All services stopped\n\n")
 return nil
 }

@@ -191,6 +150,13 @@ func (pm *ProcessManager) Status(ctx context.Context) {
 fmt.Sprintf("%s IPFS", node.Name),
 []int{node.IPFSAPIPort, node.IPFSSwarmPort},
 })
+services = append(services, struct {
+name string
+ports []int
+}{
+fmt.Sprintf("%s RQLite", node.Name),
+[]int{node.RQLiteHTTPPort, node.RQLiteRaftPort},
+})
 services = append(services, struct {
 name string
 ports []int
@@ -239,10 +205,10 @@ func (pm *ProcessManager) Status(ctx context.Context) {
 fmt.Fprintf(pm.logWriter, " %-25s %s (%s)\n", svc.name, status, portStr)
 }

-fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.oramaDir)
-configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml", "olric-config.yaml"}
+fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.debrosDir)
+configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node2.yaml", "node3.yaml", "node4.yaml", "gateway.yaml", "olric-config.yaml"}
 for _, f := range configFiles {
-path := filepath.Join(pm.oramaDir, f)
+path := filepath.Join(pm.debrosDir, f)
 if _, err := os.Stat(path); err == nil {
 fmt.Fprintf(pm.logWriter, " ✓ %s\n", f)
 } else {
@@ -250,7 +216,7 @@ func (pm *ProcessManager) Status(ctx context.Context) {
 }
 }

-fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.oramaDir)
+fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.debrosDir)
 }

 // Helper functions for starting individual services
@@ -261,7 +227,7 @@ func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
 for _, nodeSpec := range topology.Nodes {
 nodes = append(nodes, ipfsNodeInfo{
 name: nodeSpec.Name,
-ipfsPath: filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs/repo"),
+ipfsPath: filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs/repo"),
 apiPort: nodeSpec.IPFSAPIPort,
 swarmPort: nodeSpec.IPFSSwarmPort,
 gatewayPort: nodeSpec.IPFSGatewayPort,
@@ -271,11 +237,11 @@ func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
 return nodes
 }

-// startNodes starts all network nodes
+// startNodes starts all network nodes (bootstraps and regular)
 func (pm *ProcessManager) startNodes(ctx context.Context) error {
 topology := DefaultTopology()
 for _, nodeSpec := range topology.Nodes {
-logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
+logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
 if err := pm.startNode(nodeSpec.Name, nodeSpec.ConfigFilename, logPath); err != nil {
 return fmt.Errorf("failed to start %s: %w", nodeSpec.Name, err)
 }
@@ -483,7 +449,7 @@ func (pm *ProcessManager) waitIPFSReady(ctx context.Context, node ipfsNodeInfo)

 // ipfsHTTPCall makes an HTTP call to IPFS API
 func (pm *ProcessManager) ipfsHTTPCall(ctx context.Context, urlStr string, method string) error {
-client := tlsutil.NewHTTPClient(5 * time.Second)
+client := &http.Client{Timeout: 5 * time.Second}
 req, err := http.NewRequestWithContext(ctx, method, urlStr, nil)
 if err != nil {
 return fmt.Errorf("failed to create request: %w", err)
@@ -520,7 +486,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
 }

 // Copy swarm key
-swarmKeyPath := filepath.Join(pm.oramaDir, "swarm.key")
+swarmKeyPath := filepath.Join(pm.debrosDir, "swarm.key")
 if data, err := os.ReadFile(swarmKeyPath); err == nil {
 os.WriteFile(filepath.Join(nodes[i].ipfsPath, "swarm.key"), data, 0600)
 }
@@ -540,7 +506,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
 // Phase 2: Start all IPFS daemons
 for i := range nodes {
 pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-%s.pid", nodes[i].name))
-logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
+logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))

 cmd := exec.CommandContext(ctx, "ipfs", "daemon", "--enable-pubsub-experiment", "--repo-dir="+nodes[i].ipfsPath)
 logFile, _ := os.Create(logPath)
@@ -591,7 +557,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
 ipfsPort int
 }{
 nodeSpec.Name,
-filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs-cluster"),
+filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs-cluster"),
 nodeSpec.ClusterAPIPort,
 nodeSpec.ClusterPort,
 nodeSpec.IPFSAPIPort,
@@ -608,7 +574,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
 }

 // Read cluster secret to ensure all nodes use the same PSK
-secretPath := filepath.Join(pm.oramaDir, "cluster-secret")
+secretPath := filepath.Join(pm.debrosDir, "cluster-secret")
 clusterSecret, err := os.ReadFile(secretPath)
 if err != nil {
 return fmt.Errorf("failed to read cluster secret: %w", err)
@@ -657,7 +623,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {

 // Start bootstrap cluster service
 pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
-logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
+logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))

 cmd = exec.CommandContext(ctx, "ipfs-cluster-service", "daemon")
 cmd.Env = append(os.Environ(), fmt.Sprintf("IPFS_CLUSTER_PATH=%s", node.clusterPath))
@@ -731,7 +697,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {

 // Start follower cluster service with bootstrap flag
 pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
-logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
+logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))

 args := []string{"daemon"}
 if bootstrapMultiaddr != "" {
@@ -976,10 +942,76 @@ func (pm *ProcessManager) ensureIPFSClusterPorts(clusterPath string, restAPIPort
 return nil
 }

+func (pm *ProcessManager) startRQLite(ctx context.Context) error {
+topology := DefaultTopology()
+var nodes []struct {
+name string
+dataDir string
+httpPort int
+raftPort int
+joinAddr string
+}
+
+for _, nodeSpec := range topology.Nodes {
+nodes = append(nodes, struct {
+name string
+dataDir string
+httpPort int
+raftPort int
+joinAddr string
+}{
+nodeSpec.Name,
+filepath.Join(pm.debrosDir, nodeSpec.DataDir, "rqlite"),
+nodeSpec.RQLiteHTTPPort,
+nodeSpec.RQLiteRaftPort,
+nodeSpec.RQLiteJoinTarget,
+})
+}
+
+for _, node := range nodes {
+os.MkdirAll(node.dataDir, 0755)
+
+pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("rqlite-%s.pid", node.name))
+logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", node.name))
+
+var args []string
+args = append(args, fmt.Sprintf("-http-addr=0.0.0.0:%d", node.httpPort))
+args = append(args, fmt.Sprintf("-http-adv-addr=localhost:%d", node.httpPort))
+args = append(args, fmt.Sprintf("-raft-addr=0.0.0.0:%d", node.raftPort))
+args = append(args, fmt.Sprintf("-raft-adv-addr=localhost:%d", node.raftPort))
+if node.joinAddr != "" {
+args = append(args, "-join", node.joinAddr, "-join-attempts", "30", "-join-interval", "10s")
+}
+args = append(args, node.dataDir)
+cmd := exec.CommandContext(ctx, "rqlited", args...)
+
+logFile, _ := os.Create(logPath)
+cmd.Stdout = logFile
+cmd.Stderr = logFile
+
+if err := cmd.Start(); err != nil {
+return fmt.Errorf("failed to start rqlite-%s: %w", node.name, err)
+}
+
+os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
+pm.processes[fmt.Sprintf("rqlite-%s", node.name)] = &ManagedProcess{
+Name: fmt.Sprintf("rqlite-%s", node.name),
+PID: cmd.Process.Pid,
+StartTime: time.Now(),
+LogPath: logPath,
+}
+
+fmt.Fprintf(pm.logWriter, "✓ RQLite (%s) started (PID: %d, HTTP: %d, Raft: %d)\n", node.name, cmd.Process.Pid, node.httpPort, node.raftPort)
+}
+
+time.Sleep(2 * time.Second)
+return nil
+}
+
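To make the nightly-only startRQLite concrete: for the first DefaultTopology entry (Raft port 7001, empty RQLiteJoinTarget) the arguments assembled above amount to roughly

rqlited -http-addr=0.0.0.0:<RQLiteHTTPPort> -http-adv-addr=localhost:<RQLiteHTTPPort> -raft-addr=0.0.0.0:7001 -raft-adv-addr=localhost:7001 <dataDir>

and every node with a non-empty RQLiteJoinTarget additionally gets -join localhost:7001 -join-attempts 30 -join-interval 10s. The angle-bracket placeholders are illustrative; the concrete HTTP port and data directory come from the topology and the local .debros tree.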
 func (pm *ProcessManager) startOlric(ctx context.Context) error {
 pidPath := filepath.Join(pm.pidsDir, "olric.pid")
-logPath := filepath.Join(pm.oramaDir, "logs", "olric.log")
-configPath := filepath.Join(pm.oramaDir, "olric-config.yaml")
+logPath := filepath.Join(pm.debrosDir, "logs", "olric.log")
+configPath := filepath.Join(pm.debrosDir, "olric-config.yaml")

 cmd := exec.CommandContext(ctx, "olric-server")
 cmd.Env = append(os.Environ(), fmt.Sprintf("OLRIC_SERVER_CONFIG=%s", configPath))
@@ -1004,7 +1036,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
 }

 pidPath := filepath.Join(pm.pidsDir, "anon.pid")
-logPath := filepath.Join(pm.oramaDir, "logs", "anon.log")
+logPath := filepath.Join(pm.debrosDir, "logs", "anon.log")

 cmd := exec.CommandContext(ctx, "npx", "anyone-client")
 logFile, _ := os.Create(logPath)
@@ -1024,7 +1056,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {

 func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
 pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
-cmd := exec.Command("./bin/orama-node", "--config", configFile)
+cmd := exec.Command("./bin/node", "--config", configFile)
 logFile, _ := os.Create(logPath)
 cmd.Stdout = logFile
 cmd.Stderr = logFile
@@ -1042,7 +1074,7 @@ func (pm *ProcessManager) startNode(name, configFile, logPath string) error {

 func (pm *ProcessManager) startGateway(ctx context.Context) error {
 pidPath := filepath.Join(pm.pidsDir, "gateway.pid")
-logPath := filepath.Join(pm.oramaDir, "logs", "gateway.log")
+logPath := filepath.Join(pm.debrosDir, "logs", "gateway.log")

 cmd := exec.Command("./bin/gateway", "--config", "gateway.yaml")
 logFile, _ := os.Create(logPath)
@@ -1073,56 +1105,34 @@ func (pm *ProcessManager) stopProcess(name string) error {
 return nil
 }

-// Check if process exists before trying to kill
-if !checkProcessRunning(pid) {
-os.Remove(pidPath)
-fmt.Fprintf(pm.logWriter, "✓ %s (not running)\n", name)
-return nil
-}
-
 proc, err := os.FindProcess(pid)
 if err != nil {
 os.Remove(pidPath)
 return nil
 }

-// Try graceful shutdown first (SIGTERM)
+// Try graceful shutdown first
 proc.Signal(os.Interrupt)

-// Wait up to 2 seconds for graceful shutdown
-gracefulShutdown := false
-for i := 0; i < 20; i++ {
-time.Sleep(100 * time.Millisecond)
-if !checkProcessRunning(pid) {
-gracefulShutdown = true
-break
-}
-}
+// Wait a bit for graceful shutdown
+time.Sleep(500 * time.Millisecond)

-// Force kill if still running after graceful attempt
-if !gracefulShutdown && checkProcessRunning(pid) {
+// Check if process is still running
+if checkProcessRunning(pid) {
+// Force kill if still running
 proc.Signal(os.Kill)
 time.Sleep(200 * time.Millisecond)

-// Kill any child processes (platform-specific)
+// Also kill any child processes (platform-specific)
 if runtime.GOOS != "windows" {
+// Use pkill to kill children on Unix-like systems
 exec.Command("pkill", "-9", "-P", fmt.Sprintf("%d", pid)).Run()
 }

-// Final force kill attempt if somehow still alive
-if checkProcessRunning(pid) {
-exec.Command("kill", "-9", fmt.Sprintf("%d", pid)).Run()
-time.Sleep(100 * time.Millisecond)
-}
 }

 os.Remove(pidPath)

-if gracefulShutdown {
-fmt.Fprintf(pm.logWriter, "✓ %s stopped gracefully\n", name)
-} else {
-fmt.Fprintf(pm.logWriter, "✓ %s stopped (forced)\n", name)
-}
+fmt.Fprintf(pm.logWriter, "✓ %s stopped\n", name)
 return nil
 }

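Condensed, the main-branch stop sequence above is signal, poll, escalate. A hedged sketch follows; checkProcessRunning is the helper referenced in the diff, and the 2-second budget corresponds to the 20 iterations of 100 ms.

// Sketch of graceful-then-forced shutdown on the main side.
proc.Signal(os.Interrupt) // politely ask the process to exit
deadline := time.Now().Add(2 * time.Second)
for time.Now().Before(deadline) && checkProcessRunning(pid) {
	time.Sleep(100 * time.Millisecond)
}
if checkProcessRunning(pid) {
	proc.Signal(os.Kill) // escalate
	if runtime.GOOS != "windows" {
		exec.Command("pkill", "-9", "-P", fmt.Sprintf("%d", pid)).Run() // reap children too
	}
}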
@@ -4,9 +4,10 @@ import "fmt"

 // NodeSpec defines configuration for a single dev environment node
 type NodeSpec struct {
-Name string // node-1, node-2, node-3, node-4, node-5
-ConfigFilename string // node-1.yaml, node-2.yaml, etc.
-DataDir string // relative path from .orama root
+Name string // bootstrap, bootstrap2, node2, node3, node4
+Role string // "bootstrap" or "node"
+ConfigFilename string // bootstrap.yaml, bootstrap2.yaml, node2.yaml, etc.
+DataDir string // relative path from .debros root
 P2PPort int // LibP2P listen port
 IPFSAPIPort int // IPFS API port
 IPFSSwarmPort int // IPFS Swarm port
@@ -15,9 +16,8 @@ type NodeSpec struct {
 RQLiteRaftPort int // RQLite Raft consensus port
 ClusterAPIPort int // IPFS Cluster REST API port
 ClusterPort int // IPFS Cluster P2P port
-UnifiedGatewayPort int // Unified gateway port (proxies all services)
-RQLiteJoinTarget string // which node's RQLite Raft port to join (empty for first node)
-ClusterJoinTarget string // which node's cluster to join (empty for first node)
+RQLiteJoinTarget string // which bootstrap RQLite port to join (leave empty for bootstraps that lead)
+ClusterJoinTarget string // which bootstrap cluster to join (leave empty for bootstrap that leads)
 }

 // Topology defines the complete development environment topology
@@ -34,9 +34,10 @@ func DefaultTopology() *Topology {
 return &Topology{
 Nodes: []NodeSpec{
 {
-Name: "node-1",
-ConfigFilename: "node-1.yaml",
-DataDir: "node-1",
+Name: "bootstrap",
+Role: "bootstrap",
+ConfigFilename: "bootstrap.yaml",
+DataDir: "bootstrap",
 P2PPort: 4001,
 IPFSAPIPort: 4501,
 IPFSSwarmPort: 4101,
@@ -45,14 +46,14 @@ func DefaultTopology() *Topology {
 RQLiteRaftPort: 7001,
 ClusterAPIPort: 9094,
 ClusterPort: 9096,
-UnifiedGatewayPort: 6001,
-RQLiteJoinTarget: "", // First node - creates cluster
+RQLiteJoinTarget: "",
 ClusterJoinTarget: "",
 },
 {
-Name: "node-2",
-ConfigFilename: "node-2.yaml",
-DataDir: "node-2",
+Name: "bootstrap2",
+Role: "bootstrap",
+ConfigFilename: "bootstrap2.yaml",
+DataDir: "bootstrap2",
 P2PPort: 4011,
 IPFSAPIPort: 4511,
 IPFSSwarmPort: 4111,
@@ -61,14 +62,14 @@ func DefaultTopology() *Topology {
 RQLiteRaftPort: 7011,
 ClusterAPIPort: 9104,
 ClusterPort: 9106,
-UnifiedGatewayPort: 6002,
 RQLiteJoinTarget: "localhost:7001",
 ClusterJoinTarget: "localhost:9096",
 },
 {
-Name: "node-3",
-ConfigFilename: "node-3.yaml",
-DataDir: "node-3",
+Name: "node2",
+Role: "node",
+ConfigFilename: "node2.yaml",
+DataDir: "node2",
 P2PPort: 4002,
 IPFSAPIPort: 4502,
 IPFSSwarmPort: 4102,
@@ -77,14 +78,14 @@ func DefaultTopology() *Topology {
 RQLiteRaftPort: 7002,
 ClusterAPIPort: 9114,
 ClusterPort: 9116,
-UnifiedGatewayPort: 6003,
 RQLiteJoinTarget: "localhost:7001",
 ClusterJoinTarget: "localhost:9096",
 },
 {
-Name: "node-4",
-ConfigFilename: "node-4.yaml",
-DataDir: "node-4",
+Name: "node3",
+Role: "node",
+ConfigFilename: "node3.yaml",
+DataDir: "node3",
 P2PPort: 4003,
 IPFSAPIPort: 4503,
 IPFSSwarmPort: 4103,
@@ -93,14 +94,14 @@ func DefaultTopology() *Topology {
 RQLiteRaftPort: 7003,
 ClusterAPIPort: 9124,
 ClusterPort: 9126,
-UnifiedGatewayPort: 6004,
 RQLiteJoinTarget: "localhost:7001",
 ClusterJoinTarget: "localhost:9096",
 },
 {
-Name: "node-5",
-ConfigFilename: "node-5.yaml",
-DataDir: "node-5",
+Name: "node4",
+Role: "node",
+ConfigFilename: "node4.yaml",
+DataDir: "node4",
 P2PPort: 4004,
 IPFSAPIPort: 4504,
 IPFSSwarmPort: 4104,
@@ -109,12 +110,11 @@ func DefaultTopology() *Topology {
 RQLiteRaftPort: 7004,
 ClusterAPIPort: 9134,
 ClusterPort: 9136,
-UnifiedGatewayPort: 6005,
 RQLiteJoinTarget: "localhost:7001",
 ClusterJoinTarget: "localhost:9096",
 },
 },
-GatewayPort: 6000, // Main gateway on 6000 (nodes use 6001-6005)
+GatewayPort: 6001,
 OlricHTTPPort: 3320,
 OlricMemberPort: 3322,
 AnonSOCKSPort: 9050,
@@ -136,7 +136,6 @@ func (t *Topology) AllPorts() []int {
 node.RQLiteRaftPort,
 node.ClusterAPIPort,
 node.ClusterPort,
-node.UnifiedGatewayPort,
 )
 }

@@ -164,7 +163,6 @@ func (t *Topology) PortMap() map[int]string {
 portMap[node.RQLiteRaftPort] = fmt.Sprintf("%s RQLite Raft", node.Name)
 portMap[node.ClusterAPIPort] = fmt.Sprintf("%s IPFS Cluster API", node.Name)
 portMap[node.ClusterPort] = fmt.Sprintf("%s IPFS Cluster P2P", node.Name)
-portMap[node.UnifiedGatewayPort] = fmt.Sprintf("%s Unified Gateway", node.Name)
 }

 portMap[t.GatewayPort] = "Gateway"
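A small usage sketch of these helpers; the wrapper is hypothetical, but AllPorts and PortMap are the methods shown in the hunks above.

// Sketch: print every port the dev topology claims, e.g. to spot clashes.
topo := DefaultTopology()
names := topo.PortMap()
for _, p := range topo.AllPorts() {
	fmt.Printf("%5d %s\n", p, names[p])
}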
@@ -175,20 +173,26 @@ func (t *Topology) PortMap() map[int]string {
 return portMap
 }

-// GetFirstNode returns the first node (the one that creates the cluster)
-func (t *Topology) GetFirstNode() *NodeSpec {
-if len(t.Nodes) > 0 {
-return &t.Nodes[0]
+// GetBootstrapNodes returns only the bootstrap nodes
+func (t *Topology) GetBootstrapNodes() []NodeSpec {
+var bootstraps []NodeSpec
+for _, node := range t.Nodes {
+if node.Role == "bootstrap" {
+bootstraps = append(bootstraps, node)
 }
-return nil
+}
+return bootstraps
 }

-// GetJoiningNodes returns all nodes except the first one (they join the cluster)
-func (t *Topology) GetJoiningNodes() []NodeSpec {
-if len(t.Nodes) > 1 {
-return t.Nodes[1:]
+// GetRegularNodes returns only the regular (non-bootstrap) nodes
+func (t *Topology) GetRegularNodes() []NodeSpec {
+var regulars []NodeSpec
+for _, node := range t.Nodes {
+if node.Role == "node" {
+regulars = append(regulars, node)
 }
-return nil
+}
+return regulars
 }

 // GetNodeByName returns a node by its name, or nil if not found
@@ -2,7 +2,6 @@ package production

 import (
 "fmt"
-"net"
 "os"
 "os/exec"
 "path/filepath"
@@ -298,35 +297,3 @@ func (rc *ResourceChecker) CheckCPU() error {
 }
 return nil
 }
-
-// PortChecker checks if ports are available or in use
-type PortChecker struct{}
-
-// NewPortChecker creates a new port checker
-func NewPortChecker() *PortChecker {
-return &PortChecker{}
-}
-
-// IsPortInUse checks if a specific port is already in use
-func (pc *PortChecker) IsPortInUse(port int) bool {
-addr := fmt.Sprintf("localhost:%d", port)
-conn, err := net.Dial("tcp", addr)
-if err != nil {
-// Port is not in use
-return false
-}
-defer conn.Close()
-// Port is in use
-return true
-}
-
-// IsPortInUseOnHost checks if a port is in use on a specific host
-func (pc *PortChecker) IsPortInUseOnHost(host string, port int) bool {
-addr := net.JoinHostPort(host, fmt.Sprintf("%d", port))
-conn, err := net.Dial("tcp", addr)
-if err != nil {
-return false
-}
-defer conn.Close()
-return true
-}
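For context, the removed PortChecker was a plain TCP dial probe. A hedged usage sketch; pairing it with Topology.AllPorts is an assumption about call sites, not something shown in this diff.

pc := NewPortChecker()
for _, port := range DefaultTopology().AllPorts() {
	if pc.IsPortInUse(port) {
		fmt.Printf("port %d is already in use\n", port)
	}
}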
@@ -4,214 +4,83 @@ import (
 "crypto/rand"
 "encoding/hex"
 "fmt"
-"net"
 "os"
 "os/exec"
-"os/user"
 "path/filepath"
-"strconv"
 "strings"

 "github.com/DeBrosOfficial/network/pkg/environments/templates"
 "github.com/libp2p/go-libp2p/core/crypto"
 "github.com/libp2p/go-libp2p/core/peer"
-"github.com/multiformats/go-multiaddr"
 )

 // ConfigGenerator manages generation of node, gateway, and service configs
 type ConfigGenerator struct {
-oramaDir string
+debrosDir string
 }

 // NewConfigGenerator creates a new config generator
-func NewConfigGenerator(oramaDir string) *ConfigGenerator {
+func NewConfigGenerator(debrosDir string) *ConfigGenerator {
 return &ConfigGenerator{
-oramaDir: oramaDir,
+debrosDir: debrosDir,
 }
 }

-// extractIPFromMultiaddr extracts the IP address from a peer multiaddr
-// Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols
-// Returns the IP address as a string, or empty string if extraction/resolution fails
-func extractIPFromMultiaddr(multiaddrStr string) string {
-ma, err := multiaddr.NewMultiaddr(multiaddrStr)
-if err != nil {
-return ""
+// GenerateNodeConfig generates node.yaml configuration
+func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers []string, vpsIP string, bootstrapJoin string) (string, error) {
+var nodeID string
+if isBootstrap {
+nodeID = "bootstrap"
+} else {
+nodeID = "node"
 }

-// First, try to extract direct IP address
-var ip net.IP
-var dnsName string
-multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
-switch c.Protocol().Code {
-case multiaddr.P_IP4, multiaddr.P_IP6:
-ip = net.ParseIP(c.Value())
-return false // Stop iteration - found IP
-case multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_DNSADDR:
-dnsName = c.Value()
-// Continue to check for IP, but remember DNS name as fallback
+if isBootstrap {
+// Bootstrap node - populate peer list and optional join address
+data := templates.BootstrapConfigData{
+NodeID: nodeID,
+P2PPort: 4001,
+DataDir: filepath.Join(cg.debrosDir, "data", "bootstrap"),
+RQLiteHTTPPort: 5001,
+RQLiteRaftPort: 7001,
+ClusterAPIPort: 9094,
+IPFSAPIPort: 4501,
+BootstrapPeers: bootstrapPeers,
+RQLiteJoinAddress: bootstrapJoin,
 }
-return true
-})
-
-// If we found a direct IP, return it
-if ip != nil {
-return ip.String()
+return templates.RenderBootstrapConfig(data)
 }

-// If we found a DNS name, try to resolve it
-if dnsName != "" {
-if resolvedIPs, err := net.LookupIP(dnsName); err == nil && len(resolvedIPs) > 0 {
-// Prefer IPv4 addresses, but accept IPv6 if that's all we have
-for _, resolvedIP := range resolvedIPs {
-if resolvedIP.To4() != nil {
-return resolvedIP.String()
-}
-}
-// Return first IPv6 address if no IPv4 found
-return resolvedIPs[0].String()
-}
-}
-
-return ""
-}
-
-// inferPeerIP extracts the IP address from peer multiaddrs
-// Iterates through all peers to find a valid IP (supports DNS resolution)
-// Falls back to vpsIP if provided, otherwise returns empty string
-func inferPeerIP(peers []string, vpsIP string) string {
-// Try to extract IP from each peer (in order)
-for _, peer := range peers {
-if ip := extractIPFromMultiaddr(peer); ip != "" {
-return ip
-}
-}
-// Fall back to vpsIP if provided
+// Regular node - must have join address
+rqliteJoinAddr := "localhost:7001"
 if vpsIP != "" {
-return vpsIP
-}
-return ""
-}
+rqliteJoinAddr = vpsIP + ":7001"

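To make the main-branch helper concrete, two assumed inputs and the values extractIPFromMultiaddr would yield; the addresses are documentation examples and DNS results depend on the resolver.

extractIPFromMultiaddr("/ip4/203.0.113.7/tcp/4001/p2p/12D3KooW...") // "203.0.113.7", taken directly from the ip4 component
extractIPFromMultiaddr("/dns4/node-1.example.org/tcp/4001")         // resolved via net.LookupIP, preferring an IPv4 answer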
-// GenerateNodeConfig generates node.yaml configuration (unified architecture)
-func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP string, joinAddress string, domain string, enableHTTPS bool) (string, error) {
-// Generate node ID from domain or use default
-nodeID := "node"
-if domain != "" {
-// Extract node identifier from domain (e.g., "node-123" from "node-123.orama.network")
-parts := strings.Split(domain, ".")
-if len(parts) > 0 {
-nodeID = parts[0]
-}
-}
-
-// Determine advertise addresses - use vpsIP if provided
-// When HTTPS is enabled, RQLite uses native TLS on port 7002 (not SNI gateway)
-// This avoids conflicts between SNI gateway TLS termination and RQLite's native TLS
-var httpAdvAddr, raftAdvAddr string
-if vpsIP != "" {
-httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
-if enableHTTPS {
-// Use direct IP:7002 for Raft - RQLite handles TLS natively via -node-cert
-// This bypasses the SNI gateway which would cause TLS termination conflicts
-raftAdvAddr = net.JoinHostPort(vpsIP, "7002")
-} else {
-raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
-}
-} else {
-// Fallback to localhost if no vpsIP
-httpAdvAddr = "localhost:5001"
-raftAdvAddr = "localhost:7001"
-}
-
-// Determine RQLite join address
-// When HTTPS is enabled, use port 7002 (direct RQLite TLS) instead of 7001 (SNI gateway)
-joinPort := "7001"
-if enableHTTPS {
-joinPort = "7002"
-}
-
-var rqliteJoinAddr string
-if joinAddress != "" {
-// Use explicitly provided join address
-// If it contains :7001 and HTTPS is enabled, update to :7002
-if enableHTTPS && strings.Contains(joinAddress, ":7001") {
-rqliteJoinAddr = strings.Replace(joinAddress, ":7001", ":7002", 1)
-} else {
-rqliteJoinAddr = joinAddress
-}
-} else if len(peerAddresses) > 0 {
-// Infer join address from peers
-peerIP := inferPeerIP(peerAddresses, "")
-if peerIP != "" {
-rqliteJoinAddr = net.JoinHostPort(peerIP, joinPort)
-// Validate that join address doesn't match this node's own raft address (would cause self-join)
-if rqliteJoinAddr == raftAdvAddr {
-rqliteJoinAddr = "" // Clear it - this is the first node
-}
-}
-}
-// If no join address and no peers, this is the first node - it will create the cluster
-
-// TLS/ACME configuration
-tlsCacheDir := ""
-httpPort := 80
-httpsPort := 443
-if enableHTTPS {
-tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
-}
-
-// Unified data directory (all nodes equal)
-// When HTTPS/SNI is enabled, use internal port 7002 for RQLite Raft (SNI gateway listens on 7001)
-raftInternalPort := 7001
-if enableHTTPS {
-raftInternalPort = 7002 // Internal port when SNI is enabled
 }

 data := templates.NodeConfigData{
 NodeID: nodeID,
 P2PPort: 4001,
-DataDir: filepath.Join(cg.oramaDir, "data"),
+DataDir: filepath.Join(cg.debrosDir, "data", "node"),
 RQLiteHTTPPort: 5001,
-RQLiteRaftPort: 7001, // External SNI port
-RQLiteRaftInternalPort: raftInternalPort, // Internal RQLite binding port
+RQLiteRaftPort: 7001,
 RQLiteJoinAddress: rqliteJoinAddr,
-BootstrapPeers: peerAddresses,
+BootstrapPeers: bootstrapPeers,
 ClusterAPIPort: 9094,
 IPFSAPIPort: 4501,
-HTTPAdvAddress: httpAdvAddr,
-RaftAdvAddress: raftAdvAddr,
-UnifiedGatewayPort: 6001,
-Domain: domain,
-EnableHTTPS: enableHTTPS,
-TLSCacheDir: tlsCacheDir,
-HTTPPort: httpPort,
-HTTPSPort: httpsPort,
 }

-// When HTTPS is enabled, configure RQLite node-to-node TLS encryption
-// RQLite handles TLS natively on port 7002, bypassing the SNI gateway
-// This avoids TLS termination conflicts between SNI gateway and RQLite
-if enableHTTPS && domain != "" {
-data.NodeCert = filepath.Join(tlsCacheDir, domain+".crt")
-data.NodeKey = filepath.Join(tlsCacheDir, domain+".key")
-// Skip verification since nodes may have different domain certificates
-data.NodeNoVerify = true
-}
-
 return templates.RenderNodeConfig(data)
 }

 // GenerateGatewayConfig generates gateway.yaml configuration
-func (cg *ConfigGenerator) GenerateGatewayConfig(peerAddresses []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
+func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
 tlsCacheDir := ""
 if enableHTTPS {
-tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
+tlsCacheDir = filepath.Join(cg.debrosDir, "tls-cache")
 }

 data := templates.GatewayConfigData{
 ListenPort: 6001,
-BootstrapPeers: peerAddresses,
+BootstrapPeers: bootstrapPeers,
 OlricServers: olricServers,
 ClusterAPIPort: 9094,
 IPFSAPIPort: 4501,
@@ -224,65 +93,41 @@ func (cg *ConfigGenerator) GenerateGatewayConfig(peerAddresses []string, enableH
 }

 // GenerateOlricConfig generates Olric configuration
-func (cg *ConfigGenerator) GenerateOlricConfig(serverBindAddr string, httpPort int, memberlistBindAddr string, memberlistPort int, memberlistEnv string) (string, error) {
+func (cg *ConfigGenerator) GenerateOlricConfig(bindAddr string, httpPort, memberlistPort int) (string, error) {
 data := templates.OlricConfigData{
-ServerBindAddr: serverBindAddr,
+BindAddr: bindAddr,
 HTTPPort: httpPort,
-MemberlistBindAddr: memberlistBindAddr,
 MemberlistPort: memberlistPort,
-MemberlistEnvironment: memberlistEnv,
 }
 return templates.RenderOlricConfig(data)
 }

 // SecretGenerator manages generation of shared secrets and keys
 type SecretGenerator struct {
-oramaDir string
+debrosDir string
 }

 // NewSecretGenerator creates a new secret generator
-func NewSecretGenerator(oramaDir string) *SecretGenerator {
+func NewSecretGenerator(debrosDir string) *SecretGenerator {
 return &SecretGenerator{
-oramaDir: oramaDir,
+debrosDir: debrosDir,
 }
 }

-// ValidateClusterSecret ensures a cluster secret is 32 bytes of hex
-func ValidateClusterSecret(secret string) error {
-secret = strings.TrimSpace(secret)
-if secret == "" {
-return fmt.Errorf("cluster secret cannot be empty")
-}
-if len(secret) != 64 {
-return fmt.Errorf("cluster secret must be 64 hex characters (32 bytes)")
-}
-if _, err := hex.DecodeString(secret); err != nil {
-return fmt.Errorf("cluster secret must be valid hex: %w", err)
-}
-return nil
-}
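A secret that passes the main-branch ValidateClusterSecret check is just 32 random bytes, hex encoded. A minimal sketch using the crypto/rand and encoding/hex imports already present in this file (error handling abbreviated):

buf := make([]byte, 32)
if _, err := rand.Read(buf); err != nil {
	return "", fmt.Errorf("failed to generate cluster secret: %w", err)
}
secret := hex.EncodeToString(buf) // 64 hex characters, as ValidateClusterSecret expects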

 // EnsureClusterSecret gets or generates the IPFS Cluster secret
 func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
-secretPath := filepath.Join(sg.oramaDir, "secrets", "cluster-secret")
+secretPath := filepath.Join(sg.debrosDir, "secrets", "cluster-secret")
 secretDir := filepath.Dir(secretPath)

-// Ensure secrets directory exists with restricted permissions (0700)
-if err := os.MkdirAll(secretDir, 0700); err != nil {
+// Ensure secrets directory exists
+if err := os.MkdirAll(secretDir, 0755); err != nil {
 return "", fmt.Errorf("failed to create secrets directory: %w", err)
 }
-// Ensure directory permissions are correct even if it already existed
-if err := os.Chmod(secretDir, 0700); err != nil {
-return "", fmt.Errorf("failed to set secrets directory permissions: %w", err)
-}

 // Try to read existing secret
 if data, err := os.ReadFile(secretPath); err == nil {
 secret := strings.TrimSpace(string(data))
 if len(secret) == 64 {
-if err := ensureSecretFilePermissions(secretPath); err != nil {
-return "", err
-}
 return secret, nil
 }
 }
@@ -298,48 +143,19 @@ func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
 if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
 return "", fmt.Errorf("failed to save cluster secret: %w", err)
 }
-if err := ensureSecretFilePermissions(secretPath); err != nil {
-return "", err
-}

 return secret, nil
 }

-func ensureSecretFilePermissions(secretPath string) error {
-if err := os.Chmod(secretPath, 0600); err != nil {
-return fmt.Errorf("failed to set permissions on %s: %w", secretPath, err)
-}
-
-if usr, err := user.Lookup("debros"); err == nil {
-uid, err := strconv.Atoi(usr.Uid)
-if err != nil {
-return fmt.Errorf("failed to parse debros UID: %w", err)
-}
-gid, err := strconv.Atoi(usr.Gid)
-if err != nil {
-return fmt.Errorf("failed to parse debros GID: %w", err)
-}
-if err := os.Chown(secretPath, uid, gid); err != nil {
-return fmt.Errorf("failed to change ownership of %s: %w", secretPath, err)
-}
-}
-
-return nil
-}
-
 // EnsureSwarmKey gets or generates the IPFS private swarm key
 func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
-swarmKeyPath := filepath.Join(sg.oramaDir, "secrets", "swarm.key")
+swarmKeyPath := filepath.Join(sg.debrosDir, "secrets", "swarm.key")
 secretDir := filepath.Dir(swarmKeyPath)

-// Ensure secrets directory exists with restricted permissions (0700)
-if err := os.MkdirAll(secretDir, 0700); err != nil {
+// Ensure secrets directory exists
+if err := os.MkdirAll(secretDir, 0755); err != nil {
 return nil, fmt.Errorf("failed to create secrets directory: %w", err)
 }
-// Ensure directory permissions are correct even if it already existed
-if err := os.Chmod(secretDir, 0700); err != nil {
-return nil, fmt.Errorf("failed to set secrets directory permissions: %w", err)
-}

 // Try to read existing key
 if data, err := os.ReadFile(swarmKeyPath); err == nil {
@@ -365,10 +181,9 @@ func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
 return []byte(content), nil
 }

-// EnsureNodeIdentity gets or generates the node's LibP2P identity (unified - no bootstrap/node distinction)
-func (sg *SecretGenerator) EnsureNodeIdentity() (peer.ID, error) {
-// Unified data directory (no bootstrap/node distinction)
-keyDir := filepath.Join(sg.oramaDir, "data")
+// EnsureNodeIdentity gets or generates the node's LibP2P identity
+func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error) {
+keyDir := filepath.Join(sg.debrosDir, "data", nodeType)
 keyPath := filepath.Join(keyDir, "identity.key")

 // Ensure data directory exists
@@ -409,16 +224,9 @@ func (sg *SecretGenerator) EnsureNodeIdentity() (peer.ID, error) {

 // SaveConfig writes a configuration file to disk
 func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
-var configDir string
-// gateway.yaml goes to data/ directory, other configs go to configs/
-if filename == "gateway.yaml" {
-configDir = filepath.Join(sg.oramaDir, "data")
-} else {
-configDir = filepath.Join(sg.oramaDir, "configs")
-}
-
+configDir := filepath.Join(sg.debrosDir, "configs")
 if err := os.MkdirAll(configDir, 0755); err != nil {
-return fmt.Errorf("failed to create config directory: %w", err)
+return fmt.Errorf("failed to create configs directory: %w", err)
 }

 configPath := filepath.Join(configDir, filename)
@@ -3,7 +3,6 @@ package production
 import (
 "encoding/json"
 "fmt"
-"io"
 "os"
 "os/exec"
 "path/filepath"
@@ -13,11 +12,11 @@ import (
 // BinaryInstaller handles downloading and installing external binaries
 type BinaryInstaller struct {
 arch string
-logWriter io.Writer
+logWriter interface{} // io.Writer
 }

 // NewBinaryInstaller creates a new binary installer
-func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller {
+func NewBinaryInstaller(arch string, logWriter interface{}) *BinaryInstaller {
 return &BinaryInstaller{
 arch: arch,
 logWriter: logWriter,
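The practical difference between the two signatures: with io.Writer (main) callers hand over any writer and the installer logs directly, whereas the nightly's interface{} forces the repeated type assertions visible in the hunks below. A hedged sketch of constructing the main-branch installer; the arch string and error handling are illustrative:

installer := NewBinaryInstaller("amd64", os.Stdout)
if err := installer.InstallRQLite(); err != nil {
	log.Fatalf("rqlite install failed: %v", err)
}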
@@ -27,11 +26,11 @@ func NewBinaryInstaller(arch string, logWriter io.Writer) *BinaryInstaller {
 // InstallRQLite downloads and installs RQLite
 func (bi *BinaryInstaller) InstallRQLite() error {
 if _, err := exec.LookPath("rqlited"); err == nil {
-fmt.Fprintf(bi.logWriter, " ✓ RQLite already installed\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ RQLite already installed\n")
 return nil
 }

-fmt.Fprintf(bi.logWriter, " Installing RQLite...\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing RQLite...\n")

 version := "8.43.0"
 tarball := fmt.Sprintf("rqlite-v%s-linux-%s.tar.gz", version, bi.arch)
@@ -54,14 +53,12 @@ func (bi *BinaryInstaller) InstallRQLite() error {
 if err := exec.Command("cp", dir+"/rqlited", "/usr/local/bin/").Run(); err != nil {
 return fmt.Errorf("failed to copy rqlited binary: %w", err)
 }
-if err := exec.Command("chmod", "+x", "/usr/local/bin/rqlited").Run(); err != nil {
-fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chmod rqlited: %v\n", err)
-}
+exec.Command("chmod", "+x", "/usr/local/bin/rqlited").Run()

 // Ensure PATH includes /usr/local/bin
 os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/bin")

-fmt.Fprintf(bi.logWriter, " ✓ RQLite installed\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ RQLite installed\n")
 return nil
 }

@@ -69,11 +66,11 @@ func (bi *BinaryInstaller) InstallRQLite() error {
 // Follows official steps from https://docs.ipfs.tech/install/command-line/
 func (bi *BinaryInstaller) InstallIPFS() error {
 if _, err := exec.LookPath("ipfs"); err == nil {
-fmt.Fprintf(bi.logWriter, " ✓ IPFS already installed\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS already installed\n")
 return nil
 }

-fmt.Fprintf(bi.logWriter, " Installing IPFS (Kubo)...\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing IPFS (Kubo)...\n")

 // Follow official installation steps in order
 kuboVersion := "v0.38.2"
@@ -84,7 +81,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
 kuboDir := filepath.Join(tmpDir, "kubo")

 // Step 1: Download the Linux binary from dist.ipfs.tech
-fmt.Fprintf(bi.logWriter, " Step 1: Downloading Kubo v%s...\n", kuboVersion)
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 1: Downloading Kubo v%s...\n", kuboVersion)
 cmd := exec.Command("wget", "-q", url, "-O", tarPath)
 if err := cmd.Run(); err != nil {
 return fmt.Errorf("failed to download kubo from %s: %w", url, err)
@@ -96,7 +93,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
 }

 // Step 2: Unzip the file
-fmt.Fprintf(bi.logWriter, " Step 2: Extracting Kubo archive...\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 2: Extracting Kubo archive...\n")
 cmd = exec.Command("tar", "-xzf", tarPath, "-C", tmpDir)
 if err := cmd.Run(); err != nil {
 return fmt.Errorf("failed to extract kubo tarball: %w", err)
@@ -108,7 +105,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
 }

 // Step 3: Move into the kubo folder (cd kubo)
-fmt.Fprintf(bi.logWriter, " Step 3: Running installation script...\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 3: Running installation script...\n")

 // Step 4: Run the installation script (sudo bash install.sh)
 installScript := filepath.Join(kuboDir, "install.sh")
@@ -123,7 +120,7 @@ func (bi *BinaryInstaller) InstallIPFS() error {
 }

 // Step 5: Test that Kubo has installed correctly
-fmt.Fprintf(bi.logWriter, " Step 5: Verifying installation...\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Step 5: Verifying installation...\n")
 cmd = exec.Command("ipfs", "--version")
 output, err := cmd.CombinedOutput()
 if err != nil {
@@ -144,24 +141,24 @@ func (bi *BinaryInstaller) InstallIPFS() error {
 return fmt.Errorf("ipfs binary not found after installation in %v", ipfsLocations)
 }
 } else {
-fmt.Fprintf(bi.logWriter, " %s", string(output))
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " %s", string(output))
 }

 // Ensure PATH is updated for current process
 os.Setenv("PATH", os.Getenv("PATH")+":/usr/local/bin")

-fmt.Fprintf(bi.logWriter, " ✓ IPFS installed successfully\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS installed successfully\n")
 return nil
 }

 // InstallIPFSCluster downloads and installs IPFS Cluster Service
 func (bi *BinaryInstaller) InstallIPFSCluster() error {
 if _, err := exec.LookPath("ipfs-cluster-service"); err == nil {
-fmt.Fprintf(bi.logWriter, " ✓ IPFS Cluster already installed\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS Cluster already installed\n")
 return nil
 }

-fmt.Fprintf(bi.logWriter, " Installing IPFS Cluster Service...\n")
+fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing IPFS Cluster Service...\n")

 // Check if Go is available
 if _, err := exec.LookPath("go"); err != nil {
@@ -174,18 +171,18 @@ func (bi *BinaryInstaller) InstallIPFSCluster() error {
@ -174,18 +171,18 @@ func (bi *BinaryInstaller) InstallIPFSCluster() error {
|
|||||||
return fmt.Errorf("failed to install IPFS Cluster: %w", err)
|
return fmt.Errorf("failed to install IPFS Cluster: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ IPFS Cluster installed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ IPFS Cluster installed\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// InstallOlric downloads and installs Olric server
|
// InstallOlric downloads and installs Olric server
|
||||||
func (bi *BinaryInstaller) InstallOlric() error {
|
func (bi *BinaryInstaller) InstallOlric() error {
|
||||||
if _, err := exec.LookPath("olric-server"); err == nil {
|
if _, err := exec.LookPath("olric-server"); err == nil {
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ Olric already installed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Olric already installed\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " Installing Olric...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing Olric...\n")
|
||||||
|
|
||||||
// Check if Go is available
|
// Check if Go is available
|
||||||
if _, err := exec.LookPath("go"); err != nil {
|
if _, err := exec.LookPath("go"); err != nil {
|
||||||
@ -198,20 +195,20 @@ func (bi *BinaryInstaller) InstallOlric() error {
|
|||||||
return fmt.Errorf("failed to install Olric: %w", err)
|
return fmt.Errorf("failed to install Olric: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ Olric installed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Olric installed\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// InstallGo downloads and installs Go toolchain
|
// InstallGo downloads and installs Go toolchain
|
||||||
func (bi *BinaryInstaller) InstallGo() error {
|
func (bi *BinaryInstaller) InstallGo() error {
|
||||||
if _, err := exec.LookPath("go"); err == nil {
|
if _, err := exec.LookPath("go"); err == nil {
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ Go already installed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Go already installed\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " Installing Go...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing Go...\n")
|
||||||
|
|
||||||
goTarball := fmt.Sprintf("go1.22.5.linux-%s.tar.gz", bi.arch)
|
goTarball := fmt.Sprintf("go1.21.6.linux-%s.tar.gz", bi.arch)
|
||||||
goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball)
|
goURL := fmt.Sprintf("https://go.dev/dl/%s", goTarball)
|
||||||
|
|
||||||
// Download
|
// Download
|
||||||
@ -235,7 +232,7 @@ func (bi *BinaryInstaller) InstallGo() error {
|
|||||||
return fmt.Errorf("go installed but not found in PATH after installation")
|
return fmt.Errorf("go installed but not found in PATH after installation")
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ Go installed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ Go installed\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -278,50 +275,31 @@ func (bi *BinaryInstaller) ResolveBinaryPath(binary string, extraPaths ...string
|
|||||||
}
|
}
|
||||||
|
|
||||||
// InstallDeBrosBinaries clones and builds DeBros binaries
|
// InstallDeBrosBinaries clones and builds DeBros binaries
|
||||||
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, oramaHome string, skipRepoUpdate bool) error {
|
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome string) error {
|
||||||
fmt.Fprintf(bi.logWriter, " Building DeBros binaries...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building DeBros binaries...\n")
|
||||||
|
|
||||||
srcDir := filepath.Join(oramaHome, "src")
|
srcDir := filepath.Join(debrosHome, "src")
|
||||||
binDir := filepath.Join(oramaHome, "bin")
|
binDir := filepath.Join(debrosHome, "bin")
|
||||||
|
|
||||||
// Ensure directories exist
|
// Ensure directories exist
|
||||||
if err := os.MkdirAll(srcDir, 0755); err != nil {
|
os.MkdirAll(srcDir, 0755)
|
||||||
return fmt.Errorf("failed to create source directory %s: %w", srcDir, err)
|
os.MkdirAll(binDir, 0755)
|
||||||
}
|
|
||||||
if err := os.MkdirAll(binDir, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to create bin directory %s: %w", binDir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if source directory has content (either git repo or pre-existing source)
|
|
||||||
hasSourceContent := false
|
|
||||||
if entries, err := os.ReadDir(srcDir); err == nil && len(entries) > 0 {
|
|
||||||
hasSourceContent = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if git repository is already initialized
|
// Check if git repository is already initialized
|
||||||
isGitRepo := false
|
repoInitialized := false
|
||||||
if _, err := os.Stat(filepath.Join(srcDir, ".git")); err == nil {
|
if _, err := os.Stat(filepath.Join(srcDir, ".git")); err == nil {
|
||||||
isGitRepo = true
|
repoInitialized = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle repository update/clone based on skipRepoUpdate flag
|
|
||||||
if skipRepoUpdate {
|
|
||||||
fmt.Fprintf(bi.logWriter, " Skipping repo clone/pull (--no-pull flag)\n")
|
|
||||||
if !hasSourceContent {
|
|
||||||
return fmt.Errorf("cannot skip pull: source directory is empty at %s (need to populate it first)", srcDir)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(bi.logWriter, " Using existing source at %s (skipping git operations)\n", srcDir)
|
|
||||||
// Skip to build step - don't execute any git commands
|
|
||||||
} else {
|
|
||||||
// Clone repository if not present, otherwise update it
|
// Clone repository if not present, otherwise update it
|
||||||
if !isGitRepo {
|
if !repoInitialized {
|
||||||
fmt.Fprintf(bi.logWriter, " Cloning repository...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Cloning repository...\n")
|
||||||
cmd := exec.Command("git", "clone", "--branch", branch, "--depth", "1", "https://github.com/DeBrosOfficial/network.git", srcDir)
|
cmd := exec.Command("git", "clone", "--branch", branch, "--depth", "1", "https://github.com/DeBrosOfficial/network.git", srcDir)
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
return fmt.Errorf("failed to clone repository: %w", err)
|
return fmt.Errorf("failed to clone repository: %w", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(bi.logWriter, " Updating repository to latest changes...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating repository to latest changes...\n")
|
||||||
if output, err := exec.Command("git", "-C", srcDir, "fetch", "origin", branch).CombinedOutput(); err != nil {
|
if output, err := exec.Command("git", "-C", srcDir, "fetch", "origin", branch).CombinedOutput(); err != nil {
|
||||||
return fmt.Errorf("failed to fetch repository updates: %v\n%s", err, string(output))
|
return fmt.Errorf("failed to fetch repository updates: %v\n%s", err, string(output))
|
||||||
}
|
}
|
||||||
@ -332,119 +310,59 @@ func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, oramaHome string
|
|||||||
return fmt.Errorf("failed to clean repository: %v\n%s", err, string(output))
|
return fmt.Errorf("failed to clean repository: %v\n%s", err, string(output))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// Build binaries
|
// Build binaries
|
||||||
fmt.Fprintf(bi.logWriter, " Building binaries...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building binaries...\n")
|
||||||
cmd := exec.Command("make", "build")
|
cmd := exec.Command("make", "build")
|
||||||
cmd.Dir = srcDir
|
cmd.Dir = srcDir
|
||||||
cmd.Env = append(os.Environ(), "HOME="+oramaHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
|
cmd.Env = append(os.Environ(), "HOME="+debrosHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
if output, err := cmd.CombinedOutput(); err != nil {
|
||||||
return fmt.Errorf("failed to build: %v\n%s", err, string(output))
|
return fmt.Errorf("failed to build: %v\n%s", err, string(output))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy binaries
|
// Copy binaries
|
||||||
fmt.Fprintf(bi.logWriter, " Copying binaries...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Copying binaries...\n")
|
||||||
srcBinDir := filepath.Join(srcDir, "bin")
|
cmd = exec.Command("sh", "-c", fmt.Sprintf("cp -r %s/bin/* %s/", srcDir, binDir))
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
// Check if source bin directory exists
|
return fmt.Errorf("failed to copy binaries: %w", err)
|
||||||
if _, err := os.Stat(srcBinDir); os.IsNotExist(err) {
|
|
||||||
return fmt.Errorf("source bin directory does not exist at %s - build may have failed", srcBinDir)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if there are any files to copy
|
exec.Command("chmod", "-R", "755", binDir).Run()
|
||||||
entries, err := os.ReadDir(srcBinDir)
|
exec.Command("chown", "-R", "debros:debros", binDir).Run()
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read source bin directory: %w", err)
|
|
||||||
}
|
|
||||||
if len(entries) == 0 {
|
|
||||||
return fmt.Errorf("source bin directory is empty - build may have failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy each binary individually to avoid wildcard expansion issues
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ DeBros binaries installed\n")
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
srcPath := filepath.Join(srcBinDir, entry.Name())
|
|
||||||
dstPath := filepath.Join(binDir, entry.Name())
|
|
||||||
|
|
||||||
// Read source file
|
|
||||||
data, err := os.ReadFile(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read binary %s: %w", entry.Name(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write destination file
|
|
||||||
if err := os.WriteFile(dstPath, data, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to write binary %s: %w", entry.Name(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := exec.Command("chmod", "-R", "755", binDir).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chmod bin directory: %v\n", err)
|
|
||||||
}
|
|
||||||
if err := exec.Command("chown", "-R", "debros:debros", binDir).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown bin directory: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Grant CAP_NET_BIND_SERVICE to orama-node to allow binding to ports 80/443 without root
|
|
||||||
nodeBinary := filepath.Join(binDir, "orama-node")
|
|
||||||
if _, err := os.Stat(nodeBinary); err == nil {
|
|
||||||
if err := exec.Command("setcap", "cap_net_bind_service=+ep", nodeBinary).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to setcap on orama-node: %v\n", err)
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Gateway may not be able to bind to port 80/443\n")
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ Set CAP_NET_BIND_SERVICE on orama-node\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ DeBros binaries installed\n")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// InstallSystemDependencies installs system-level dependencies via apt
|
// InstallSystemDependencies installs system-level dependencies via apt
|
||||||
func (bi *BinaryInstaller) InstallSystemDependencies() error {
|
func (bi *BinaryInstaller) InstallSystemDependencies() error {
|
||||||
fmt.Fprintf(bi.logWriter, " Installing system dependencies...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Installing system dependencies...\n")
|
||||||
|
|
||||||
// Update package list
|
// Update package list
|
||||||
cmd := exec.Command("apt-get", "update")
|
cmd := exec.Command("apt-get", "update")
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
fmt.Fprintf(bi.logWriter, " Warning: apt update failed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Warning: apt update failed\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Install dependencies including Node.js for anyone-client
|
// Install dependencies
|
||||||
cmd = exec.Command("apt-get", "install", "-y", "curl", "git", "make", "build-essential", "wget", "nodejs", "npm")
|
cmd = exec.Command("apt-get", "install", "-y", "curl", "git", "make", "build-essential", "wget")
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
return fmt.Errorf("failed to install dependencies: %w", err)
|
return fmt.Errorf("failed to install dependencies: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ System dependencies installed\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " ✓ System dependencies installed\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IPFSPeerInfo holds IPFS peer information for configuring Peering.Peers
|
// InitializeIPFSRepo initializes an IPFS repository for a node
|
||||||
type IPFSPeerInfo struct {
|
func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int) error {
|
||||||
PeerID string
|
|
||||||
Addrs []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster peer discovery
|
|
||||||
type IPFSClusterPeerInfo struct {
|
|
||||||
PeerID string // Cluster peer ID (different from IPFS peer ID)
|
|
||||||
Addrs []string // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitializeIPFSRepo initializes an IPFS repository for a node (unified - no bootstrap/node distinction)
|
|
||||||
// If ipfsPeer is provided, configures Peering.Peers for peer discovery in private networks
|
|
||||||
func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int, ipfsPeer *IPFSPeerInfo) error {
|
|
||||||
configPath := filepath.Join(ipfsRepoPath, "config")
|
configPath := filepath.Join(ipfsRepoPath, "config")
|
||||||
repoExists := false
|
repoExists := false
|
||||||
if _, err := os.Stat(configPath); err == nil {
|
if _, err := os.Stat(configPath); err == nil {
|
||||||
repoExists = true
|
repoExists = true
|
||||||
fmt.Fprintf(bi.logWriter, " IPFS repo already exists, ensuring configuration...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS repo for %s already exists, ensuring configuration...\n", nodeType)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(bi.logWriter, " Initializing IPFS repo...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS repo for %s...\n", nodeType)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.MkdirAll(ipfsRepoPath, 0755); err != nil {
|
if err := os.MkdirAll(ipfsRepoPath, 0755); err != nil {
|
||||||
@ -477,7 +395,7 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath
|
|||||||
|
|
||||||
// Configure IPFS addresses (API, Gateway, Swarm) by modifying the config file directly
|
// Configure IPFS addresses (API, Gateway, Swarm) by modifying the config file directly
|
||||||
// This ensures the ports are set correctly and avoids conflicts with RQLite on port 5001
|
// This ensures the ports are set correctly and avoids conflicts with RQLite on port 5001
|
||||||
fmt.Fprintf(bi.logWriter, " Configuring IPFS addresses (API: %d, Gateway: %d, Swarm: %d)...\n", apiPort, gatewayPort, swarmPort)
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Configuring IPFS addresses (API: %d, Gateway: %d, Swarm: %d)...\n", apiPort, gatewayPort, swarmPort)
|
||||||
if err := bi.configureIPFSAddresses(ipfsRepoPath, apiPort, gatewayPort, swarmPort); err != nil {
|
if err := bi.configureIPFSAddresses(ipfsRepoPath, apiPort, gatewayPort, swarmPort); err != nil {
|
||||||
return fmt.Errorf("failed to configure IPFS addresses: %w", err)
|
return fmt.Errorf("failed to configure IPFS addresses: %w", err)
|
||||||
}
|
}
|
||||||
@ -486,7 +404,7 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath
|
|||||||
// This is critical - IPFS will fail to start if AutoConf is enabled on a private network
|
// This is critical - IPFS will fail to start if AutoConf is enabled on a private network
|
||||||
// We do this even for existing repos to fix repos initialized before this fix was applied
|
// We do this even for existing repos to fix repos initialized before this fix was applied
|
||||||
if swarmKeyExists {
|
if swarmKeyExists {
|
||||||
fmt.Fprintf(bi.logWriter, " Disabling AutoConf for private swarm...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Disabling AutoConf for private swarm...\n")
|
||||||
cmd := exec.Command(ipfsBinary, "config", "--json", "AutoConf.Enabled", "false")
|
cmd := exec.Command(ipfsBinary, "config", "--json", "AutoConf.Enabled", "false")
|
||||||
cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
|
cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
if output, err := cmd.CombinedOutput(); err != nil {
|
||||||
@ -495,7 +413,7 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath
|
|||||||
|
|
||||||
// Clear AutoConf placeholders from config to prevent Kubo startup errors
|
// Clear AutoConf placeholders from config to prevent Kubo startup errors
|
||||||
// When AutoConf is disabled, 'auto' placeholders must be replaced with explicit values or empty
|
// When AutoConf is disabled, 'auto' placeholders must be replaced with explicit values or empty
|
||||||
fmt.Fprintf(bi.logWriter, " Clearing AutoConf placeholders from IPFS config...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Clearing AutoConf placeholders from IPFS config...\n")
|
||||||
|
|
||||||
type configCommand struct {
|
type configCommand struct {
|
||||||
desc string
|
desc string
|
||||||
@ -511,27 +429,17 @@ func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, step := range cleanup {
|
for _, step := range cleanup {
|
||||||
fmt.Fprintf(bi.logWriter, " %s...\n", step.desc)
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " %s...\n", step.desc)
|
||||||
cmd := exec.Command(ipfsBinary, step.args...)
|
cmd := exec.Command(ipfsBinary, step.args...)
|
||||||
cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
|
cmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
if output, err := cmd.CombinedOutput(); err != nil {
|
||||||
return fmt.Errorf("failed while %s: %v\n%s", step.desc, err, string(output))
|
return fmt.Errorf("failed while %s: %v\n%s", step.desc, err, string(output))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure Peering.Peers if we have peer info (for private network discovery)
|
|
||||||
if ipfsPeer != nil && ipfsPeer.PeerID != "" && len(ipfsPeer.Addrs) > 0 {
|
|
||||||
fmt.Fprintf(bi.logWriter, " Configuring Peering.Peers for private network discovery...\n")
|
|
||||||
if err := bi.configureIPFSPeering(ipfsRepoPath, ipfsPeer); err != nil {
|
|
||||||
return fmt.Errorf("failed to configure IPFS peering: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fix ownership (best-effort, don't fail if it doesn't work)
|
// Fix ownership
|
||||||
if err := exec.Command("chown", "-R", "debros:debros", ipfsRepoPath).Run(); err != nil {
|
exec.Command("chown", "-R", "debros:debros", ipfsRepoPath).Run()
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown IPFS repo: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -551,29 +459,16 @@ func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort,
|
|||||||
return fmt.Errorf("failed to parse IPFS config: %w", err)
|
return fmt.Errorf("failed to parse IPFS config: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get existing Addresses section or create new one
|
// Set Addresses
|
||||||
// This preserves any existing settings like Announce, AppendAnnounce, NoAnnounce
|
config["Addresses"] = map[string]interface{}{
|
||||||
addresses, ok := config["Addresses"].(map[string]interface{})
|
"API": []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort)},
|
||||||
if !ok {
|
"Gateway": []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gatewayPort)},
|
||||||
addresses = make(map[string]interface{})
|
"Swarm": []string{
|
||||||
}
|
|
||||||
|
|
||||||
// Update specific address fields while preserving others
|
|
||||||
// Bind API and Gateway to localhost only for security
|
|
||||||
// Swarm binds to all interfaces for peer connections
|
|
||||||
addresses["API"] = []string{
|
|
||||||
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort),
|
|
||||||
}
|
|
||||||
addresses["Gateway"] = []string{
|
|
||||||
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gatewayPort),
|
|
||||||
}
|
|
||||||
addresses["Swarm"] = []string{
|
|
||||||
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
|
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
|
||||||
fmt.Sprintf("/ip6/::/tcp/%d", swarmPort),
|
fmt.Sprintf("/ip6/::/tcp/%d", swarmPort),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
config["Addresses"] = addresses
|
|
||||||
|
|
||||||
// Write config back
|
// Write config back
|
||||||
updatedData, err := json.MarshalIndent(config, "", " ")
|
updatedData, err := json.MarshalIndent(config, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -587,75 +482,25 @@ func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort,
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// configureIPFSPeering configures Peering.Peers in the IPFS config for private network discovery
|
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration
|
||||||
// This allows nodes in a private swarm to find each other even without bootstrap peers
|
|
||||||
func (bi *BinaryInstaller) configureIPFSPeering(ipfsRepoPath string, peer *IPFSPeerInfo) error {
|
|
||||||
configPath := filepath.Join(ipfsRepoPath, "config")
|
|
||||||
|
|
||||||
// Read existing config
|
|
||||||
data, err := os.ReadFile(configPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read IPFS config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var config map[string]interface{}
|
|
||||||
if err := json.Unmarshal(data, &config); err != nil {
|
|
||||||
return fmt.Errorf("failed to parse IPFS config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get existing Peering section or create new one
|
|
||||||
peering, ok := config["Peering"].(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
peering = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create peer entry
|
|
||||||
peerEntry := map[string]interface{}{
|
|
||||||
"ID": peer.PeerID,
|
|
||||||
"Addrs": peer.Addrs,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set Peering.Peers
|
|
||||||
peering["Peers"] = []interface{}{peerEntry}
|
|
||||||
config["Peering"] = peering
|
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " Adding peer: %s (%d addresses)\n", peer.PeerID, len(peer.Addrs))
|
|
||||||
|
|
||||||
// Write config back
|
|
||||||
updatedData, err := json.MarshalIndent(config, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal IPFS config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.WriteFile(configPath, updatedData, 0600); err != nil {
|
|
||||||
return fmt.Errorf("failed to write IPFS config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration (unified - no bootstrap/node distinction)
|
|
||||||
// This runs `ipfs-cluster-service init` to create the service.json configuration file.
|
// This runs `ipfs-cluster-service init` to create the service.json configuration file.
|
||||||
// For existing installations, it ensures the cluster secret is up to date.
|
// For existing installations, it ensures the cluster secret is up to date.
|
||||||
// clusterPeers should be in format: ["/ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>"]
|
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret string, ipfsAPIPort int) error {
|
||||||
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(clusterPath, clusterSecret string, ipfsAPIPort int, clusterPeers []string) error {
|
|
||||||
serviceJSONPath := filepath.Join(clusterPath, "service.json")
|
serviceJSONPath := filepath.Join(clusterPath, "service.json")
|
||||||
configExists := false
|
configExists := false
|
||||||
if _, err := os.Stat(serviceJSONPath); err == nil {
|
if _, err := os.Stat(serviceJSONPath); err == nil {
|
||||||
configExists = true
|
configExists = true
|
||||||
fmt.Fprintf(bi.logWriter, " IPFS Cluster config already exists, ensuring it's up to date...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS Cluster config for %s already exists, ensuring it's up to date...\n", nodeType)
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(bi.logWriter, " Preparing IPFS Cluster path...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Preparing IPFS Cluster path for %s...\n", nodeType)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.MkdirAll(clusterPath, 0755); err != nil {
|
if err := os.MkdirAll(clusterPath, 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create IPFS Cluster directory: %w", err)
|
return fmt.Errorf("failed to create IPFS Cluster directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fix ownership before running init (best-effort)
|
// Fix ownership before running init
|
||||||
if err := exec.Command("chown", "-R", "debros:debros", clusterPath).Run(); err != nil {
|
exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown cluster path before init: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resolve ipfs-cluster-service binary path
|
// Resolve ipfs-cluster-service binary path
|
||||||
clusterBinary, err := bi.ResolveBinaryPath("ipfs-cluster-service", "/usr/local/bin/ipfs-cluster-service", "/usr/bin/ipfs-cluster-service")
|
clusterBinary, err := bi.ResolveBinaryPath("ipfs-cluster-service", "/usr/local/bin/ipfs-cluster-service", "/usr/bin/ipfs-cluster-service")
|
||||||
@ -667,44 +512,31 @@ func (bi *BinaryInstaller) InitializeIPFSClusterConfig(clusterPath, clusterSecre
|
|||||||
if !configExists {
|
if !configExists {
|
||||||
// Initialize cluster config with ipfs-cluster-service init
|
// Initialize cluster config with ipfs-cluster-service init
|
||||||
// This creates the service.json file with all required sections
|
// This creates the service.json file with all required sections
|
||||||
fmt.Fprintf(bi.logWriter, " Initializing IPFS Cluster config...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS Cluster config...\n")
|
||||||
cmd := exec.Command(clusterBinary, "init", "--force")
|
cmd := exec.Command(clusterBinary, "init", "--force")
|
||||||
cmd.Env = append(os.Environ(), "IPFS_CLUSTER_PATH="+clusterPath)
|
cmd.Env = append(os.Environ(), "IPFS_CLUSTER_PATH="+clusterPath)
|
||||||
// Pass CLUSTER_SECRET to init so it writes the correct secret to service.json directly
|
|
||||||
if clusterSecret != "" {
|
|
||||||
cmd.Env = append(cmd.Env, "CLUSTER_SECRET="+clusterSecret)
|
|
||||||
}
|
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
if output, err := cmd.CombinedOutput(); err != nil {
|
||||||
return fmt.Errorf("failed to initialize IPFS Cluster config: %v\n%s", err, string(output))
|
return fmt.Errorf("failed to initialize IPFS Cluster config: %v\n%s", err, string(output))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Always update the cluster secret, IPFS port, and peer addresses (for both new and existing configs)
|
// Always update the cluster secret (for both new and existing configs)
|
||||||
// This ensures existing installations get the secret and port synchronized
|
// This ensures existing installations get the secret synchronized
|
||||||
// We do this AFTER init to ensure our secret takes precedence
|
|
||||||
if clusterSecret != "" {
|
if clusterSecret != "" {
|
||||||
fmt.Fprintf(bi.logWriter, " Updating cluster secret, IPFS port, and peer addresses...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating cluster secret...\n")
|
||||||
if err := bi.updateClusterConfig(clusterPath, clusterSecret, ipfsAPIPort, clusterPeers); err != nil {
|
if err := bi.updateClusterSecret(clusterPath, clusterSecret); err != nil {
|
||||||
return fmt.Errorf("failed to update cluster config: %w", err)
|
return fmt.Errorf("failed to update cluster secret: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify the secret was written correctly
|
// Fix ownership again after updates
|
||||||
if err := bi.verifyClusterSecret(clusterPath, clusterSecret); err != nil {
|
exec.Command("chown", "-R", "debros:debros", clusterPath).Run()
|
||||||
return fmt.Errorf("cluster secret verification failed: %w", err)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ Cluster secret verified\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fix ownership again after updates (best-effort)
|
|
||||||
if err := exec.Command("chown", "-R", "debros:debros", clusterPath).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown cluster path after updates: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateClusterConfig updates the secret, IPFS port, and peer addresses in IPFS Cluster service.json
|
// updateClusterSecret updates the secret field in IPFS Cluster service.json
|
||||||
func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsAPIPort int, bootstrapClusterPeers []string) error {
|
func (bi *BinaryInstaller) updateClusterSecret(clusterPath, secret string) error {
|
||||||
serviceJSONPath := filepath.Join(clusterPath, "service.json")
|
serviceJSONPath := filepath.Join(clusterPath, "service.json")
|
||||||
|
|
||||||
// Read existing config
|
// Read existing config
|
||||||
@ -719,40 +551,12 @@ func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsA
|
|||||||
return fmt.Errorf("failed to parse service.json: %w", err)
|
return fmt.Errorf("failed to parse service.json: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update cluster secret, listen_multiaddress, and peer addresses
|
// Update cluster secret
|
||||||
if cluster, ok := config["cluster"].(map[string]interface{}); ok {
|
if cluster, ok := config["cluster"].(map[string]interface{}); ok {
|
||||||
cluster["secret"] = secret
|
cluster["secret"] = secret
|
||||||
// Set consistent listen_multiaddress - port 9098 for cluster LibP2P communication
|
|
||||||
// This MUST match the port used in GetClusterPeerMultiaddr() and peer_addresses
|
|
||||||
cluster["listen_multiaddress"] = []interface{}{"/ip4/0.0.0.0/tcp/9098"}
|
|
||||||
// Configure peer addresses for cluster discovery
|
|
||||||
// This allows nodes to find and connect to each other
|
|
||||||
if len(bootstrapClusterPeers) > 0 {
|
|
||||||
cluster["peer_addresses"] = bootstrapClusterPeers
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
clusterConfig := map[string]interface{}{
|
config["cluster"] = map[string]interface{}{
|
||||||
"secret": secret,
|
"secret": secret,
|
||||||
"listen_multiaddress": []interface{}{"/ip4/0.0.0.0/tcp/9098"},
|
|
||||||
}
|
|
||||||
if len(bootstrapClusterPeers) > 0 {
|
|
||||||
clusterConfig["peer_addresses"] = bootstrapClusterPeers
|
|
||||||
}
|
|
||||||
config["cluster"] = clusterConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update IPFS port in IPFS Proxy configuration
|
|
||||||
ipfsNodeMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsAPIPort)
|
|
||||||
if api, ok := config["api"].(map[string]interface{}); ok {
|
|
||||||
if ipfsproxy, ok := api["ipfsproxy"].(map[string]interface{}); ok {
|
|
||||||
ipfsproxy["node_multiaddress"] = ipfsNodeMultiaddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update IPFS port in IPFS Connector configuration
|
|
||||||
if ipfsConnector, ok := config["ipfs_connector"].(map[string]interface{}); ok {
|
|
||||||
if ipfshttp, ok := ipfsConnector["ipfshttp"].(map[string]interface{}); ok {
|
|
||||||
ipfshttp["node_multiaddress"] = ipfsNodeMultiaddr
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -769,173 +573,14 @@ func (bi *BinaryInstaller) updateClusterConfig(clusterPath, secret string, ipfsA
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// verifyClusterSecret verifies that the secret in service.json matches the expected value
|
|
||||||
func (bi *BinaryInstaller) verifyClusterSecret(clusterPath, expectedSecret string) error {
|
|
||||||
serviceJSONPath := filepath.Join(clusterPath, "service.json")
|
|
||||||
|
|
||||||
data, err := os.ReadFile(serviceJSONPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read service.json for verification: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var config map[string]interface{}
|
|
||||||
if err := json.Unmarshal(data, &config); err != nil {
|
|
||||||
return fmt.Errorf("failed to parse service.json for verification: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cluster, ok := config["cluster"].(map[string]interface{}); ok {
|
|
||||||
if secret, ok := cluster["secret"].(string); ok {
|
|
||||||
if secret != expectedSecret {
|
|
||||||
return fmt.Errorf("secret mismatch: expected %s, got %s", expectedSecret, secret)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("secret not found in cluster config")
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("cluster section not found in service.json")
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetClusterPeerMultiaddr reads the IPFS Cluster peer ID and returns its multiaddress
|
|
||||||
// Returns format: /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>
|
|
||||||
func (bi *BinaryInstaller) GetClusterPeerMultiaddr(clusterPath string, nodeIP string) (string, error) {
|
|
||||||
identityPath := filepath.Join(clusterPath, "identity.json")
|
|
||||||
|
|
||||||
// Read identity file
|
|
||||||
data, err := os.ReadFile(identityPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("failed to read identity.json: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse JSON
|
|
||||||
var identity map[string]interface{}
|
|
||||||
if err := json.Unmarshal(data, &identity); err != nil {
|
|
||||||
return "", fmt.Errorf("failed to parse identity.json: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get peer ID
|
|
||||||
peerID, ok := identity["id"].(string)
|
|
||||||
if !ok || peerID == "" {
|
|
||||||
return "", fmt.Errorf("peer ID not found in identity.json")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Construct multiaddress: /ip4/<ip>/tcp/9098/p2p/<peer-id>
|
|
||||||
// Port 9098 is the default cluster listen port
|
|
||||||
multiaddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", nodeIP, peerID)
|
|
||||||
return multiaddr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitializeRQLiteDataDir initializes RQLite data directory
|
// InitializeRQLiteDataDir initializes RQLite data directory
|
||||||
func (bi *BinaryInstaller) InitializeRQLiteDataDir(dataDir string) error {
|
func (bi *BinaryInstaller) InitializeRQLiteDataDir(nodeType, dataDir string) error {
|
||||||
fmt.Fprintf(bi.logWriter, " Initializing RQLite data dir...\n")
|
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing RQLite data dir for %s...\n", nodeType)
|
||||||
|
|
||||||
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
if err := os.MkdirAll(dataDir, 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create RQLite data directory: %w", err)
|
return fmt.Errorf("failed to create RQLite data directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := exec.Command("chown", "-R", "debros:debros", dataDir).Run(); err != nil {
|
exec.Command("chown", "-R", "debros:debros", dataDir).Run()
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown RQLite data dir: %v\n", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstallAnyoneClient installs the anyone-client npm package globally
|
|
||||||
func (bi *BinaryInstaller) InstallAnyoneClient() error {
|
|
||||||
// Check if anyone-client is already available via npx (more reliable for scoped packages)
|
|
||||||
// Note: the CLI binary is "anyone-client", not the full scoped package name
|
|
||||||
if cmd := exec.Command("npx", "anyone-client", "--help"); cmd.Run() == nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ anyone-client already installed\n")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " Installing anyone-client...\n")
|
|
||||||
|
|
||||||
// Initialize NPM cache structure to ensure all directories exist
|
|
||||||
// This prevents "mkdir" errors when NPM tries to create nested cache directories
|
|
||||||
fmt.Fprintf(bi.logWriter, " Initializing NPM cache...\n")
|
|
||||||
|
|
||||||
// Create nested cache directories with proper permissions
|
|
||||||
debrosHome := "/home/debros"
|
|
||||||
npmCacheDirs := []string{
|
|
||||||
filepath.Join(debrosHome, ".npm"),
|
|
||||||
filepath.Join(debrosHome, ".npm", "_cacache"),
|
|
||||||
filepath.Join(debrosHome, ".npm", "_cacache", "tmp"),
|
|
||||||
filepath.Join(debrosHome, ".npm", "_logs"),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, dir := range npmCacheDirs {
|
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Failed to create %s: %v\n", dir, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Fix ownership to debros user (sequential to avoid race conditions)
|
|
||||||
if err := exec.Command("chown", "debros:debros", dir).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown %s: %v\n", dir, err)
|
|
||||||
}
|
|
||||||
if err := exec.Command("chmod", "700", dir).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chmod %s: %v\n", dir, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recursively fix ownership of entire .npm directory to ensure all nested files are owned by debros
|
|
||||||
if err := exec.Command("chown", "-R", "debros:debros", filepath.Join(debrosHome, ".npm")).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown .npm directory: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run npm cache verify as debros user with proper environment
|
|
||||||
cacheInitCmd := exec.Command("sudo", "-u", "debros", "npm", "cache", "verify", "--silent")
|
|
||||||
cacheInitCmd.Env = append(os.Environ(), "HOME="+debrosHome)
|
|
||||||
if err := cacheInitCmd.Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ NPM cache verify warning: %v (continuing anyway)\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Install anyone-client globally via npm (using scoped package name)
|
|
||||||
cmd := exec.Command("npm", "install", "-g", "@anyone-protocol/anyone-client")
|
|
||||||
if output, err := cmd.CombinedOutput(); err != nil {
|
|
||||||
return fmt.Errorf("failed to install anyone-client: %w\n%s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create terms-agreement file to bypass interactive prompt when running as a service
|
|
||||||
termsFile := filepath.Join(debrosHome, "terms-agreement")
|
|
||||||
if err := os.WriteFile(termsFile, []byte("agreed"), 0644); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to create terms-agreement: %v\n", err)
|
|
||||||
} else {
|
|
||||||
if err := exec.Command("chown", "debros:debros", termsFile).Run(); err != nil {
|
|
||||||
fmt.Fprintf(bi.logWriter, " ⚠️ Warning: failed to chown terms-agreement: %v\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify installation - try npx with the correct CLI name (anyone-client, not full scoped package name)
|
|
||||||
verifyCmd := exec.Command("npx", "anyone-client", "--help")
|
|
||||||
if err := verifyCmd.Run(); err != nil {
|
|
||||||
// Fallback: check if binary exists in common locations
|
|
||||||
possiblePaths := []string{
|
|
||||||
"/usr/local/bin/anyone-client",
|
|
||||||
"/usr/bin/anyone-client",
|
|
||||||
}
|
|
||||||
found := false
|
|
||||||
for _, path := range possiblePaths {
|
|
||||||
if info, err := os.Stat(path); err == nil && !info.IsDir() {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
// Try npm bin -g to find global bin directory
|
|
||||||
cmd := exec.Command("npm", "bin", "-g")
|
|
||||||
if output, err := cmd.Output(); err == nil {
|
|
||||||
npmBinDir := strings.TrimSpace(string(output))
|
|
||||||
candidate := filepath.Join(npmBinDir, "anyone-client")
|
|
||||||
if info, err := os.Stat(candidate); err == nil && !info.IsDir() {
|
|
||||||
found = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return fmt.Errorf("anyone-client installation verification failed - package may not provide a binary, but npx should work")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(bi.logWriter, " ✓ anyone-client installed\n")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -7,24 +7,21 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProductionSetup orchestrates the entire production deployment
|
// ProductionSetup orchestrates the entire production deployment
|
||||||
type ProductionSetup struct {
|
type ProductionSetup struct {
|
||||||
osInfo *OSInfo
|
osInfo *OSInfo
|
||||||
arch string
|
arch string
|
||||||
oramaHome string
|
debrosHome string
|
||||||
oramaDir string
|
debrosDir string
|
||||||
logWriter io.Writer
|
logWriter io.Writer
|
||||||
forceReconfigure bool
|
forceReconfigure bool
|
||||||
skipOptionalDeps bool
|
skipOptionalDeps bool
|
||||||
skipResourceChecks bool
|
|
||||||
privChecker *PrivilegeChecker
|
privChecker *PrivilegeChecker
|
||||||
osDetector *OSDetector
|
osDetector *OSDetector
|
||||||
archDetector *ArchitectureDetector
|
archDetector *ArchitectureDetector
|
||||||
resourceChecker *ResourceChecker
|
resourceChecker *ResourceChecker
|
||||||
portChecker *PortChecker
|
|
||||||
fsProvisioner *FilesystemProvisioner
|
fsProvisioner *FilesystemProvisioner
|
||||||
userProvisioner *UserProvisioner
|
userProvisioner *UserProvisioner
|
||||||
stateDetector *StateDetector
|
stateDetector *StateDetector
|
||||||
@ -34,13 +31,12 @@ type ProductionSetup struct {
|
|||||||
serviceController *SystemdController
|
serviceController *SystemdController
|
||||||
binaryInstaller *BinaryInstaller
|
binaryInstaller *BinaryInstaller
|
||||||
branch string
|
branch string
|
||||||
skipRepoUpdate bool
|
|
||||||
NodePeerID string // Captured during Phase3 for later display
|
NodePeerID string // Captured during Phase3 for later display
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadBranchPreference reads the stored branch preference from disk
|
// ReadBranchPreference reads the stored branch preference from disk
|
||||||
func ReadBranchPreference(oramaDir string) string {
|
func ReadBranchPreference(debrosDir string) string {
|
||||||
branchFile := filepath.Join(oramaDir, ".branch")
|
branchFile := filepath.Join(debrosDir, ".branch")
|
||||||
data, err := os.ReadFile(branchFile)
|
data, err := os.ReadFile(branchFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "main" // Default to main if file doesn't exist
|
return "main" // Default to main if file doesn't exist
|
||||||
@ -53,9 +49,9 @@ func ReadBranchPreference(oramaDir string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SaveBranchPreference saves the branch preference to disk
|
// SaveBranchPreference saves the branch preference to disk
|
||||||
func SaveBranchPreference(oramaDir, branch string) error {
|
func SaveBranchPreference(debrosDir, branch string) error {
|
||||||
branchFile := filepath.Join(oramaDir, ".branch")
|
branchFile := filepath.Join(debrosDir, ".branch")
|
||||||
if err := os.MkdirAll(oramaDir, 0755); err != nil {
|
if err := os.MkdirAll(debrosDir, 0755); err != nil {
|
||||||
return fmt.Errorf("failed to create debros directory: %w", err)
|
return fmt.Errorf("failed to create debros directory: %w", err)
|
||||||
}
|
}
|
||||||
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
|
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
|
||||||
@ -66,35 +62,32 @@ func SaveBranchPreference(oramaDir, branch string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewProductionSetup creates a new production setup orchestrator
|
// NewProductionSetup creates a new production setup orchestrator
|
||||||
func NewProductionSetup(oramaHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool) *ProductionSetup {
|
func NewProductionSetup(debrosHome string, logWriter io.Writer, forceReconfigure bool, branch string) *ProductionSetup {
|
||||||
oramaDir := filepath.Join(oramaHome, ".orama")
|
debrosDir := debrosHome + "/.debros"
|
||||||
arch, _ := (&ArchitectureDetector{}).Detect()
|
arch, _ := (&ArchitectureDetector{}).Detect()
|
||||||
|
|
||||||
// If branch is empty, try to read from stored preference, otherwise default to main
|
// If branch is empty, try to read from stored preference, otherwise default to main
|
||||||
if branch == "" {
|
if branch == "" {
|
||||||
branch = ReadBranchPreference(oramaDir)
|
branch = ReadBranchPreference(debrosDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &ProductionSetup{
|
return &ProductionSetup{
|
||||||
oramaHome: oramaHome,
|
debrosHome: debrosHome,
|
||||||
oramaDir: oramaDir,
|
debrosDir: debrosDir,
|
||||||
logWriter: logWriter,
|
logWriter: logWriter,
|
||||||
forceReconfigure: forceReconfigure,
|
forceReconfigure: forceReconfigure,
|
||||||
arch: arch,
|
arch: arch,
|
||||||
branch: branch,
|
branch: branch,
|
||||||
skipRepoUpdate: skipRepoUpdate,
|
|
||||||
skipResourceChecks: skipResourceChecks,
|
|
||||||
privChecker: &PrivilegeChecker{},
|
privChecker: &PrivilegeChecker{},
|
||||||
osDetector: &OSDetector{},
|
osDetector: &OSDetector{},
|
||||||
archDetector: &ArchitectureDetector{},
|
archDetector: &ArchitectureDetector{},
|
||||||
resourceChecker: NewResourceChecker(),
|
resourceChecker: NewResourceChecker(),
|
||||||
portChecker: NewPortChecker(),
|
fsProvisioner: NewFilesystemProvisioner(debrosHome),
|
||||||
fsProvisioner: NewFilesystemProvisioner(oramaHome),
|
userProvisioner: NewUserProvisioner("debros", debrosHome, "/bin/bash"),
|
||||||
userProvisioner: NewUserProvisioner("debros", oramaHome, "/bin/bash"),
|
stateDetector: NewStateDetector(debrosDir),
|
||||||
stateDetector: NewStateDetector(oramaDir),
|
configGenerator: NewConfigGenerator(debrosDir),
|
||||||
configGenerator: NewConfigGenerator(oramaDir),
|
secretGenerator: NewSecretGenerator(debrosDir),
|
||||||
secretGenerator: NewSecretGenerator(oramaDir),
|
serviceGenerator: NewSystemdServiceGenerator(debrosHome, debrosDir),
|
||||||
serviceGenerator: NewSystemdServiceGenerator(oramaHome, oramaDir),
|
|
||||||
serviceController: NewSystemdController(),
|
serviceController: NewSystemdController(),
|
||||||
binaryInstaller: NewBinaryInstaller(arch, logWriter),
|
binaryInstaller: NewBinaryInstaller(arch, logWriter),
|
||||||
}
|
}
|
||||||
@ -162,10 +155,7 @@ func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
|
|||||||
ps.logf(" ✓ Basic dependencies available")
|
ps.logf(" ✓ Basic dependencies available")
|
||||||
|
|
||||||
// Check system resources
|
// Check system resources
|
||||||
if ps.skipResourceChecks {
|
if err := ps.resourceChecker.CheckDiskSpace(ps.debrosHome); err != nil {
|
||||||
ps.logf(" ⚠️ Skipping system resource checks (disk, RAM, CPU) due to --ignore-resource-checks flag")
|
|
||||||
} else {
|
|
||||||
if err := ps.resourceChecker.CheckDiskSpace(ps.oramaHome); err != nil {
|
|
||||||
ps.logf(" ❌ %v", err)
|
ps.logf(" ❌ %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -182,7 +172,6 @@ func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
ps.logf(" ✓ Sufficient CPU cores available")
|
ps.logf(" ✓ Sufficient CPU cores available")
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -211,7 +200,7 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create directory structure (unified structure)
|
// Create directory structure
|
||||||
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
|
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
|
||||||
return fmt.Errorf("failed to create directory structure: %w", err)
|
return fmt.Errorf("failed to create directory structure: %w", err)
|
||||||
}
|
}
|
||||||
@ -257,13 +246,8 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
|
|||||||
ps.logf(" ⚠️ Olric install warning: %v", err)
|
ps.logf(" ⚠️ Olric install warning: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Install anyone-client for SOCKS5 proxy
|
|
||||||
if err := ps.binaryInstaller.InstallAnyoneClient(); err != nil {
|
|
||||||
ps.logf(" ⚠️ anyone-client install warning: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Install DeBros binaries
|
// Install DeBros binaries
|
||||||
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.oramaHome, ps.skipRepoUpdate); err != nil {
|
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.debrosHome); err != nil {
|
||||||
return fmt.Errorf("failed to install DeBros binaries: %w", err)
|
return fmt.Errorf("failed to install DeBros binaries: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -272,23 +256,16 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Phase2cInitializeServices initializes service repositories and configurations
|
// Phase2cInitializeServices initializes service repositories and configurations
|
||||||
// ipfsPeer can be nil for the first node, or contain peer info for joining nodes
|
func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string) error {
|
||||||
// ipfsClusterPeer can be nil for the first node, or contain IPFS Cluster peer info for joining nodes
|
|
||||||
func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vpsIP string, ipfsPeer *IPFSPeerInfo, ipfsClusterPeer *IPFSClusterPeerInfo) error {
|
|
||||||
ps.logf("Phase 2c: Initializing services...")
|
ps.logf("Phase 2c: Initializing services...")
|
||||||
|
|
||||||
// Ensure directories exist (unified structure)
|
// Build paths with nodeType awareness to match systemd unit definitions
|
||||||
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
|
dataDir := filepath.Join(ps.debrosDir, "data", nodeType)
|
||||||
return fmt.Errorf("failed to create directories: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build paths - unified data directory (all nodes equal)
|
|
||||||
dataDir := filepath.Join(ps.oramaDir, "data")
|
|
||||||
|
|
||||||
// Initialize IPFS repo with correct path structure
|
// Initialize IPFS repo with correct path structure
|
||||||
// Use port 4501 for API (to avoid conflict with RQLite on 5001), 8080 for gateway (standard), 4101 for swarm (to avoid conflict with LibP2P on 4001)
|
// Use port 4501 for API (to avoid conflict with RQLite on 5001), 8080 for gateway (standard), 4001 for swarm
|
||||||
ipfsRepoPath := filepath.Join(dataDir, "ipfs", "repo")
|
ipfsRepoPath := filepath.Join(dataDir, "ipfs", "repo")
|
||||||
if err := ps.binaryInstaller.InitializeIPFSRepo(ipfsRepoPath, filepath.Join(ps.oramaDir, "secrets", "swarm.key"), 4501, 8080, 4101, ipfsPeer); err != nil {
|
if err := ps.binaryInstaller.InitializeIPFSRepo(nodeType, ipfsRepoPath, filepath.Join(ps.debrosDir, "secrets", "swarm.key"), 4501, 8080, 4001); err != nil {
|
||||||
return fmt.Errorf("failed to initialize IPFS repo: %w", err)
|
return fmt.Errorf("failed to initialize IPFS repo: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -298,54 +275,22 @@ func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vps
 if err != nil {
 return fmt.Errorf("failed to get cluster secret: %w", err)
 }
-// Get cluster peer addresses from IPFS Cluster peer info if available
-var clusterPeers []string
-if ipfsClusterPeer != nil && ipfsClusterPeer.PeerID != "" {
-// Construct cluster peer multiaddress using the discovered peer ID
-// Format: /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>
-peerIP := inferPeerIP(peerAddresses, vpsIP)
-if peerIP != "" {
-// Construct the bootstrap multiaddress for IPFS Cluster
-// Note: IPFS Cluster listens on port 9098 for cluster communication
-clusterBootstrapAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, ipfsClusterPeer.PeerID)
-clusterPeers = []string{clusterBootstrapAddr}
-ps.logf(" ℹ️ IPFS Cluster will connect to peer: %s", clusterBootstrapAddr)
-} else if len(ipfsClusterPeer.Addrs) > 0 {
-// Fallback: use the addresses from discovery (if they include peer ID)
-for _, addr := range ipfsClusterPeer.Addrs {
-if strings.Contains(addr, ipfsClusterPeer.PeerID) {
-clusterPeers = append(clusterPeers, addr)
-}
-}
-if len(clusterPeers) > 0 {
-ps.logf(" ℹ️ IPFS Cluster will connect to discovered peers: %v", clusterPeers)
-}
-}
-}
-
-if err := ps.binaryInstaller.InitializeIPFSClusterConfig(clusterPath, clusterSecret, 4501, clusterPeers); err != nil {
+if err := ps.binaryInstaller.InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret, 4501); err != nil {
 return fmt.Errorf("failed to initialize IPFS Cluster: %w", err)
 }
 
 // Initialize RQLite data directory
 rqliteDataDir := filepath.Join(dataDir, "rqlite")
-if err := ps.binaryInstaller.InitializeRQLiteDataDir(rqliteDataDir); err != nil {
+if err := ps.binaryInstaller.InitializeRQLiteDataDir(nodeType, rqliteDataDir); err != nil {
 ps.logf(" ⚠️ RQLite initialization warning: %v", err)
 }
 
-// Ensure all directories and files created during service initialization have correct ownership
-// This is critical because directories/files created as root need to be owned by debros user
-if err := ps.fsProvisioner.FixOwnership(); err != nil {
-return fmt.Errorf("failed to fix ownership after service initialization: %w", err)
-}
-
 ps.logf(" ✓ Services initialized")
 return nil
 }
 
 // Phase3GenerateSecrets generates shared secrets and keys
-func (ps *ProductionSetup) Phase3GenerateSecrets() error {
+func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
 ps.logf("Phase 3: Generating secrets...")
 
 // Cluster secret
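For context, the branch removed above builds an IPFS Cluster bootstrap multiaddress of the form /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>. A minimal, self-contained sketch of that construction (the helper name and example values are illustrative, not part of the repository):

package main

import "fmt"

// buildClusterBootstrapAddr mirrors the removed logic: combine the peer's IP
// with its IPFS Cluster peer ID; cluster communication listens on port 9098.
func buildClusterBootstrapAddr(peerIP, clusterPeerID string) string {
	return fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, clusterPeerID)
}

func main() {
	// Illustrative values only.
	fmt.Println(buildClusterBootstrapAddr("10.0.0.1", "12D3KooWExamplePeerID"))
}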
@@ -360,8 +305,13 @@ func (ps *ProductionSetup) Phase3GenerateSecrets() error {
 }
 ps.logf(" ✓ IPFS swarm key ensured")
 
-// Node identity (unified architecture)
-peerID, err := ps.secretGenerator.EnsureNodeIdentity()
+// Node identity
+nodeType := "node"
+if isBootstrap {
+nodeType = "bootstrap"
+}
+
+peerID, err := ps.secretGenerator.EnsureNodeIdentity(nodeType)
 if err != nil {
 return fmt.Errorf("failed to ensure node identity: %w", err)
 }
@@ -373,7 +323,7 @@ func (ps *ProductionSetup) Phase3GenerateSecrets() error {
 }
 
 // Phase4GenerateConfigs generates node, gateway, and service configs
-func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP string, enableHTTPS bool, domain string, joinAddress string) error {
+func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string, bootstrapJoin string) error {
 if ps.IsUpdate() {
 ps.logf("Phase 4: Updating configurations...")
 ps.logf(" (Existing configs will be updated to latest format)")
@@ -381,38 +331,44 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP s
 ps.logf("Phase 4: Generating configurations...")
 }
 
-// Node config (unified architecture)
-nodeConfig, err := ps.configGenerator.GenerateNodeConfig(peerAddresses, vpsIP, joinAddress, domain, enableHTTPS)
+// Node config
+nodeConfig, err := ps.configGenerator.GenerateNodeConfig(isBootstrap, bootstrapPeers, vpsIP, bootstrapJoin)
 if err != nil {
 return fmt.Errorf("failed to generate node config: %w", err)
 }
 
-configFile := "node.yaml"
+var configFile string
+if isBootstrap {
+configFile = "bootstrap.yaml"
+} else {
+configFile = "node.yaml"
+}
+
 if err := ps.secretGenerator.SaveConfig(configFile, nodeConfig); err != nil {
 return fmt.Errorf("failed to save node config: %w", err)
 }
 ps.logf(" ✓ Node config generated: %s", configFile)
 
-// Gateway configuration is now embedded in each node's config
-// No separate gateway.yaml needed - each node runs its own embedded gateway
-
-// Olric config:
-// - HTTP API binds to localhost for security (accessed via gateway)
-// - Memberlist binds to 0.0.0.0 for cluster communication across nodes
-// - Environment "lan" for production multi-node clustering
-olricConfig, err := ps.configGenerator.GenerateOlricConfig(
-"127.0.0.1", // HTTP API on localhost
-3320,
-"0.0.0.0", // Memberlist on all interfaces for clustering
-3322,
-"lan", // Production environment
-)
+// Gateway config
+olricServers := []string{"127.0.0.1:3320"}
+gatewayConfig, err := ps.configGenerator.GenerateGatewayConfig(bootstrapPeers, enableHTTPS, domain, olricServers)
+if err != nil {
+return fmt.Errorf("failed to generate gateway config: %w", err)
+}
+
+if err := ps.secretGenerator.SaveConfig("gateway.yaml", gatewayConfig); err != nil {
+return fmt.Errorf("failed to save gateway config: %w", err)
+}
+ps.logf(" ✓ Gateway config generated")
+
+// Olric config
+olricConfig, err := ps.configGenerator.GenerateOlricConfig("localhost", 3320, 3322)
 if err != nil {
 return fmt.Errorf("failed to generate olric config: %w", err)
 }
 
 // Create olric config directory
-olricConfigDir := ps.oramaDir + "/configs/olric"
+olricConfigDir := ps.debrosDir + "/configs/olric"
 if err := os.MkdirAll(olricConfigDir, 0755); err != nil {
 return fmt.Errorf("failed to create olric config directory: %w", err)
 }
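The removed comments above describe a bind-address split: the Olric HTTP API stays on loopback while the memberlist port is reachable from other nodes. A minimal sketch of that split in plain Go (addresses and ports are illustrative, unrelated to Olric's own API):

package main

import (
	"fmt"
	"net"
)

func main() {
	// HTTP API: loopback only, reachable just through the local gateway.
	apiListener, err := net.Listen("tcp", "127.0.0.1:3320")
	if err != nil {
		panic(err)
	}
	defer apiListener.Close()

	// Memberlist: all interfaces, so peers on other nodes can form the cluster.
	memberListener, err := net.Listen("tcp", "0.0.0.0:3322")
	if err != nil {
		panic(err)
	}
	defer memberListener.Close()

	fmt.Println("api:", apiListener.Addr(), "memberlist:", memberListener.Addr())
}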
@@ -428,8 +384,7 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP s
 }
 
 // Phase5CreateSystemdServices creates and enables systemd units
-// enableHTTPS determines the RQLite Raft port (7002 when SNI is enabled, 7001 otherwise)
-func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
+func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP string) error {
 ps.logf("Phase 5: Creating systemd services...")
 
 // Validate all required binaries are available before creating services
@@ -441,26 +396,50 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
 if err != nil {
 return fmt.Errorf("ipfs-cluster-service binary not available: %w", err)
 }
+rqliteBinary, err := ps.binaryInstaller.ResolveBinaryPath("rqlited", "/usr/local/bin/rqlited", "/usr/bin/rqlited")
+if err != nil {
+return fmt.Errorf("rqlited binary not available: %w", err)
+}
 olricBinary, err := ps.binaryInstaller.ResolveBinaryPath("olric-server", "/usr/local/bin/olric-server", "/usr/bin/olric-server")
 if err != nil {
 return fmt.Errorf("olric-server binary not available: %w", err)
 }
 
-// IPFS service (unified - no bootstrap/node distinction)
-ipfsUnit := ps.serviceGenerator.GenerateIPFSService(ipfsBinary)
-if err := ps.serviceController.WriteServiceUnit("debros-ipfs.service", ipfsUnit); err != nil {
+// IPFS service
+ipfsUnit := ps.serviceGenerator.GenerateIPFSService(nodeType, ipfsBinary)
+unitName := fmt.Sprintf("debros-ipfs-%s.service", nodeType)
+if err := ps.serviceController.WriteServiceUnit(unitName, ipfsUnit); err != nil {
 return fmt.Errorf("failed to write IPFS service: %w", err)
 }
-ps.logf(" ✓ IPFS service created: debros-ipfs.service")
+ps.logf(" ✓ IPFS service created: %s", unitName)
 
 // IPFS Cluster service
-clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(clusterBinary)
-if err := ps.serviceController.WriteServiceUnit("debros-ipfs-cluster.service", clusterUnit); err != nil {
+clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(nodeType, clusterBinary)
+clusterUnitName := fmt.Sprintf("debros-ipfs-cluster-%s.service", nodeType)
+if err := ps.serviceController.WriteServiceUnit(clusterUnitName, clusterUnit); err != nil {
 return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
 }
-ps.logf(" ✓ IPFS Cluster service created: debros-ipfs-cluster.service")
+ps.logf(" ✓ IPFS Cluster service created: %s", clusterUnitName)
 
-// RQLite is managed internally by each node - no separate systemd service needed
+// RQLite service with join address for non-bootstrap nodes
+rqliteJoinAddr := ""
+if nodeType != "bootstrap" && vpsIP != "" {
+rqliteJoinAddr = vpsIP + ":7001"
+}
+
+// Log the advertise configuration for verification
+advertiseIP := vpsIP
+if advertiseIP == "" {
+advertiseIP = "127.0.0.1"
+}
+ps.logf(" RQLite will advertise: %s (advertise IP: %s)", rqliteJoinAddr, advertiseIP)
+
+rqliteUnit := ps.serviceGenerator.GenerateRQLiteService(nodeType, rqliteBinary, 5001, 7001, rqliteJoinAddr, advertiseIP)
+rqliteUnitName := fmt.Sprintf("debros-rqlite-%s.service", nodeType)
+if err := ps.serviceController.WriteServiceUnit(rqliteUnitName, rqliteUnit); err != nil {
+return fmt.Errorf("failed to write RQLite service: %w", err)
+}
+ps.logf(" ✓ RQLite service created: %s", rqliteUnitName)
 
 // Olric service
 olricUnit := ps.serviceGenerator.GenerateOlricService(olricBinary)
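A minimal, self-contained sketch of the join/advertise address selection added in the hunk above (the helper name and values are illustrative; the real code uses the node's configured VPS IP):

package main

import "fmt"

// rqliteAddrs mirrors the nightly logic: non-bootstrap nodes get a join
// address on Raft port 7001, and every node advertises its VPS IP, falling
// back to loopback when none is configured.
func rqliteAddrs(nodeType, vpsIP string) (joinAddr, advertiseIP string) {
	if nodeType != "bootstrap" && vpsIP != "" {
		joinAddr = vpsIP + ":7001"
	}
	advertiseIP = vpsIP
	if advertiseIP == "" {
		advertiseIP = "127.0.0.1"
	}
	return joinAddr, advertiseIP
}

func main() {
	fmt.Println(rqliteAddrs("bootstrap", "10.0.0.1")) // "" 10.0.0.1
	fmt.Println(rqliteAddrs("node", "10.0.0.2"))      // 10.0.0.2:7001 10.0.0.2
}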
@@ -469,19 +448,20 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
 }
 ps.logf(" ✓ Olric service created")
 
-// Node service (unified - includes embedded gateway)
-nodeUnit := ps.serviceGenerator.GenerateNodeService()
-if err := ps.serviceController.WriteServiceUnit("debros-node.service", nodeUnit); err != nil {
+// Node service
+nodeUnit := ps.serviceGenerator.GenerateNodeService(nodeType)
+nodeUnitName := fmt.Sprintf("debros-node-%s.service", nodeType)
+if err := ps.serviceController.WriteServiceUnit(nodeUnitName, nodeUnit); err != nil {
 return fmt.Errorf("failed to write Node service: %w", err)
 }
-ps.logf(" ✓ Node service created: debros-node.service (with embedded gateway)")
+ps.logf(" ✓ Node service created: %s", nodeUnitName)
 
-// Anyone Client service (SOCKS5 proxy)
-anyoneUnit := ps.serviceGenerator.GenerateAnyoneClientService()
-if err := ps.serviceController.WriteServiceUnit("debros-anyone-client.service", anyoneUnit); err != nil {
-return fmt.Errorf("failed to write Anyone Client service: %w", err)
+// Gateway service (optional, only on specific nodes)
+gatewayUnit := ps.serviceGenerator.GenerateGatewayService(nodeType)
+if err := ps.serviceController.WriteServiceUnit("debros-gateway.service", gatewayUnit); err != nil {
+return fmt.Errorf("failed to write Gateway service: %w", err)
 }
-ps.logf(" ✓ Anyone Client service created")
+ps.logf(" ✓ Gateway service created")
 
 // Reload systemd daemon
 if err := ps.serviceController.DaemonReload(); err != nil {
@@ -489,10 +469,8 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
 }
 ps.logf(" ✓ Systemd daemon reloaded")
 
-// Enable services (unified names - no bootstrap/node distinction)
-// Note: debros-gateway.service is no longer needed - each node has an embedded gateway
-// Note: debros-rqlite.service is NOT created - RQLite is managed by each node internally
-services := []string{"debros-ipfs.service", "debros-ipfs-cluster.service", "debros-olric.service", "debros-node.service", "debros-anyone-client.service"}
+// Enable services
+services := []string{unitName, clusterUnitName, rqliteUnitName, "debros-olric.service", nodeUnitName, "debros-gateway.service"}
 for _, svc := range services {
 if err := ps.serviceController.EnableService(svc); err != nil {
 ps.logf(" ⚠️ Failed to enable %s: %v", svc, err)
@@ -504,17 +482,8 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
 // Start services in dependency order
 ps.logf(" Starting services...")
 
-// Start infrastructure first (IPFS, Olric, Anyone Client) - RQLite is managed internally by each node
-infraServices := []string{"debros-ipfs.service", "debros-olric.service"}
-
-// Check if port 9050 is already in use (e.g., another anyone-client or similar service)
-if ps.portChecker.IsPortInUse(9050) {
-ps.logf(" ℹ️ Port 9050 is already in use (anyone-client or similar service running)")
-ps.logf(" ℹ️ Skipping debros-anyone-client startup - using existing service")
-} else {
-infraServices = append(infraServices, "debros-anyone-client.service")
-}
-
+// Start infrastructure first (IPFS, RQLite, Olric)
+infraServices := []string{unitName, rqliteUnitName, "debros-olric.service"}
 for _, svc := range infraServices {
 if err := ps.serviceController.StartService(svc); err != nil {
 ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
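A minimal sketch of a port-in-use probe like the ps.portChecker.IsPortInUse(9050) call removed above (the helper shown here is illustrative, not the project's implementation):

package main

import (
	"fmt"
	"net"
	"time"
)

// isPortInUse reports whether something is already listening on the given
// local TCP port, e.g. an anyone-client SOCKS5 proxy on 9050.
func isPortInUse(port int) bool {
	conn, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%d", port), 500*time.Millisecond)
	if err != nil {
		return false
	}
	conn.Close()
	return true
}

func main() {
	fmt.Println("port 9050 in use:", isPortInUse(9050))
}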
@@ -524,20 +493,23 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
 }
 
 // Wait a moment for infrastructure to stabilize
-time.Sleep(2 * time.Second)
+exec.Command("sleep", "2").Run()
 
 // Start IPFS Cluster
-if err := ps.serviceController.StartService("debros-ipfs-cluster.service"); err != nil {
-ps.logf(" ⚠️ Failed to start debros-ipfs-cluster.service: %v", err)
+if err := ps.serviceController.StartService(clusterUnitName); err != nil {
+ps.logf(" ⚠️ Failed to start %s: %v", clusterUnitName, err)
 } else {
-ps.logf(" - debros-ipfs-cluster.service started")
+ps.logf(" - %s started", clusterUnitName)
 }
 
-// Start node service (gateway is embedded in node, no separate service needed)
-if err := ps.serviceController.StartService("debros-node.service"); err != nil {
-ps.logf(" ⚠️ Failed to start debros-node.service: %v", err)
+// Start application services
+appServices := []string{nodeUnitName, "debros-gateway.service"}
+for _, svc := range appServices {
+if err := ps.serviceController.StartService(svc); err != nil {
+ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
 } else {
-ps.logf(" - debros-node.service started (with embedded gateway)")
+ps.logf(" - %s started", svc)
+}
 }
 
 ps.logf(" ✓ All services started")
@@ -551,20 +523,19 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
 ps.logf(strings.Repeat("=", 70))
 ps.logf("\nNode Peer ID: %s", peerID)
 ps.logf("\nService Management:")
-ps.logf(" systemctl status debros-ipfs")
-ps.logf(" journalctl -u debros-node -f")
-ps.logf(" tail -f %s/logs/node.log", ps.oramaDir)
+ps.logf(" systemctl status debros-ipfs-bootstrap")
+ps.logf(" journalctl -u debros-node-bootstrap -f")
+ps.logf(" tail -f %s/logs/node-bootstrap.log", ps.debrosDir)
 ps.logf("\nLog Files:")
-ps.logf(" %s/logs/ipfs.log", ps.oramaDir)
-ps.logf(" %s/logs/ipfs-cluster.log", ps.oramaDir)
-ps.logf(" %s/logs/olric.log", ps.oramaDir)
-ps.logf(" %s/logs/node.log", ps.oramaDir)
-ps.logf(" %s/logs/gateway.log", ps.oramaDir)
-ps.logf(" %s/logs/anyone-client.log", ps.oramaDir)
+ps.logf(" %s/logs/ipfs-bootstrap.log", ps.debrosDir)
+ps.logf(" %s/logs/ipfs-cluster-bootstrap.log", ps.debrosDir)
+ps.logf(" %s/logs/rqlite-bootstrap.log", ps.debrosDir)
+ps.logf(" %s/logs/olric.log", ps.debrosDir)
+ps.logf(" %s/logs/node-bootstrap.log", ps.debrosDir)
+ps.logf(" %s/logs/gateway.log", ps.debrosDir)
 ps.logf("\nStart All Services:")
-ps.logf(" systemctl start debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-client debros-node")
+ps.logf(" systemctl start debros-ipfs-bootstrap debros-ipfs-cluster-bootstrap debros-rqlite-bootstrap debros-olric debros-node-bootstrap debros-gateway")
 ps.logf("\nVerify Installation:")
 ps.logf(" curl http://localhost:6001/health")
-ps.logf(" curl http://localhost:5001/status")
-ps.logf(" # Anyone Client SOCKS5 proxy on localhost:9050\n")
+ps.logf(" curl http://localhost:5001/status\n")
 }
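The verification steps logged above can also be scripted; a minimal sketch that probes the same local endpoints (assumes a node is running on this host):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Same endpoints as the curl commands printed by LogSetupComplete.
	for _, url := range []string{"http://localhost:6001/health", "http://localhost:5001/status"} {
		resp, err := http.Get(url)
		if err != nil {
			fmt.Println(url, "unreachable:", err)
			continue
		}
		resp.Body.Close()
		fmt.Println(url, "->", resp.Status)
	}
}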
@@ -10,36 +10,37 @@ import (
 
 // FilesystemProvisioner manages directory creation and permissions
 type FilesystemProvisioner struct {
-oramaHome string
-oramaDir string
+debrosHome string
+debrosDir string
 logWriter interface{} // Can be io.Writer for logging
 }
 
 // NewFilesystemProvisioner creates a new provisioner
-func NewFilesystemProvisioner(oramaHome string) *FilesystemProvisioner {
+func NewFilesystemProvisioner(debrosHome string) *FilesystemProvisioner {
 return &FilesystemProvisioner{
-oramaHome: oramaHome,
-oramaDir: filepath.Join(oramaHome, ".orama"),
+debrosHome: debrosHome,
+debrosDir: filepath.Join(debrosHome, ".debros"),
 }
 }
 
-// EnsureDirectoryStructure creates all required directories (unified structure)
+// EnsureDirectoryStructure creates all required directories
 func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
-// All directories needed for unified node structure
 dirs := []string{
-fp.oramaDir,
-filepath.Join(fp.oramaDir, "configs"),
-filepath.Join(fp.oramaDir, "secrets"),
-filepath.Join(fp.oramaDir, "data"),
-filepath.Join(fp.oramaDir, "data", "ipfs", "repo"),
-filepath.Join(fp.oramaDir, "data", "ipfs-cluster"),
-filepath.Join(fp.oramaDir, "data", "rqlite"),
-filepath.Join(fp.oramaDir, "logs"),
-filepath.Join(fp.oramaDir, "tls-cache"),
-filepath.Join(fp.oramaDir, "backups"),
-filepath.Join(fp.oramaHome, "bin"),
-filepath.Join(fp.oramaHome, "src"),
-filepath.Join(fp.oramaHome, ".npm"),
+fp.debrosDir,
+filepath.Join(fp.debrosDir, "configs"),
+filepath.Join(fp.debrosDir, "secrets"),
+filepath.Join(fp.debrosDir, "data"),
+filepath.Join(fp.debrosDir, "data", "bootstrap", "ipfs", "repo"),
+filepath.Join(fp.debrosDir, "data", "bootstrap", "ipfs-cluster"),
+filepath.Join(fp.debrosDir, "data", "bootstrap", "rqlite"),
+filepath.Join(fp.debrosDir, "data", "node", "ipfs", "repo"),
+filepath.Join(fp.debrosDir, "data", "node", "ipfs-cluster"),
+filepath.Join(fp.debrosDir, "data", "node", "rqlite"),
+filepath.Join(fp.debrosDir, "logs"),
+filepath.Join(fp.debrosDir, "tls-cache"),
+filepath.Join(fp.debrosDir, "backups"),
+filepath.Join(fp.debrosHome, "bin"),
+filepath.Join(fp.debrosHome, "src"),
 }
 
 for _, dir := range dirs {
@@ -48,65 +49,27 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
 }
 }
 
-// Remove any stray cluster-secret file from root .orama directory
-// The correct location is .orama/secrets/cluster-secret
-strayClusterSecret := filepath.Join(fp.oramaDir, "cluster-secret")
-if _, err := os.Stat(strayClusterSecret); err == nil {
-if err := os.Remove(strayClusterSecret); err != nil {
-return fmt.Errorf("failed to remove stray cluster-secret file: %w", err)
-}
-}
-
-// Create log files with correct permissions so systemd can write to them
-logsDir := filepath.Join(fp.oramaDir, "logs")
-logFiles := []string{
-"olric.log",
-"gateway.log",
-"ipfs.log",
-"ipfs-cluster.log",
-"node.log",
-"anyone-client.log",
-}
-
-for _, logFile := range logFiles {
-logPath := filepath.Join(logsDir, logFile)
-// Create empty file if it doesn't exist
-if _, err := os.Stat(logPath); os.IsNotExist(err) {
-if err := os.WriteFile(logPath, []byte{}, 0644); err != nil {
-return fmt.Errorf("failed to create log file %s: %w", logPath, err)
-}
-}
-}
-
 return nil
 }
 
-// FixOwnership changes ownership of .orama directory to debros user
+// FixOwnership changes ownership of .debros directory to debros user
 func (fp *FilesystemProvisioner) FixOwnership() error {
-// Fix entire .orama directory recursively (includes all data, configs, logs, etc.)
-cmd := exec.Command("chown", "-R", "debros:debros", fp.oramaDir)
-if output, err := cmd.CombinedOutput(); err != nil {
-return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaDir, err, string(output))
+cmd := exec.Command("chown", "-R", "debros:debros", fp.debrosDir)
+if err := cmd.Run(); err != nil {
+return fmt.Errorf("failed to set ownership for %s: %w", fp.debrosDir, err)
 }
 
 // Also fix home directory ownership
-cmd = exec.Command("chown", "debros:debros", fp.oramaHome)
-if output, err := cmd.CombinedOutput(); err != nil {
-return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaHome, err, string(output))
+cmd = exec.Command("chown", "debros:debros", fp.debrosHome)
+if err := cmd.Run(); err != nil {
+return fmt.Errorf("failed to set ownership for %s: %w", fp.debrosHome, err)
 }
 
 // Fix bin directory
-binDir := filepath.Join(fp.oramaHome, "bin")
+binDir := filepath.Join(fp.debrosHome, "bin")
 cmd = exec.Command("chown", "-R", "debros:debros", binDir)
-if output, err := cmd.CombinedOutput(); err != nil {
-return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", binDir, err, string(output))
-}
-
-// Fix npm cache directory
-npmDir := filepath.Join(fp.oramaHome, ".npm")
-cmd = exec.Command("chown", "-R", "debros:debros", npmDir)
-if output, err := cmd.CombinedOutput(); err != nil {
-return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", npmDir, err, string(output))
+if err := cmd.Run(); err != nil {
+return fmt.Errorf("failed to set ownership for %s: %w", binDir, err)
 }
 
 return nil
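A compact, self-contained sketch of the provisioning pattern used above (create each directory, then pre-create writable log files); the paths are illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	base := "/tmp/debros-example" // illustrative root instead of the real home dir

	// Create the directory tree, ignoring directories that already exist.
	for _, dir := range []string{
		base,
		filepath.Join(base, "configs"),
		filepath.Join(base, "secrets"),
		filepath.Join(base, "logs"),
	} {
		if err := os.MkdirAll(dir, 0755); err != nil {
			panic(fmt.Errorf("failed to create %s: %w", dir, err))
		}
	}

	// Pre-create empty log files so a service manager can append to them.
	for _, name := range []string{"node.log", "ipfs.log"} {
		p := filepath.Join(base, "logs", name)
		if _, err := os.Stat(p); os.IsNotExist(err) {
			if err := os.WriteFile(p, []byte{}, 0644); err != nil {
				panic(err)
			}
		}
	}
	fmt.Println("provisioned", base)
}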
@@ -184,20 +147,20 @@ func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
 
 // StateDetector checks for existing production state
 type StateDetector struct {
-oramaDir string
+debrosDir string
 }
 
 // NewStateDetector creates a state detector
-func NewStateDetector(oramaDir string) *StateDetector {
+func NewStateDetector(debrosDir string) *StateDetector {
 return &StateDetector{
-oramaDir: oramaDir,
+debrosDir: debrosDir,
 }
 }
 
 // IsConfigured checks if basic configs exist
 func (sd *StateDetector) IsConfigured() bool {
-nodeConfig := filepath.Join(sd.oramaDir, "configs", "node.yaml")
-gatewayConfig := filepath.Join(sd.oramaDir, "configs", "gateway.yaml")
+nodeConfig := filepath.Join(sd.debrosDir, "configs", "node.yaml")
+gatewayConfig := filepath.Join(sd.debrosDir, "configs", "gateway.yaml")
 _, err1 := os.Stat(nodeConfig)
 _, err2 := os.Stat(gatewayConfig)
 return err1 == nil || err2 == nil
@@ -205,36 +168,24 @@ func (sd *StateDetector) IsConfigured() bool {
 
 // HasSecrets checks if cluster secret and swarm key exist
 func (sd *StateDetector) HasSecrets() bool {
-clusterSecret := filepath.Join(sd.oramaDir, "secrets", "cluster-secret")
-swarmKey := filepath.Join(sd.oramaDir, "secrets", "swarm.key")
+clusterSecret := filepath.Join(sd.debrosDir, "secrets", "cluster-secret")
+swarmKey := filepath.Join(sd.debrosDir, "secrets", "swarm.key")
 _, err1 := os.Stat(clusterSecret)
 _, err2 := os.Stat(swarmKey)
 return err1 == nil && err2 == nil
 }
 
-// HasIPFSData checks if IPFS repo is initialized (unified path)
+// HasIPFSData checks if IPFS repo is initialized
 func (sd *StateDetector) HasIPFSData() bool {
-// Check unified path first
-ipfsRepoPath := filepath.Join(sd.oramaDir, "data", "ipfs", "repo", "config")
-if _, err := os.Stat(ipfsRepoPath); err == nil {
-return true
-}
-// Fallback: check legacy bootstrap path for migration
-legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "ipfs", "repo", "config")
-_, err := os.Stat(legacyPath)
+ipfsRepoPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "ipfs", "repo", "config")
+_, err := os.Stat(ipfsRepoPath)
 return err == nil
 }
 
-// HasRQLiteData checks if RQLite data exists (unified path)
+// HasRQLiteData checks if RQLite data exists
 func (sd *StateDetector) HasRQLiteData() bool {
-// Check unified path first
-rqliteDataPath := filepath.Join(sd.oramaDir, "data", "rqlite")
-if info, err := os.Stat(rqliteDataPath); err == nil && info.IsDir() {
-return true
-}
-// Fallback: check legacy bootstrap path for migration
-legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "rqlite")
-info, err := os.Stat(legacyPath)
+rqliteDataPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "rqlite")
+info, err := os.Stat(rqliteDataPath)
 return err == nil && info.IsDir()
 }
 
@@ -10,25 +10,31 @@ import (
 
 // SystemdServiceGenerator generates systemd unit files
 type SystemdServiceGenerator struct {
-oramaHome string
-oramaDir string
+debrosHome string
+debrosDir string
 }
 
 // NewSystemdServiceGenerator creates a new service generator
-func NewSystemdServiceGenerator(oramaHome, oramaDir string) *SystemdServiceGenerator {
+func NewSystemdServiceGenerator(debrosHome, debrosDir string) *SystemdServiceGenerator {
 return &SystemdServiceGenerator{
-oramaHome: oramaHome,
-oramaDir: oramaDir,
+debrosHome: debrosHome,
+debrosDir: debrosDir,
 }
 }
 
 // GenerateIPFSService generates the IPFS daemon systemd unit
-func (ssg *SystemdServiceGenerator) GenerateIPFSService(ipfsBinary string) string {
-ipfsRepoPath := filepath.Join(ssg.oramaDir, "data", "ipfs", "repo")
-logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs.log")
+func (ssg *SystemdServiceGenerator) GenerateIPFSService(nodeType string, ipfsBinary string) string {
+var ipfsRepoPath string
+if nodeType == "bootstrap" {
+ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs", "repo")
+} else {
+ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs", "repo")
+}
+
+logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodeType))
 
 return fmt.Sprintf(`[Unit]
-Description=IPFS Daemon
+Description=IPFS Daemon (%[1]s)
 After=network-online.target
 Wants=network-online.target
 
@@ -36,95 +42,83 @@ Wants=network-online.target
 Type=simple
 User=debros
 Group=debros
-Environment=HOME=%[1]s
-Environment=IPFS_PATH=%[2]s
-ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi'
-ExecStart=%[5]s daemon --enable-pubsub-experiment --repo-dir=%[2]s
+Environment=HOME=%[2]s
+Environment=IPFS_PATH=%[3]s
+ExecStartPre=/bin/bash -c 'if [ -f %[4]s/secrets/swarm.key ] && [ ! -f %[3]s/swarm.key ]; then cp %[4]s/secrets/swarm.key %[3]s/swarm.key && chmod 600 %[3]s/swarm.key; fi'
+ExecStart=%[6]s daemon --enable-pubsub-experiment --repo-dir=%[3]s
 Restart=always
 RestartSec=5
-StandardOutput=append:%[4]s
-StandardError=append:%[4]s
-SyslogIdentifier=debros-ipfs
+StandardOutput=file:%[5]s
+StandardError=file:%[5]s
+SyslogIdentifier=ipfs-%[1]s
 
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=read-only
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-ReadWritePaths=%[3]s
+ReadWritePaths=%[4]s
 
 [Install]
 WantedBy=multi-user.target
-`, ssg.oramaHome, ipfsRepoPath, ssg.oramaDir, logFile, ipfsBinary)
+`, nodeType, ssg.debrosHome, ipfsRepoPath, ssg.debrosDir, logFile, ipfsBinary)
 }
 
 // GenerateIPFSClusterService generates the IPFS Cluster systemd unit
-func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(clusterBinary string) string {
-clusterPath := filepath.Join(ssg.oramaDir, "data", "ipfs-cluster")
-logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs-cluster.log")
-
-// Read cluster secret from file to pass to daemon
-clusterSecretPath := filepath.Join(ssg.oramaDir, "secrets", "cluster-secret")
-clusterSecret := ""
-if data, err := os.ReadFile(clusterSecretPath); err == nil {
-clusterSecret = strings.TrimSpace(string(data))
+func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(nodeType string, clusterBinary string) string {
+var clusterPath string
+if nodeType == "bootstrap" {
+clusterPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs-cluster")
+} else {
+clusterPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs-cluster")
 }
 
+logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", nodeType))
 
 return fmt.Sprintf(`[Unit]
-Description=IPFS Cluster Service
-After=debros-ipfs.service
-Wants=debros-ipfs.service
-Requires=debros-ipfs.service
+Description=IPFS Cluster Service (%[1]s)
+After=debros-ipfs-%[1]s.service
+Wants=debros-ipfs-%[1]s.service
+Requires=debros-ipfs-%[1]s.service
 
 [Service]
 Type=simple
 User=debros
 Group=debros
-WorkingDirectory=%[1]s
-Environment=HOME=%[1]s
-Environment=IPFS_CLUSTER_PATH=%[2]s
-Environment=CLUSTER_SECRET=%[5]s
-ExecStartPre=/bin/bash -c 'mkdir -p %[2]s && chmod 700 %[2]s'
-ExecStart=%[4]s daemon
+WorkingDirectory=%[2]s
+Environment=HOME=%[2]s
+Environment=IPFS_CLUSTER_PATH=%[3]s
+ExecStart=%[6]s daemon
 Restart=always
 RestartSec=5
-StandardOutput=append:%[3]s
-StandardError=append:%[3]s
-SyslogIdentifier=debros-ipfs-cluster
+StandardOutput=file:%[4]s
+StandardError=file:%[4]s
+SyslogIdentifier=ipfs-cluster-%[1]s
 
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=read-only
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-ReadWritePaths=%[1]s
+ReadWritePaths=%[5]s
 
 [Install]
 WantedBy=multi-user.target
-`, ssg.oramaHome, clusterPath, logFile, clusterBinary, clusterSecret)
+`, nodeType, ssg.debrosHome, clusterPath, logFile, ssg.debrosDir, clusterBinary)
 }
 
 // GenerateRQLiteService generates the RQLite systemd unit
-func (ssg *SystemdServiceGenerator) GenerateRQLiteService(rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
-dataDir := filepath.Join(ssg.oramaDir, "data", "rqlite")
-logFile := filepath.Join(ssg.oramaDir, "logs", "rqlite.log")
+func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
+var dataDir string
+if nodeType == "bootstrap" {
+dataDir = filepath.Join(ssg.debrosDir, "data", "bootstrap", "rqlite")
+} else {
+dataDir = filepath.Join(ssg.debrosDir, "data", "node", "rqlite")
+}
 
 // Use public IP for advertise if provided, otherwise default to localhost
 if advertiseIP == "" {
 advertiseIP = "127.0.0.1"
 }
 
-// Bind RQLite to localhost only - external access via SNI gateway
 args := fmt.Sprintf(
-`-http-addr 127.0.0.1:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 127.0.0.1:%d`,
+`-http-addr 0.0.0.0:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 0.0.0.0:%d`,
 httpPort, advertiseIP, httpPort, advertiseIP, raftPort, raftPort,
 )
 
@@ -134,8 +128,10 @@ func (ssg *SystemdServiceGenerator) GenerateRQLiteService(rqliteBinary string, h
 
 args += fmt.Sprintf(` %s`, dataDir)
 
+logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", nodeType))
+
 return fmt.Sprintf(`[Unit]
-Description=RQLite Database
+Description=RQLite Database (%[1]s)
 After=network-online.target
 Wants=network-online.target
 
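The unit generators above rely on indexed format verbs so one Sprintf argument can be reused in several places of a template. A minimal, self-contained illustration of that mechanism (the unit text and values here are examples only):

package main

import "fmt"

func main() {
	// %[1]s, %[2]s, %[3]s refer to arguments by position, so nodeType, the home
	// directory, and the log file can each appear more than once in the template.
	unit := fmt.Sprintf(`[Unit]
Description=Example Service (%[1]s)

[Service]
Environment=HOME=%[2]s
StandardOutput=append:%[3]s
StandardError=append:%[3]s
SyslogIdentifier=example-%[1]s
`, "bootstrap", "/home/debros", "/home/debros/logs/example.log")
	fmt.Print(unit)
}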
@@ -143,34 +139,28 @@ Wants=network-online.target
 Type=simple
 User=debros
 Group=debros
-Environment=HOME=%[1]s
-ExecStart=%[5]s %[2]s
+Environment=HOME=%[2]s
+ExecStart=%[6]s %[3]s
 Restart=always
 RestartSec=5
-StandardOutput=append:%[3]s
-StandardError=append:%[3]s
-SyslogIdentifier=debros-rqlite
+StandardOutput=file:%[4]s
+StandardError=file:%[4]s
+SyslogIdentifier=rqlite-%[1]s
 
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=read-only
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-ReadWritePaths=%[4]s
+ReadWritePaths=%[5]s
 
 [Install]
 WantedBy=multi-user.target
-`, ssg.oramaHome, args, logFile, dataDir, rqliteBinary)
+`, nodeType, ssg.debrosHome, args, logFile, ssg.debrosDir, rqliteBinary)
 }
 
 // GenerateOlricService generates the Olric systemd unit
 func (ssg *SystemdServiceGenerator) GenerateOlricService(olricBinary string) string {
-olricConfigPath := filepath.Join(ssg.oramaDir, "configs", "olric", "config.yaml")
-logFile := filepath.Join(ssg.oramaDir, "logs", "olric.log")
+olricConfigPath := filepath.Join(ssg.debrosDir, "configs", "olric", "config.yaml")
+logFile := filepath.Join(ssg.debrosDir, "logs", "olric.log")
 
 return fmt.Sprintf(`[Unit]
 Description=Olric Cache Server
@@ -186,147 +176,93 @@ Environment=OLRIC_SERVER_CONFIG=%[2]s
 ExecStart=%[5]s
 Restart=always
 RestartSec=5
-StandardOutput=append:%[3]s
-StandardError=append:%[3]s
+StandardOutput=file:%[3]s
+StandardError=file:%[3]s
 SyslogIdentifier=olric
 
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=read-only
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
 ReadWritePaths=%[4]s
 
 [Install]
 WantedBy=multi-user.target
-`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary)
+`, ssg.debrosHome, olricConfigPath, logFile, ssg.debrosDir, olricBinary)
 }
 
 // GenerateNodeService generates the DeBros Node systemd unit
-func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
-configFile := "node.yaml"
-logFile := filepath.Join(ssg.oramaDir, "logs", "node.log")
-// Note: systemd StandardOutput/StandardError paths should not contain substitution variables
-// Use absolute paths directly as they will be resolved by systemd at runtime
+func (ssg *SystemdServiceGenerator) GenerateNodeService(nodeType string) string {
+var configFile string
+if nodeType == "bootstrap" {
+configFile = "bootstrap.yaml"
+} else {
+configFile = "node.yaml"
+}
+
+logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("node-%s.log", nodeType))
 
 return fmt.Sprintf(`[Unit]
-Description=DeBros Network Node
-After=debros-ipfs-cluster.service debros-olric.service
-Wants=debros-ipfs-cluster.service debros-olric.service
+Description=DeBros Network Node (%s)
+After=debros-ipfs-cluster-%s.service
+Wants=debros-ipfs-cluster-%s.service
+Requires=debros-ipfs-cluster-%s.service
 
 [Service]
 Type=simple
 User=debros
 Group=debros
-WorkingDirectory=%[1]s
-Environment=HOME=%[1]s
-ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s
+WorkingDirectory=%s
+Environment=HOME=%s
+ExecStart=%s/bin/node --config %s/configs/%s
 Restart=always
 RestartSec=5
-StandardOutput=append:%[4]s
-StandardError=append:%[4]s
-SyslogIdentifier=debros-node
+StandardOutput=file:%s
+StandardError=file:%s
+SyslogIdentifier=debros-node-%s
 
-AmbientCapabilities=CAP_NET_BIND_SERVICE
-CapabilityBoundingSet=CAP_NET_BIND_SERVICE
-
+NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=read-only
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-ReadWritePaths=%[2]s
+ReadWritePaths=%s
 
 [Install]
 WantedBy=multi-user.target
-`, ssg.oramaHome, ssg.oramaDir, configFile, logFile)
+`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, configFile, logFile, logFile, nodeType, ssg.debrosDir)
 }
 
 // GenerateGatewayService generates the DeBros Gateway systemd unit
-func (ssg *SystemdServiceGenerator) GenerateGatewayService() string {
-logFile := filepath.Join(ssg.oramaDir, "logs", "gateway.log")
+func (ssg *SystemdServiceGenerator) GenerateGatewayService(nodeType string) string {
+nodeService := fmt.Sprintf("debros-node-%s.service", nodeType)
+logFile := filepath.Join(ssg.debrosDir, "logs", "gateway.log")
 return fmt.Sprintf(`[Unit]
 Description=DeBros Gateway
-After=debros-node.service debros-olric.service
-Wants=debros-node.service debros-olric.service
+After=%s
+Wants=%s
 
 [Service]
 Type=simple
 User=debros
 Group=debros
-WorkingDirectory=%[1]s
-Environment=HOME=%[1]s
-ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
+WorkingDirectory=%s
+Environment=HOME=%s
+ExecStart=%s/bin/gateway --config %s/configs/gateway.yaml
 Restart=always
 RestartSec=5
-StandardOutput=append:%[3]s
-StandardError=append:%[3]s
+StandardOutput=file:%s
+StandardError=file:%s
 SyslogIdentifier=debros-gateway
 
 AmbientCapabilities=CAP_NET_BIND_SERVICE
 CapabilityBoundingSet=CAP_NET_BIND_SERVICE
 
-# Note: NoNewPrivileges is omitted because it conflicts with AmbientCapabilities
-# The service needs CAP_NET_BIND_SERVICE to bind to privileged ports (80, 443)
-PrivateTmp=yes
-ProtectSystem=strict
-ProtectHome=read-only
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-ReadWritePaths=%[2]s
-
-[Install]
-WantedBy=multi-user.target
-`, ssg.oramaHome, ssg.oramaDir, logFile)
-}
-
-// GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit
-func (ssg *SystemdServiceGenerator) GenerateAnyoneClientService() string {
-logFile := filepath.Join(ssg.oramaDir, "logs", "anyone-client.log")
-
-return fmt.Sprintf(`[Unit]
-Description=Anyone Client SOCKS5 Proxy
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=simple
-User=debros
-Group=debros
-Environment=HOME=%[1]s
-Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin
-WorkingDirectory=%[1]s
-ExecStart=/usr/bin/npx anyone-client
-Restart=always
-RestartSec=5
-StandardOutput=append:%[2]s
-StandardError=append:%[2]s
-SyslogIdentifier=anyone-client
-
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ProtectHome=no
-ProtectKernelTunables=yes
-ProtectKernelModules=yes
-ProtectControlGroups=yes
-RestrictRealtime=yes
-RestrictSUIDSGID=yes
-ReadWritePaths=%[3]s
+ReadWritePaths=%s
 
 [Install]
 WantedBy=multi-user.target
-`, ssg.oramaHome, logFile, ssg.oramaDir)
+`, nodeService, nodeService, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, logFile, logFile, ssg.debrosDir)
 }
 
 // SystemdController manages systemd service operations
@@ -9,20 +9,23 @@ import (
 func TestGenerateRQLiteService(t *testing.T) {
 tests := []struct {
 name string
+nodeType string
 joinAddr string
 advertiseIP string
 expectJoinInUnit bool
 expectAdvertiseIP string
 }{
 {
-name: "first node with localhost advertise",
+name: "bootstrap with localhost advertise",
+nodeType: "bootstrap",
 joinAddr: "",
 advertiseIP: "",
 expectJoinInUnit: false,
 expectAdvertiseIP: "127.0.0.1",
 },
 {
-name: "first node with public IP advertise",
+name: "bootstrap with public IP advertise",
+nodeType: "bootstrap",
 joinAddr: "",
 advertiseIP: "10.0.0.1",
 expectJoinInUnit: false,
@@ -30,6 +33,7 @@ func TestGenerateRQLiteService(t *testing.T) {
 },
 {
 name: "node joining cluster",
+nodeType: "node",
 joinAddr: "10.0.0.1:7001",
 advertiseIP: "10.0.0.2",
 expectJoinInUnit: true,
@@ -37,6 +41,7 @@ func TestGenerateRQLiteService(t *testing.T) {
 },
 {
 name: "node with localhost (should still include join)",
+nodeType: "node",
 joinAddr: "localhost:7001",
 advertiseIP: "127.0.0.1",
 expectJoinInUnit: true,
@@ -47,11 +52,11 @@ func TestGenerateRQLiteService(t *testing.T) {
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
 ssg := &SystemdServiceGenerator{
-oramaHome: "/home/debros",
-oramaDir: "/home/debros/.orama",
+debrosHome: "/home/debros",
+debrosDir: "/home/debros/.debros",
 }
 
-unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
+unit := ssg.GenerateRQLiteService(tt.nodeType, "/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
 
 // Check advertise IP is present
 expectedAdvertise := tt.expectAdvertiseIP + ":5001"
@@ -81,21 +86,21 @@ func TestGenerateRQLiteService(t *testing.T) {
 // TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments
 func TestGenerateRQLiteServiceArgs(t *testing.T) {
 ssg := &SystemdServiceGenerator{
-oramaHome: "/home/debros",
-oramaDir: "/home/debros/.orama",
+debrosHome: "/home/debros",
+debrosDir: "/home/debros/.debros",
 }
 
-unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
+unit := ssg.GenerateRQLiteService("node", "/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
 
-// Verify essential flags are present (localhost binding for security)
-if !strings.Contains(unit, "-http-addr 127.0.0.1:5001") {
-t.Error("missing -http-addr 127.0.0.1:5001")
+// Verify essential flags are present
+if !strings.Contains(unit, "-http-addr 0.0.0.0:5001") {
+t.Error("missing -http-addr 0.0.0.0:5001")
 }
 if !strings.Contains(unit, "-http-adv-addr 10.0.0.2:5001") {
 t.Error("missing -http-adv-addr 10.0.0.2:5001")
 }
-if !strings.Contains(unit, "-raft-addr 127.0.0.1:7001") {
-t.Error("missing -raft-addr 127.0.0.1:7001")
+if !strings.Contains(unit, "-raft-addr 0.0.0.0:7001") {
+t.Error("missing -raft-addr 0.0.0.0:7001")
 }
 if !strings.Contains(unit, "-raft-adv-addr 10.0.0.2:7001") {
 t.Error("missing -raft-adv-addr 10.0.0.2:7001")
43 pkg/environments/templates/bootstrap.yaml (new file)
@@ -0,0 +1,43 @@
+node:
+  id: "{{.NodeID}}"
+  type: "bootstrap"
+  listen_addresses:
+    - "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
+  data_dir: "{{.DataDir}}"
+  max_connections: 50
+
+database:
+  data_dir: "{{.DataDir}}/rqlite"
+  replication_factor: 3
+  shard_count: 16
+  max_database_size: 1073741824
+  backup_interval: "24h"
+  rqlite_port: {{.RQLiteHTTPPort}}
+  rqlite_raft_port: {{.RQLiteRaftPort}}
+  rqlite_join_address: "{{.RQLiteJoinAddress}}"
+  cluster_sync_interval: "30s"
+  peer_inactivity_limit: "24h"
+  min_cluster_size: 3
+ipfs:
+  cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
+  api_url: "http://localhost:{{.IPFSAPIPort}}"
+  timeout: "60s"
+  replication_factor: 3
+  enable_encryption: true
+
+discovery:
+  bootstrap_peers:
+{{range .BootstrapPeers}}    - "{{.}}"
+{{end}}
+  discovery_interval: "15s"
+  bootstrap_port: {{.P2PPort}}
+  http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
+  raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
+  node_namespace: "default"
+
+security:
+  enable_tls: false
+
+logging:
+  level: "info"
+  format: "console"
@@ -1,10 +1,10 @@
 node:
   id: "{{.NodeID}}"
+  type: "node"
   listen_addresses:
     - "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
   data_dir: "{{.DataDir}}"
   max_connections: 50
-  domain: "{{.Domain}}"

 database:
   data_dir: "{{.DataDir}}/rqlite"
@@ -13,16 +13,11 @@ database:
   max_database_size: 1073741824
   backup_interval: "24h"
   rqlite_port: {{.RQLiteHTTPPort}}
-  rqlite_raft_port: {{.RQLiteRaftInternalPort}}
+  rqlite_raft_port: {{.RQLiteRaftPort}}
   rqlite_join_address: "{{.RQLiteJoinAddress}}"
-  {{if .NodeCert}}# Node-to-node TLS encryption for Raft communication (direct RQLite TLS on port 7002)
-  node_cert: "{{.NodeCert}}"
-  node_key: "{{.NodeKey}}"
-  {{if .NodeCACert}}node_ca_cert: "{{.NodeCACert}}"
-  {{end}}{{if .NodeNoVerify}}node_no_verify: true
-  {{end}}{{end}}cluster_sync_interval: "30s"
+  cluster_sync_interval: "30s"
   peer_inactivity_limit: "24h"
-  min_cluster_size: 1
+  min_cluster_size: 3
 ipfs:
   cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
   api_url: "http://localhost:{{.IPFSAPIPort}}"
@@ -36,8 +31,8 @@ discovery:
 {{end}}
   discovery_interval: "15s"
   bootstrap_port: {{.P2PPort}}
-  http_adv_address: "{{.HTTPAdvAddress}}"
-  raft_adv_address: "{{.RaftAdvAddress}}"
+  http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
+  raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
   node_namespace: "default"

 security:
@@ -47,42 +42,3 @@ logging:
   level: "info"
   format: "console"

-http_gateway:
-  enabled: true
-  listen_addr: "{{if .EnableHTTPS}}:{{.HTTPSPort}}{{else}}:{{.UnifiedGatewayPort}}{{end}}"
-  node_name: "{{.NodeID}}"
-
-  {{if .EnableHTTPS}}https:
-    enabled: true
-    domain: "{{.Domain}}"
-    auto_cert: true
-    cache_dir: "{{.TLSCacheDir}}"
-    http_port: {{.HTTPPort}}
-    https_port: {{.HTTPSPort}}
-    email: "admin@{{.Domain}}"
-  {{end}}
-
-  {{if .EnableHTTPS}}sni:
-    enabled: true
-    listen_addr: ":{{.RQLiteRaftPort}}"
-    cert_file: "{{.TLSCacheDir}}/{{.Domain}}.crt"
-    key_file: "{{.TLSCacheDir}}/{{.Domain}}.key"
-    routes:
-      # Note: Raft traffic bypasses SNI gateway - RQLite uses native TLS on port 7002
-      ipfs.{{.Domain}}: "localhost:4101"
-      ipfs-cluster.{{.Domain}}: "localhost:9098"
-      olric.{{.Domain}}: "localhost:3322"
-  {{end}}
-
-  # Full gateway configuration (for API, auth, pubsub, and internal service routing)
-  client_namespace: "default"
-  rqlite_dsn: "http://localhost:{{.RQLiteHTTPPort}}"
-  olric_servers:
-    - "127.0.0.1:3320"
-  olric_timeout: "10s"
-  ipfs_cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
-  ipfs_api_url: "http://localhost:{{.IPFSAPIPort}}"
-  ipfs_timeout: "60s"
-
-  # Routes for internal service reverse proxy (kept for backwards compatibility but not used by full gateway)
-  routes: {}
@@ -1,8 +1,8 @@
 server:
-  bindAddr: "{{.ServerBindAddr}}"
+  bindAddr: "{{.BindAddr}}"
   bindPort: { { .HTTPPort } }

 memberlist:
-  environment: { { .MemberlistEnvironment } }
-  bindAddr: "{{.MemberlistBindAddr}}"
+  environment: local
+  bindAddr: "{{.BindAddr}}"
   bindPort: { { .MemberlistPort } }
@@ -11,33 +11,30 @@ import (
 //go:embed *.yaml *.service
 var templatesFS embed.FS

-// NodeConfigData holds parameters for node.yaml rendering (unified - no bootstrap/node distinction)
+// BootstrapConfigData holds parameters for bootstrap.yaml rendering
+type BootstrapConfigData struct {
+	NodeID            string
+	P2PPort           int
+	DataDir           string
+	RQLiteHTTPPort    int
+	RQLiteRaftPort    int
+	ClusterAPIPort    int
+	IPFSAPIPort       int      // Default: 4501
+	BootstrapPeers    []string // List of bootstrap peer multiaddrs
+	RQLiteJoinAddress string   // Optional: join address for secondary bootstraps
+}
+
+// NodeConfigData holds parameters for node.yaml rendering
 type NodeConfigData struct {
 	NodeID         string
 	P2PPort        int
 	DataDir        string
 	RQLiteHTTPPort int
-	RQLiteRaftPort         int      // External Raft port for advertisement (7001 for SNI)
-	RQLiteRaftInternalPort int      // Internal Raft port for local binding (7002 when SNI enabled)
-	RQLiteJoinAddress      string   // Optional: join address for joining existing cluster
-	BootstrapPeers         []string // List of peer multiaddrs to connect to
+	RQLiteRaftPort    int
+	RQLiteJoinAddress string
+	BootstrapPeers    []string
 	ClusterAPIPort int
-	IPFSAPIPort    int // Default: 4501
-	HTTPAdvAddress     string // Advertised HTTP address (IP:port)
-	RaftAdvAddress     string // Advertised Raft address (IP:port or domain:port for SNI)
-	UnifiedGatewayPort int    // Unified gateway port for all node services
-	Domain             string // Domain for this node (e.g., node-123.orama.network)
-	EnableHTTPS        bool   // Enable HTTPS/TLS with ACME
-	TLSCacheDir        string // Directory for ACME certificate cache
-	HTTPPort           int    // HTTP port for ACME challenges (usually 80)
-	HTTPSPort          int    // HTTPS port (usually 443)
-
-	// Node-to-node TLS encryption for RQLite Raft communication
-	// Required when using SNI gateway for Raft traffic routing
-	NodeCert     string // Path to X.509 certificate for node-to-node communication
-	NodeKey      string // Path to X.509 private key for node-to-node communication
-	NodeCACert   string // Path to CA certificate (optional)
-	NodeNoVerify bool   // Skip certificate verification (for self-signed certs)
+	IPFSAPIPort    int // Default: 4501+
 }

 // GatewayConfigData holds parameters for gateway.yaml rendering
@@ -55,46 +52,63 @@ type GatewayConfigData struct {

 // OlricConfigData holds parameters for olric.yaml rendering
 type OlricConfigData struct {
-	ServerBindAddr string // HTTP API bind address (127.0.0.1 for security)
+	BindAddr string
 	HTTPPort int
-	MemberlistBindAddr string // Memberlist bind address (0.0.0.0 for clustering)
 	MemberlistPort int
-	MemberlistEnvironment string // "local", "lan", or "wan"
 }

 // SystemdIPFSData holds parameters for systemd IPFS service rendering
 type SystemdIPFSData struct {
+	NodeType     string
 	HomeDir      string
 	IPFSRepoPath string
 	SecretsDir   string
-	OramaDir     string
+	DebrosDir    string
 }

 // SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering
 type SystemdIPFSClusterData struct {
+	NodeType    string
 	HomeDir     string
 	ClusterPath string
-	OramaDir    string
+	DebrosDir   string
+}
+
+// SystemdRQLiteData holds parameters for systemd RQLite service rendering
+type SystemdRQLiteData struct {
+	NodeType  string
+	HomeDir   string
+	HTTPPort  int
+	RaftPort  int
+	DataDir   string
+	JoinAddr  string
+	DebrosDir string
 }

 // SystemdOlricData holds parameters for systemd Olric service rendering
 type SystemdOlricData struct {
 	HomeDir    string
 	ConfigPath string
-	OramaDir   string
+	DebrosDir  string
 }

 // SystemdNodeData holds parameters for systemd Node service rendering
 type SystemdNodeData struct {
+	NodeType   string
 	HomeDir    string
 	ConfigFile string
-	OramaDir   string
+	DebrosDir  string
 }

 // SystemdGatewayData holds parameters for systemd Gateway service rendering
 type SystemdGatewayData struct {
 	HomeDir   string
-	OramaDir  string
+	DebrosDir string
+}
+
+// RenderBootstrapConfig renders the bootstrap config template with the given data
+func RenderBootstrapConfig(data BootstrapConfigData) (string, error) {
+	return renderTemplate("bootstrap.yaml", data)
 }

 // RenderNodeConfig renders the node config template with the given data
@@ -122,6 +136,11 @@ func RenderIPFSClusterService(data SystemdIPFSClusterData) (string, error) {
 	return renderTemplate("systemd_ipfs_cluster.service", data)
 }

+// RenderRQLiteService renders the RQLite systemd service template
+func RenderRQLiteService(data SystemdRQLiteData) (string, error) {
+	return renderTemplate("systemd_rqlite.service", data)
+}
+
 // RenderOlricService renders the Olric systemd service template
 func RenderOlricService(data SystemdOlricData) (string, error) {
 	return renderTemplate("systemd_olric.service", data)
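For orientation, a minimal usage sketch (not part of the diff) of the nightly-side helpers shown above. It assumes the package at pkg/environments/templates is importable as "templates" and exposes RenderBootstrapConfig and BootstrapConfigData exactly as in the hunk; the field values are hypothetical.

// Usage sketch only - not from the repository.
package main

import (
	"fmt"
	"log"

	"github.com/DeBrosOfficial/network/pkg/environments/templates"
)

func main() {
	// Hypothetical values; field names follow BootstrapConfigData in the diff above.
	yaml, err := templates.RenderBootstrapConfig(templates.BootstrapConfigData{
		NodeID:         "bootstrap",
		P2PPort:        4001,
		DataDir:        "/home/debros/.debros/bootstrap",
		RQLiteHTTPPort: 5001,
		RQLiteRaftPort: 7001,
		ClusterAPIPort: 9094,
		IPFSAPIPort:    5001,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(yaml) // rendered bootstrap.yaml
}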
@@ -5,12 +5,46 @@ import (
 	"testing"
 )

+func TestRenderBootstrapConfig(t *testing.T) {
+	data := BootstrapConfigData{
+		NodeID:         "bootstrap",
+		P2PPort:        4001,
+		DataDir:        "/home/debros/.debros/bootstrap",
+		RQLiteHTTPPort: 5001,
+		RQLiteRaftPort: 7001,
+		ClusterAPIPort: 9094,
+		IPFSAPIPort:    5001,
+	}
+
+	result, err := RenderBootstrapConfig(data)
+	if err != nil {
+		t.Fatalf("RenderBootstrapConfig failed: %v", err)
+	}
+
+	// Check for required fields
+	checks := []string{
+		"id: \"bootstrap\"",
+		"type: \"bootstrap\"",
+		"tcp/4001",
+		"rqlite_port: 5001",
+		"rqlite_raft_port: 7001",
+		"cluster_api_url: \"http://localhost:9094\"",
+		"api_url: \"http://localhost:5001\"",
+	}
+
+	for _, check := range checks {
+		if !strings.Contains(result, check) {
+			t.Errorf("Bootstrap config missing: %s", check)
+		}
+	}
+}
+
 func TestRenderNodeConfig(t *testing.T) {
 	bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
 	data := NodeConfigData{
 		NodeID:         "node2",
 		P2PPort:        4002,
-		DataDir:        "/home/debros/.orama/node2",
+		DataDir:        "/home/debros/.debros/node2",
 		RQLiteHTTPPort: 5002,
 		RQLiteRaftPort: 7002,
 		RQLiteJoinAddress: "localhost:5001",
@@ -27,8 +61,10 @@ func TestRenderNodeConfig(t *testing.T) {
 	// Check for required fields
 	checks := []string{
 		"id: \"node2\"",
+		"type: \"node\"",
 		"tcp/4002",
 		"rqlite_port: 5002",
+		"rqlite_raft_port: 7002",
 		"rqlite_join_address: \"localhost:5001\"",
 		bootstrapMultiaddr,
 		"cluster_api_url: \"http://localhost:9104\"",
@@ -74,11 +110,9 @@ func TestRenderGatewayConfig(t *testing.T) {

 func TestRenderOlricConfig(t *testing.T) {
 	data := OlricConfigData{
-		ServerBindAddr: "127.0.0.1",
+		BindAddr: "127.0.0.1",
 		HTTPPort: 3320,
-		MemberlistBindAddr: "0.0.0.0",
 		MemberlistPort: 3322,
-		MemberlistEnvironment: "lan",
 	}

 	result, err := RenderOlricConfig(data)
@@ -92,7 +126,6 @@ func TestRenderOlricConfig(t *testing.T) {
 		"bindPort: 3320",
 		"memberlist",
 		"bindPort: 3322",
-		"environment: lan",
 	}

 	for _, check := range checks {
@@ -1,7 +1,7 @@
 [Unit]
 Description=DeBros Gateway
-After=debros-node.service
-Wants=debros-node.service
+After=debros-node-node.service
+Wants=debros-node-node.service

 [Service]
 Type=simple
@@ -9,7 +9,7 @@ User=debros
 Group=debros
 WorkingDirectory={{.HomeDir}}
 Environment=HOME={{.HomeDir}}
-ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml
+ExecStart={{.HomeDir}}/bin/gateway --config {{.DebrosDir}}/configs/gateway.yaml
 Restart=always
 RestartSec=5
 StandardOutput=journal
@@ -22,7 +22,7 @@ CapabilityBoundingSet=CAP_NET_BIND_SERVICE
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ReadWritePaths={{.OramaDir}}
+ReadWritePaths={{.DebrosDir}}

 [Install]
 WantedBy=multi-user.target
@@ -20,7 +20,7 @@ SyslogIdentifier=ipfs-{{.NodeType}}
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ReadWritePaths={{.OramaDir}}
+ReadWritePaths={{.DebrosDir}}

 [Install]
 WantedBy=multi-user.target
@@ -21,7 +21,7 @@ SyslogIdentifier=ipfs-cluster-{{.NodeType}}
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ReadWritePaths={{.OramaDir}}
+ReadWritePaths={{.DebrosDir}}

 [Install]
 WantedBy=multi-user.target
@@ -10,7 +10,7 @@ User=debros
 Group=debros
 WorkingDirectory={{.HomeDir}}
 Environment=HOME={{.HomeDir}}
-ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
+ExecStart={{.HomeDir}}/bin/node --config {{.DebrosDir}}/configs/{{.ConfigFile}}
 Restart=always
 RestartSec=5
 StandardOutput=journal
@@ -20,7 +20,7 @@ SyslogIdentifier=debros-node-{{.NodeType}}
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ReadWritePaths={{.OramaDir}}
+ReadWritePaths={{.DebrosDir}}

 [Install]
 WantedBy=multi-user.target
@@ -19,7 +19,7 @@ SyslogIdentifier=olric
 NoNewPrivileges=yes
 PrivateTmp=yes
 ProtectSystem=strict
-ReadWritePaths={{.OramaDir}}
+ReadWritePaths={{.DebrosDir}}

 [Install]
 WantedBy=multi-user.target
pkg/environments/templates/systemd_rqlite.service (new file, 25 lines)
@@ -0,0 +1,25 @@
[Unit]
Description=RQLite Database ({{.NodeType}})
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:{{.HTTPPort}} -http-adv-addr 127.0.0.1:{{.HTTPPort}} -raft-adv-addr 127.0.0.1:{{.RaftPort}} -raft-addr 0.0.0.0:{{.RaftPort}}{{if .JoinAddr}} -join {{.JoinAddr}} -join-attempts 30 -join-interval 10s{{end}} {{.DataDir}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-{{.NodeType}}

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target
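For reference (not part of the diff), with hypothetical values HTTPPort=5001, RaftPort=7001 and DataDir=/home/debros/.debros/rqlite, the templated ExecStart line above would render roughly as below; the -join flags are emitted only when JoinAddr is non-empty.

# JoinAddr empty (first node in the cluster):
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:5001 -http-adv-addr 127.0.0.1:5001 -raft-adv-addr 127.0.0.1:7001 -raft-addr 0.0.0.0:7001 /home/debros/.debros/rqlite

# JoinAddr set to 10.0.0.1:5001 (node joining an existing cluster):
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:5001 -http-adv-addr 127.0.0.1:5001 -raft-adv-addr 127.0.0.1:7001 -raft-addr 0.0.0.0:7001 -join 10.0.0.1:5001 -join-attempts 30 -join-interval 10s /home/debros/.debros/rqlite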
@@ -114,11 +114,9 @@ func (g *Gateway) challengeHandler(w http.ResponseWriter, r *http.Request) {
 	nsID := nres.Rows[0][0]

 	// Store nonce with 5 minute expiry
-	// Normalize wallet address to lowercase for case-insensitive comparison
-	walletLower := strings.ToLower(strings.TrimSpace(req.Wallet))
 	if _, err := db.Query(internalCtx,
 		"INSERT INTO nonces(namespace_id, wallet, nonce, purpose, expires_at) VALUES (?, ?, ?, ?, datetime('now', '+5 minutes'))",
-		nsID, walletLower, nonce, req.Purpose,
+		nsID, req.Wallet, nonce, req.Purpose,
 	); err != nil {
 		writeError(w, http.StatusInternalServerError, err.Error())
 		return
@@ -173,10 +171,8 @@ func (g *Gateway) verifyHandler(w http.ResponseWriter, r *http.Request) {
 		writeError(w, http.StatusInternalServerError, err.Error())
 		return
 	}
-	// Normalize wallet address to lowercase for case-insensitive comparison
-	walletLower := strings.ToLower(strings.TrimSpace(req.Wallet))
-	q := "SELECT id FROM nonces WHERE namespace_id = ? AND LOWER(wallet) = LOWER(?) AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
-	nres, err := db.Query(internalCtx, q, nsID, walletLower, req.Nonce)
+	q := "SELECT id FROM nonces WHERE namespace_id = ? AND wallet = ? AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
+	nres, err := db.Query(internalCtx, q, nsID, req.Wallet, req.Nonce)
 	if err != nil || nres == nil || nres.Count == 0 {
 		writeError(w, http.StatusBadRequest, "invalid or expired nonce")
 		return
@@ -399,10 +395,8 @@ func (g *Gateway) issueAPIKeyHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	// Validate nonce exists and not used/expired
-	// Normalize wallet address to lowercase for case-insensitive comparison
-	walletLower := strings.ToLower(strings.TrimSpace(req.Wallet))
-	q := "SELECT id FROM nonces WHERE namespace_id = ? AND LOWER(wallet) = LOWER(?) AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
-	nres, err := db.Query(internalCtx, q, nsID, walletLower, req.Nonce)
+	q := "SELECT id FROM nonces WHERE namespace_id = ? AND wallet = ? AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
+	nres, err := db.Query(internalCtx, q, nsID, req.Wallet, req.Nonce)
 	if err != nil || nres == nil || nres.Count == 0 {
 		writeError(w, http.StatusBadRequest, "invalid or expired nonce")
 		return
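A small self-contained sketch (not from the repository) of the wallet normalization that the main side of the hunks above applies before nonces are stored and matched; together with the LOWER(wallet) = LOWER(?) comparison in the SELECT, lookups stay case-insensitive even for rows stored before the normalization was introduced.

package main

import (
	"fmt"
	"strings"
)

// normalizeWallet mirrors strings.ToLower(strings.TrimSpace(req.Wallet)) as used above.
func normalizeWallet(w string) string {
	return strings.ToLower(strings.TrimSpace(w))
}

func main() {
	fmt.Println(normalizeWallet("  0xABCDef0123  ")) // "0xabcdef0123"
}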
@@ -17,8 +17,7 @@ import (
 // Cache HTTP handlers for Olric distributed cache

 func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
-	client := g.getOlricClient()
-	if client == nil {
+	if g.olricClient == nil {
 		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
 		return
 	}
@@ -26,7 +25,7 @@ func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
 	defer cancel()

-	err := client.Health(ctx)
+	err := g.olricClient.Health(ctx)
 	if err != nil {
 		writeError(w, http.StatusServiceUnavailable, fmt.Sprintf("cache health check failed: %v", err))
 		return
@@ -39,8 +38,7 @@ func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
 }

 func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) {
-	client := g.getOlricClient()
-	if client == nil {
+	if g.olricClient == nil {
 		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
 		return
 	}
@@ -68,8 +66,8 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
 	defer cancel()

-	olricCluster := client.GetClient()
-	dm, err := olricCluster.NewDMap(req.DMap)
+	client := g.olricClient.GetClient()
+	dm, err := client.NewDMap(req.DMap)
 	if err != nil {
 		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
 		return
@@ -144,8 +142,7 @@ func decodeValueFromOlric(gr *olriclib.GetResponse) (any, error) {
 }

 func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
-	client := g.getOlricClient()
-	if client == nil {
+	if g.olricClient == nil {
 		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
 		return
 	}
@@ -178,8 +175,8 @@ func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
 	defer cancel()

-	olricCluster := client.GetClient()
-	dm, err := olricCluster.NewDMap(req.DMap)
+	client := g.olricClient.GetClient()
+	dm, err := client.NewDMap(req.DMap)
 	if err != nil {
 		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
 		return
@@ -223,8 +220,7 @@ func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
 }

 func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
-	client := g.getOlricClient()
-	if client == nil {
+	if g.olricClient == nil {
 		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
 		return
 	}
@@ -259,8 +255,8 @@ func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
 	defer cancel()

-	olricCluster := client.GetClient()
-	dm, err := olricCluster.NewDMap(req.DMap)
+	client := g.olricClient.GetClient()
+	dm, err := client.NewDMap(req.DMap)
 	if err != nil {
 		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
 		return
@@ -341,8 +337,7 @@ func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
 }

 func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
-	client := g.getOlricClient()
-	if client == nil {
+	if g.olricClient == nil {
 		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
 		return
 	}
@@ -370,8 +365,8 @@ func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
 	defer cancel()

-	olricCluster := client.GetClient()
-	dm, err := olricCluster.NewDMap(req.DMap)
+	client := g.olricClient.GetClient()
+	dm, err := client.NewDMap(req.DMap)
 	if err != nil {
 		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
 		return
@@ -400,8 +395,7 @@ func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
 }

 func (g *Gateway) cacheScanHandler(w http.ResponseWriter, r *http.Request) {
-	client := g.getOlricClient()
-	if client == nil {
+	if g.olricClient == nil {
 		writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
 		return
 	}
@@ -429,8 +423,8 @@ func (g *Gateway) cacheScanHandler(w http.ResponseWriter, r *http.Request) {
 	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
 	defer cancel()

-	olricCluster := client.GetClient()
-	dm, err := olricCluster.NewDMap(req.DMap)
+	client := g.olricClient.GetClient()
+	dm, err := client.NewDMap(req.DMap)
 	if err != nil {
 		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
 		return
@@ -5,7 +5,6 @@ import (
 	"crypto/rand"
 	"crypto/rsa"
 	"database/sql"
-	"fmt"
 	"net"
 	"os"
 	"path/filepath"
@@ -26,18 +25,11 @@ import (
 	_ "github.com/rqlite/gorqlite/stdlib"
 )

-const (
-	olricInitMaxAttempts    = 5
-	olricInitInitialBackoff = 500 * time.Millisecond
-	olricInitMaxBackoff     = 5 * time.Second
-)
-
 // Config holds configuration for the gateway server
 type Config struct {
 	ListenAddr      string
 	ClientNamespace string
 	BootstrapPeers  []string
-	NodePeerID      string // The node's actual peer ID from its identity file

 	// Optional DSN for rqlite database/sql driver, e.g. "http://localhost:4001"
 	// If empty, defaults to "http://localhost:4001".
@@ -46,7 +38,7 @@ type Config struct {
 	// HTTPS configuration
 	EnableHTTPS bool   // Enable HTTPS with ACME (Let's Encrypt)
 	DomainName  string // Domain name for HTTPS certificate
-	TLSCacheDir string // Directory to cache TLS certificates (default: ~/.orama/tls-cache)
+	TLSCacheDir string // Directory to cache TLS certificates (default: ~/.debros/tls-cache)

 	// Olric cache configuration
 	OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"]
@@ -64,7 +56,6 @@ type Gateway struct {
 	logger *logging.ColoredLogger
 	cfg    *Config
 	client client.NetworkClient
-	nodePeerID string // The node's actual peer ID from its identity file (overrides client's peer ID)
 	startedAt  time.Time
 	signingKey *rsa.PrivateKey
 	keyID      string
@@ -76,7 +67,6 @@ type Gateway struct {

 	// Olric cache client
 	olricClient *olric.Client
-	olricMu     sync.RWMutex

 	// IPFS storage client
 	ipfsClient ipfs.IPFSClient
@@ -117,7 +107,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {

 	logger.ComponentInfo(logging.ComponentClient, "Network client connected",
 		zap.String("namespace", cliCfg.AppName),
-		zap.Int("peer_count", len(cliCfg.BootstrapPeers)),
+		zap.Int("bootstrap_peer_count", len(cliCfg.BootstrapPeers)),
 	)

 	logger.ComponentInfo(logging.ComponentGeneral, "Creating gateway instance...")
@@ -125,7 +115,6 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
 		logger: logger,
 		cfg:    cfg,
 		client: c,
-		nodePeerID: cfg.NodePeerID,
 		startedAt:        time.Now(),
 		localSubscribers: make(map[string][]*localSubscriber),
 	}
@@ -193,12 +182,11 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
 		Servers: olricServers,
 		Timeout: cfg.OlricTimeout,
 	}
-	olricClient, olricErr := initializeOlricClientWithRetry(olricCfg, logger)
+	olricClient, olricErr := olric.NewClient(olricCfg, logger.Logger)
 	if olricErr != nil {
 		logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize Olric cache client; cache endpoints disabled", zap.Error(olricErr))
-		gw.startOlricReconnectLoop(olricCfg)
 	} else {
-		gw.setOlricClient(olricClient)
+		gw.olricClient = olricClient
 		logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client ready",
 			zap.Strings("servers", olricCfg.Servers),
 			zap.Duration("timeout", olricCfg.Timeout),
@@ -317,10 +305,10 @@ func (g *Gateway) Close() {
 	if g.sqlDB != nil {
 		_ = g.sqlDB.Close()
 	}
-	if client := g.getOlricClient(); client != nil {
+	if g.olricClient != nil {
 		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 		defer cancel()
-		if err := client.Close(ctx); err != nil {
+		if err := g.olricClient.Close(ctx); err != nil {
 			g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err))
 		}
 	}
@@ -342,78 +330,6 @@ func (g *Gateway) getLocalSubscribers(topic, namespace string) []*localSubscribe
 	return nil
 }

-func (g *Gateway) setOlricClient(client *olric.Client) {
-	g.olricMu.Lock()
-	defer g.olricMu.Unlock()
-	g.olricClient = client
-}
-
-func (g *Gateway) getOlricClient() *olric.Client {
-	g.olricMu.RLock()
-	defer g.olricMu.RUnlock()
-	return g.olricClient
-}
-
-func (g *Gateway) startOlricReconnectLoop(cfg olric.Config) {
-	go func() {
-		retryDelay := 5 * time.Second
-		for {
-			client, err := initializeOlricClientWithRetry(cfg, g.logger)
-			if err == nil {
-				g.setOlricClient(client)
-				g.logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client connected after background retries",
-					zap.Strings("servers", cfg.Servers),
-					zap.Duration("timeout", cfg.Timeout))
-				return
-			}
-
-			g.logger.ComponentWarn(logging.ComponentGeneral, "Olric cache client reconnect failed",
-				zap.Duration("retry_in", retryDelay),
-				zap.Error(err))
-
-			time.Sleep(retryDelay)
-			if retryDelay < olricInitMaxBackoff {
-				retryDelay *= 2
-				if retryDelay > olricInitMaxBackoff {
-					retryDelay = olricInitMaxBackoff
-				}
-			}
-		}
-	}()
-}
-
-func initializeOlricClientWithRetry(cfg olric.Config, logger *logging.ColoredLogger) (*olric.Client, error) {
-	backoff := olricInitInitialBackoff
-
-	for attempt := 1; attempt <= olricInitMaxAttempts; attempt++ {
-		client, err := olric.NewClient(cfg, logger.Logger)
-		if err == nil {
-			if attempt > 1 {
-				logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client initialized after retries",
-					zap.Int("attempts", attempt))
-			}
-			return client, nil
-		}
-
-		logger.ComponentWarn(logging.ComponentGeneral, "Olric cache client init attempt failed",
-			zap.Int("attempt", attempt),
-			zap.Duration("retry_in", backoff),
-			zap.Error(err))
-
-		if attempt == olricInitMaxAttempts {
-			return nil, fmt.Errorf("failed to initialize Olric cache client after %d attempts: %w", attempt, err)
-		}
-
-		time.Sleep(backoff)
-		backoff *= 2
-		if backoff > olricInitMaxBackoff {
-			backoff = olricInitMaxBackoff
-		}
-	}
-
-	return nil, fmt.Errorf("failed to initialize Olric cache client")
-}
-
 // discoverOlricServers discovers Olric server addresses from LibP2P peers
 // Returns a list of IP:port addresses where Olric servers are expected to run (port 3320)
 func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger) []string {
@@ -468,10 +384,10 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
 		}
 	}

-	// Also check peers from config
+	// Also check bootstrap peers from config
 	if cfg := networkClient.Config(); cfg != nil {
-		for _, peerAddr := range cfg.BootstrapPeers {
-			ma, err := multiaddr.NewMultiaddr(peerAddr)
+		for _, bootstrapAddr := range cfg.BootstrapPeers {
+			ma, err := multiaddr.NewMultiaddr(bootstrapAddr)
 			if err != nil {
 				continue
 			}
@@ -517,7 +433,7 @@ type ipfsDiscoveryResult struct {
 }

 // discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files
-// Checks node-1.yaml through node-5.yaml for IPFS configuration
+// Checks bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
 func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
 	homeDir, err := os.UserHomeDir()
 	if err != nil {
@@ -525,10 +441,10 @@ func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
 		return ipfsDiscoveryResult{}
 	}

-	configDir := filepath.Join(homeDir, ".orama")
+	configDir := filepath.Join(homeDir, ".debros")

-	// Try all node config files for IPFS settings
-	configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"}
+	// Try bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
+	configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}

 	for _, filename := range configFiles {
 		configPath := filepath.Join(configDir, filename)
@@ -1,257 +0,0 @@
package gateway

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"go.uber.org/zap"

	"github.com/DeBrosOfficial/network/pkg/config"
	"github.com/DeBrosOfficial/network/pkg/logging"
)

// HTTPGateway is the main reverse proxy router
type HTTPGateway struct {
	logger         *logging.ColoredLogger
	config         *config.HTTPGatewayConfig
	router         chi.Router
	reverseProxies map[string]*httputil.ReverseProxy
	mu             sync.RWMutex
	server         *http.Server
}

// NewHTTPGateway creates a new HTTP reverse proxy gateway
func NewHTTPGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPGateway, error) {
	if !cfg.Enabled {
		return nil, nil
	}

	if logger == nil {
		var err error
		logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
		if err != nil {
			return nil, fmt.Errorf("failed to create logger: %w", err)
		}
	}

	gateway := &HTTPGateway{
		logger:         logger,
		config:         cfg,
		router:         chi.NewRouter(),
		reverseProxies: make(map[string]*httputil.ReverseProxy),
	}

	// Set up router middleware
	gateway.router.Use(middleware.RequestID)
	gateway.router.Use(middleware.Logger)
	gateway.router.Use(middleware.Recoverer)
	gateway.router.Use(middleware.Timeout(30 * time.Second))

	// Add health check endpoint
	gateway.router.Get("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		fmt.Fprintf(w, `{"status":"ok","node":"%s"}`, cfg.NodeName)
	})

	// Initialize reverse proxies and routes
	if err := gateway.initializeRoutes(); err != nil {
		return nil, fmt.Errorf("failed to initialize routes: %w", err)
	}

	gateway.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway initialized",
		zap.String("node_name", cfg.NodeName),
		zap.String("listen_addr", cfg.ListenAddr),
		zap.Int("routes", len(cfg.Routes)),
	)

	return gateway, nil
}

// initializeRoutes sets up all reverse proxy routes
func (hg *HTTPGateway) initializeRoutes() error {
	hg.mu.Lock()
	defer hg.mu.Unlock()

	for routeName, routeConfig := range hg.config.Routes {
		// Validate backend URL
		_, err := url.Parse(routeConfig.BackendURL)
		if err != nil {
			return fmt.Errorf("invalid backend URL for route %s: %w", routeName, err)
		}

		// Create reverse proxy with custom transport
		proxy := &httputil.ReverseProxy{
			Rewrite: func(r *httputil.ProxyRequest) {
				// Keep original host for Host header
				r.Out.Host = r.In.Host
				// Set X-Forwarded-For header for logging
				r.Out.Header.Set("X-Forwarded-For", getClientIP(r.In))
			},
			ErrorHandler: hg.proxyErrorHandler(routeName),
		}

		// Set timeout on transport
		if routeConfig.Timeout > 0 {
			proxy.Transport = &http.Transport{
				Dial: (&net.Dialer{
					Timeout: routeConfig.Timeout,
				}).Dial,
				ResponseHeaderTimeout: routeConfig.Timeout,
			}
		}

		hg.reverseProxies[routeName] = proxy

		// Register route handler
		hg.registerRouteHandler(routeName, routeConfig, proxy)

		hg.logger.ComponentInfo(logging.ComponentGeneral, "Route initialized",
			zap.String("name", routeName),
			zap.String("path", routeConfig.PathPrefix),
			zap.String("backend", routeConfig.BackendURL),
		)
	}

	return nil
}

// registerRouteHandler registers a route handler with the router
func (hg *HTTPGateway) registerRouteHandler(name string, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
	pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")

	// Use Mount instead of Route for wildcard path handling
	hg.router.Mount(pathPrefix, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		hg.handleProxyRequest(w, req, routeConfig, proxy)
	}))
}

// handleProxyRequest handles a reverse proxy request
func (hg *HTTPGateway) handleProxyRequest(w http.ResponseWriter, req *http.Request, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
	// Strip path prefix before forwarding
	originalPath := req.URL.Path
	pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")

	if strings.HasPrefix(req.URL.Path, pathPrefix) {
		// Remove the prefix but keep leading slash
		strippedPath := strings.TrimPrefix(req.URL.Path, pathPrefix)
		if strippedPath == "" {
			strippedPath = "/"
		}
		req.URL.Path = strippedPath
	}

	// Update request URL to point to backend
	backendURL, _ := url.Parse(routeConfig.BackendURL)
	req.URL.Scheme = backendURL.Scheme
	req.URL.Host = backendURL.Host

	// Log the proxy request
	hg.logger.ComponentInfo(logging.ComponentGeneral, "Proxy request",
		zap.String("original_path", originalPath),
		zap.String("stripped_path", req.URL.Path),
		zap.String("backend", routeConfig.BackendURL),
		zap.String("method", req.Method),
		zap.String("client_ip", getClientIP(req)),
	)

	// Handle WebSocket upgrades if configured
	if routeConfig.WebSocket && isWebSocketRequest(req) {
		hg.logger.ComponentInfo(logging.ComponentGeneral, "WebSocket upgrade detected",
			zap.String("path", originalPath),
		)
	}

	// Forward the request
	proxy.ServeHTTP(w, req)
}

// proxyErrorHandler returns an error handler for the reverse proxy
func (hg *HTTPGateway) proxyErrorHandler(routeName string) func(http.ResponseWriter, *http.Request, error) {
	return func(w http.ResponseWriter, r *http.Request, err error) {
		hg.logger.ComponentError(logging.ComponentGeneral, "Proxy error",
			zap.String("route", routeName),
			zap.String("path", r.URL.Path),
			zap.String("method", r.Method),
			zap.Error(err),
		)

		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusBadGateway)
		fmt.Fprintf(w, `{"error":"gateway error","route":"%s","detail":"%s"}`, routeName, err.Error())
	}
}

// Start starts the HTTP gateway server
func (hg *HTTPGateway) Start(ctx context.Context) error {
	if hg == nil || !hg.config.Enabled {
		return nil
	}

	hg.server = &http.Server{
		Addr:    hg.config.ListenAddr,
		Handler: hg.router,
	}

	// Listen for connections
	listener, err := net.Listen("tcp", hg.config.ListenAddr)
	if err != nil {
		return fmt.Errorf("failed to listen on %s: %w", hg.config.ListenAddr, err)
	}

	hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway server starting",
		zap.String("node_name", hg.config.NodeName),
		zap.String("listen_addr", hg.config.ListenAddr),
	)

	// Serve in a goroutine
	go func() {
		if err := hg.server.Serve(listener); err != nil && err != http.ErrServerClosed {
			hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway server error", zap.Error(err))
		}
	}()

	// Wait for context cancellation
	<-ctx.Done()
	return hg.Stop()
}

// Stop gracefully stops the HTTP gateway server
func (hg *HTTPGateway) Stop() error {
	if hg == nil || hg.server == nil {
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutting down")

	if err := hg.server.Shutdown(ctx); err != nil {
		hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway shutdown error", zap.Error(err))
		return err
	}

	hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutdown complete")
	return nil
}

// Router returns the chi router for testing or extension
func (hg *HTTPGateway) Router() chi.Router {
	return hg.router
}

// isWebSocketRequest checks if a request is a WebSocket upgrade request
func isWebSocketRequest(r *http.Request) bool {
	return r.Header.Get("Connection") == "Upgrade" &&
		r.Header.Get("Upgrade") == "websocket"
}
@ -1,237 +0,0 @@
package gateway

import (
    "context"
    "crypto/tls"
    "fmt"
    "net/http"
    "strings"
    "time"

    "go.uber.org/zap"
    "golang.org/x/crypto/acme"
    "golang.org/x/crypto/acme/autocert"

    "github.com/DeBrosOfficial/network/pkg/config"
    "github.com/DeBrosOfficial/network/pkg/logging"
)

// HTTPSGateway extends HTTPGateway with HTTPS/TLS support
type HTTPSGateway struct {
    *HTTPGateway
    httpsConfig *config.HTTPSConfig
    certManager *autocert.Manager
    httpsServer *http.Server
    httpServer  *http.Server // For ACME challenge and redirect
}

// NewHTTPSGateway creates a new HTTPS gateway with Let's Encrypt autocert
func NewHTTPSGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPSGateway, error) {
    // First create the base HTTP gateway
    base, err := NewHTTPGateway(logger, cfg)
    if err != nil {
        return nil, err
    }
    if base == nil {
        return nil, nil
    }

    if !cfg.HTTPS.Enabled {
        // Return base gateway wrapped in HTTPSGateway for consistent interface
        return &HTTPSGateway{HTTPGateway: base}, nil
    }

    gateway := &HTTPSGateway{
        HTTPGateway: base,
        httpsConfig: &cfg.HTTPS,
    }

    // Check if using self-signed certificates or Let's Encrypt
    if cfg.HTTPS.UseSelfSigned || (cfg.HTTPS.CertFile != "" && cfg.HTTPS.KeyFile != "") {
        // Using self-signed or pre-existing certificates
        logger.ComponentInfo(logging.ComponentGeneral, "Using self-signed or pre-configured certificates for HTTPS",
            zap.String("domain", cfg.HTTPS.Domain),
            zap.String("cert_file", cfg.HTTPS.CertFile),
            zap.String("key_file", cfg.HTTPS.KeyFile),
        )
        // Don't set certManager - will use CertFile/KeyFile from config
    } else if cfg.HTTPS.AutoCert {
        // Use Let's Encrypt STAGING (consistent with SNI gateway)
        cacheDir := cfg.HTTPS.CacheDir
        if cacheDir == "" {
            cacheDir = "/home/debros/.orama/tls-cache"
        }

        // Use Let's Encrypt STAGING - provides higher rate limits for testing/development
        directoryURL := "https://acme-staging-v02.api.letsencrypt.org/directory"
        logger.ComponentWarn(logging.ComponentGeneral,
            "Using Let's Encrypt STAGING - certificates will not be trusted by production clients",
            zap.String("domain", cfg.HTTPS.Domain),
        )

        gateway.certManager = &autocert.Manager{
            Prompt:     autocert.AcceptTOS,
            HostPolicy: autocert.HostWhitelist(cfg.HTTPS.Domain),
            Cache:      autocert.DirCache(cacheDir),
            Email:      cfg.HTTPS.Email,
            Client: &acme.Client{
                DirectoryURL: directoryURL,
            },
        }

        logger.ComponentInfo(logging.ComponentGeneral, "Let's Encrypt autocert configured",
            zap.String("domain", cfg.HTTPS.Domain),
            zap.String("cache_dir", cacheDir),
            zap.String("acme_environment", "staging"),
        )
    }

    return gateway, nil
}

// Start starts both HTTP (for ACME) and HTTPS servers
func (g *HTTPSGateway) Start(ctx context.Context) error {
    if g == nil {
        return nil
    }

    // If HTTPS is not enabled, just start the base HTTP gateway
    if !g.httpsConfig.Enabled {
        return g.HTTPGateway.Start(ctx)
    }

    httpPort := g.httpsConfig.HTTPPort
    if httpPort == 0 {
        httpPort = 80
    }
    httpsPort := g.httpsConfig.HTTPSPort
    if httpsPort == 0 {
        httpsPort = 443
    }

    // Start HTTP server for ACME challenge and redirect
    g.httpServer = &http.Server{
        Addr:    fmt.Sprintf(":%d", httpPort),
        Handler: g.httpHandler(),
    }

    go func() {
        g.logger.ComponentInfo(logging.ComponentGeneral, "HTTP server starting (ACME/redirect)",
            zap.Int("port", httpPort),
        )
        if err := g.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
            g.logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
        }
    }()

    // Set up TLS config
    tlsConfig := &tls.Config{
        MinVersion: tls.VersionTLS12,
    }

    if g.certManager != nil {
        tlsConfig.GetCertificate = g.certManager.GetCertificate
    } else if g.httpsConfig.CertFile != "" && g.httpsConfig.KeyFile != "" {
        cert, err := tls.LoadX509KeyPair(g.httpsConfig.CertFile, g.httpsConfig.KeyFile)
        if err != nil {
            return fmt.Errorf("failed to load TLS certificate: %w", err)
        }
        tlsConfig.Certificates = []tls.Certificate{cert}
    } else {
        return fmt.Errorf("HTTPS enabled but no certificate source configured")
    }

    // Start HTTPS server
    g.httpsServer = &http.Server{
        Addr:      fmt.Sprintf(":%d", httpsPort),
        Handler:   g.router,
        TLSConfig: tlsConfig,
    }

    listener, err := tls.Listen("tcp", g.httpsServer.Addr, tlsConfig)
    if err != nil {
        return fmt.Errorf("failed to create TLS listener: %w", err)
    }

    g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway starting",
        zap.String("domain", g.httpsConfig.Domain),
        zap.Int("port", httpsPort),
    )

    go func() {
        if err := g.httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
            g.logger.ComponentError(logging.ComponentGeneral, "HTTPS server error", zap.Error(err))
        }
    }()

    // Wait for context cancellation
    <-ctx.Done()
    return g.Stop()
}

// httpHandler returns a handler for the HTTP server (ACME challenge + redirect)
func (g *HTTPSGateway) httpHandler() http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // Handle ACME challenge
        if g.certManager != nil && strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
            g.certManager.HTTPHandler(nil).ServeHTTP(w, r)
            return
        }

        // Redirect HTTP to HTTPS
        httpsPort := g.httpsConfig.HTTPSPort
        if httpsPort == 0 {
            httpsPort = 443
        }

        target := "https://" + r.Host + r.URL.RequestURI()
        if httpsPort != 443 {
            host := r.Host
            if idx := strings.LastIndex(host, ":"); idx > 0 {
                host = host[:idx]
            }
            target = fmt.Sprintf("https://%s:%d%s", host, httpsPort, r.URL.RequestURI())
        }

        http.Redirect(w, r, target, http.StatusMovedPermanently)
    })
}

// Stop gracefully stops both HTTP and HTTPS servers
func (g *HTTPSGateway) Stop() error {
    if g == nil {
        return nil
    }

    g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutting down")

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    var errs []error

    if g.httpServer != nil {
        if err := g.httpServer.Shutdown(ctx); err != nil {
            errs = append(errs, fmt.Errorf("HTTP server shutdown: %w", err))
        }
    }

    if g.httpsServer != nil {
        if err := g.httpsServer.Shutdown(ctx); err != nil {
            errs = append(errs, fmt.Errorf("HTTPS server shutdown: %w", err))
        }
    }

    if g.HTTPGateway.server != nil {
        if err := g.HTTPGateway.Stop(); err != nil {
            errs = append(errs, fmt.Errorf("base gateway shutdown: %w", err))
        }
    }

    if len(errs) > 0 {
        return fmt.Errorf("shutdown errors: %v", errs)
    }

    g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutdown complete")
    return nil
}
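A minimal sketch of how the HTTPS gateway above could be wired up, assuming a caller that already has a gateway config and colored logger; the gateway import path, domain, email, and the rest of the HTTPGatewayConfig fields are assumptions, not taken from the repository:

package main

import (
    "context"
    "log"

    "github.com/DeBrosOfficial/network/pkg/config"
    "github.com/DeBrosOfficial/network/pkg/gateway" // assumed import path
    "github.com/DeBrosOfficial/network/pkg/logging"
)

func main() {
    logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true)
    if err != nil {
        log.Fatal(err)
    }
    cfg := &config.HTTPGatewayConfig{
        HTTPS: config.HTTPSConfig{
            Enabled:   true,
            Domain:    "node-1.example.org", // placeholder
            AutoCert:  true,
            Email:     "ops@example.org", // placeholder
            HTTPPort:  80,
            HTTPSPort: 443,
        },
    }
    gw, err := gateway.NewHTTPSGateway(logger, cfg)
    if err != nil {
        log.Fatal(err)
    }
    // Blocks until the context is cancelled, then shuts both servers down.
    if err := gw.Start(context.Background()); err != nil {
        log.Fatal(err)
    }
}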
@ -178,13 +178,8 @@ func extractAPIKey(r *http.Request) string {
 
 // isPublicPath returns true for routes that should be accessible without API key auth
 func isPublicPath(p string) bool {
-	// Allow ACME challenges for Let's Encrypt certificate provisioning
-	if strings.HasPrefix(p, "/.well-known/acme-challenge/") {
-		return true
-	}
-
 	switch p {
-	case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key", "/v1/network/status", "/v1/network/peers":
+	case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key":
 		return true
 	default:
 		return false
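On the left-hand (main) side of this hunk, ACME challenge paths bypass API-key auth in addition to the listed endpoints. A small illustrative test of that behaviour, assuming it sits in package gateway; the storage path is a made-up example of a non-public route:

package gateway

import "testing"

func TestIsPublicPathSketch(t *testing.T) {
    if !isPublicPath("/.well-known/acme-challenge/some-token") {
        t.Error("ACME challenge paths should be public")
    }
    if !isPublicPath("/v1/health") {
        t.Error("/v1/health should be public")
    }
    if isPublicPath("/v1/storage/get") { // hypothetical protected route
        t.Error("unlisted paths should require an API key")
    }
}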
@ -1,183 +0,0 @@
package gateway

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"

    "go.uber.org/zap"
)

// PushNotificationService handles sending push notifications via Expo
type PushNotificationService struct {
    logger *zap.Logger
    client *http.Client
}

// ExpoTicket represents the response from Expo API
type ExpoTicket struct {
    ID    string `json:"id"`
    Error string `json:"error,omitempty"`
}

// ExpoPushMessage represents a message to send via Expo
type ExpoPushMessage struct {
    To       string                 `json:"to"`
    Title    string                 `json:"title"`
    Body     string                 `json:"body"`
    Data     map[string]interface{} `json:"data,omitempty"`
    Sound    string                 `json:"sound,omitempty"`
    Badge    int                    `json:"badge,omitempty"`
    Priority string                 `json:"priority,omitempty"`
    // iOS specific
    MutableContent bool   `json:"mutableContent,omitempty"`
    IosIcon        string `json:"iosIcon,omitempty"`
    // Android specific
    AndroidBigLargeIcon string `json:"androidBigLargeIcon,omitempty"`
    ChannelID           string `json:"channelId,omitempty"`
}

// NewPushNotificationService creates a new push notification service
func NewPushNotificationService(logger *zap.Logger) *PushNotificationService {
    return &PushNotificationService{
        logger: logger,
        client: &http.Client{
            Timeout: 10 * time.Second,
        },
    }
}

// SendNotification sends a push notification via Expo
func (pns *PushNotificationService) SendNotification(
    ctx context.Context,
    expoPushToken string,
    title string,
    body string,
    data map[string]interface{},
    avatarURL string,
) error {
    if expoPushToken == "" {
        return fmt.Errorf("empty expo push token")
    }

    message := ExpoPushMessage{
        To:       expoPushToken,
        Title:    title,
        Body:     body,
        Data:     data,
        Sound:    "default",
        Priority: "high",
        // Enable mutable content for iOS to allow Notification Service Extension
        MutableContent:      true,
        ChannelID:           "messages",
        AndroidBigLargeIcon: avatarURL,
    }

    // For iOS, include avatar in data so Notification Service Extension can fetch it
    if avatarURL != "" {
        if message.Data == nil {
            message.Data = make(map[string]interface{})
        }
        message.Data["avatar_url"] = avatarURL
    }

    return pns.sendExpoRequest(ctx, message)
}

// SendBulkNotifications sends notifications to multiple users
func (pns *PushNotificationService) SendBulkNotifications(
    ctx context.Context,
    expoPushTokens []string,
    title string,
    body string,
    data map[string]interface{},
    avatarURL string,
) []error {
    errors := make([]error, 0)

    for _, token := range expoPushTokens {
        if err := pns.SendNotification(ctx, token, title, body, data, avatarURL); err != nil {
            errors = append(errors, fmt.Errorf("failed to send to token %s: %w", token, err))
        }
    }

    return errors
}

// sendExpoRequest sends a request to the Expo push notification API
func (pns *PushNotificationService) sendExpoRequest(ctx context.Context, message ExpoPushMessage) error {
    const expoAPIURL = "https://exp.host/--/api/v2/push/send"

    body, err := json.Marshal(message)
    if err != nil {
        pns.logger.Error("failed to marshal push notification",
            zap.Error(err),
            zap.String("to", message.To))
        return fmt.Errorf("marshal error: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, expoAPIURL, bytes.NewBuffer(body))
    if err != nil {
        pns.logger.Error("failed to create push notification request",
            zap.Error(err),
            zap.String("to", message.To))
        return fmt.Errorf("request creation error: %w", err)
    }

    req.Header.Set("Content-Type", "application/json")

    resp, err := pns.client.Do(req)
    if err != nil {
        pns.logger.Error("failed to send push notification",
            zap.Error(err),
            zap.String("to", message.To))
        return fmt.Errorf("send error: %w", err)
    }
    defer resp.Body.Close()

    respBody, err := io.ReadAll(resp.Body)
    if err != nil {
        pns.logger.Error("failed to read push notification response",
            zap.Error(err),
            zap.String("to", message.To))
        return fmt.Errorf("response read error: %w", err)
    }

    // Check for API errors
    if resp.StatusCode != http.StatusOK {
        pns.logger.Warn("push notification API error",
            zap.Int("status_code", resp.StatusCode),
            zap.String("response", string(respBody)),
            zap.String("to", message.To))
        return fmt.Errorf("API returned status %d: %s", resp.StatusCode, string(respBody))
    }

    // Parse response
    var tickets []ExpoTicket
    if err := json.Unmarshal(respBody, &tickets); err != nil {
        pns.logger.Error("failed to parse push notification response",
            zap.Error(err),
            zap.String("response", string(respBody)))
        return fmt.Errorf("parse error: %w", err)
    }

    // Check for errors in tickets
    for _, ticket := range tickets {
        if ticket.Error != "" {
            pns.logger.Warn("push notification error in ticket",
                zap.String("error", ticket.Error),
                zap.String("to", message.To))
            return fmt.Errorf("ticket error: %s", ticket.Error)
        }
    }

    pns.logger.Info("push notification sent successfully",
        zap.String("to", message.To),
        zap.String("title", message.Title))

    return nil
}
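A minimal usage sketch for the Expo push service above; the gateway import path, push token, payload, and avatar URL are placeholders:

package main

import (
    "context"

    "github.com/DeBrosOfficial/network/pkg/gateway" // assumed import path
    "go.uber.org/zap"
)

func main() {
    logger, _ := zap.NewProduction()
    pns := gateway.NewPushNotificationService(logger)
    err := pns.SendNotification(
        context.Background(),
        "ExponentPushToken[xxxxxxxxxxxxxxxxxxxxxx]", // placeholder token
        "New message",
        "You have a new message",
        map[string]interface{}{"conversation_id": "abc123"}, // placeholder data
        "https://example.org/avatar.png",                    // placeholder avatar
    )
    if err != nil {
        logger.Warn("push failed", zap.Error(err))
    }
}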
@ -386,11 +386,6 @@ func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) {
 		writeError(w, http.StatusInternalServerError, err.Error())
 		return
 	}
-	// Override with the node's actual peer ID if available
-	// (the client's embedded host has a different temporary peer ID)
-	if g.nodePeerID != "" {
-		status.PeerID = g.nodePeerID
-	}
 	writeJSON(w, http.StatusOK, status)
 }
 
@ -1,211 +0,0 @@
package gateway

import (
    "context"
    "crypto/tls"
    "fmt"
    "io"
    "net"
    "strings"
    "sync"
    "time"

    "go.uber.org/zap"

    "github.com/DeBrosOfficial/network/pkg/config"
    "github.com/DeBrosOfficial/network/pkg/logging"
)

// TCPSNIGateway handles SNI-based TCP routing for services like RQLite Raft, IPFS, etc.
type TCPSNIGateway struct {
    logger    *logging.ColoredLogger
    config    *config.SNIConfig
    listener  net.Listener
    routes    map[string]string
    mu        sync.RWMutex
    running   bool
    ctx       context.Context
    cancel    context.CancelFunc
    wg        sync.WaitGroup
    tlsConfig *tls.Config
}

// NewTCPSNIGateway creates a new TCP SNI-based gateway
func NewTCPSNIGateway(logger *logging.ColoredLogger, cfg *config.SNIConfig) (*TCPSNIGateway, error) {
    if !cfg.Enabled {
        return nil, nil
    }

    if logger == nil {
        var err error
        logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
        if err != nil {
            return nil, fmt.Errorf("failed to create logger: %w", err)
        }
    }

    cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
    if err != nil {
        return nil, fmt.Errorf("failed to load TLS certificate: %w", err)
    }

    ctx, cancel := context.WithCancel(context.Background())

    gateway := &TCPSNIGateway{
        logger: logger,
        config: cfg,
        routes: make(map[string]string),
        ctx:    ctx,
        cancel: cancel,
        tlsConfig: &tls.Config{
            Certificates: []tls.Certificate{cert},
        },
    }

    for hostname, backend := range cfg.Routes {
        gateway.routes[strings.ToLower(hostname)] = backend
    }

    logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway initialized",
        zap.String("listen_addr", cfg.ListenAddr),
        zap.Int("routes", len(cfg.Routes)),
    )

    return gateway, nil
}

// Start starts the TCP SNI gateway server
func (g *TCPSNIGateway) Start(ctx context.Context) error {
    if g == nil || !g.config.Enabled {
        return nil
    }

    listener, err := tls.Listen("tcp", g.config.ListenAddr, g.tlsConfig)
    if err != nil {
        return fmt.Errorf("failed to listen on %s: %w", g.config.ListenAddr, err)
    }
    g.listener = listener
    g.running = true

    g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway starting",
        zap.String("listen_addr", g.config.ListenAddr),
    )

    g.wg.Add(1)
    go func() {
        defer g.wg.Done()
        for {
            conn, err := listener.Accept()
            if err != nil {
                select {
                case <-g.ctx.Done():
                    return
                default:
                    g.logger.ComponentError(logging.ComponentGeneral, "Accept error", zap.Error(err))
                    continue
                }
            }
            g.wg.Add(1)
            go func(c net.Conn) {
                defer g.wg.Done()
                g.handleConnection(c)
            }(conn)
        }
    }()

    select {
    case <-ctx.Done():
    case <-g.ctx.Done():
    }

    return g.Stop()
}

// handleConnection routes a TCP connection based on SNI
func (g *TCPSNIGateway) handleConnection(conn net.Conn) {
    defer conn.Close()

    tlsConn, ok := conn.(*tls.Conn)
    if !ok {
        g.logger.ComponentError(logging.ComponentGeneral, "Expected TLS connection")
        return
    }

    if err := tlsConn.Handshake(); err != nil {
        g.logger.ComponentError(logging.ComponentGeneral, "TLS handshake failed", zap.Error(err))
        return
    }

    serverName := strings.ToLower(tlsConn.ConnectionState().ServerName)
    if serverName == "" {
        g.logger.ComponentError(logging.ComponentGeneral, "No SNI provided")
        return
    }

    g.mu.RLock()
    backend, found := g.routes[serverName]
    if !found {
        for prefix, be := range g.routes {
            if strings.HasPrefix(serverName, prefix+".") {
                backend = be
                found = true
                break
            }
        }
    }
    g.mu.RUnlock()

    if !found {
        g.logger.ComponentError(logging.ComponentGeneral, "No route for SNI",
            zap.String("server_name", serverName),
        )
        return
    }

    g.logger.ComponentInfo(logging.ComponentGeneral, "Routing connection",
        zap.String("server_name", serverName),
        zap.String("backend", backend),
    )

    backendConn, err := net.DialTimeout("tcp", backend, 10*time.Second)
    if err != nil {
        g.logger.ComponentError(logging.ComponentGeneral, "Backend connect failed",
            zap.String("backend", backend),
            zap.Error(err),
        )
        return
    }
    defer backendConn.Close()

    errc := make(chan error, 2)
    go func() { _, err := io.Copy(backendConn, tlsConn); errc <- err }()
    go func() { _, err := io.Copy(tlsConn, backendConn); errc <- err }()
    <-errc
}

// Stop gracefully stops the TCP SNI gateway
func (g *TCPSNIGateway) Stop() error {
    if g == nil || !g.running {
        return nil
    }

    g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutting down")
    g.cancel()

    if g.listener != nil {
        g.listener.Close()
    }

    done := make(chan struct{})
    go func() { g.wg.Wait(); close(done) }()

    select {
    case <-done:
    case <-time.After(10 * time.Second):
        g.logger.ComponentWarn(logging.ComponentGeneral, "Shutdown timeout")
    }

    g.running = false
    g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutdown complete")
    return nil
}
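A sketch of a route table for the SNI gateway above; the hostnames mirror the ipfs./ipfs-cluster./olric. names checked by the installer later in this diff, while the gateway import path, listen address, backend ports, and certificate paths are assumptions:

package main

import (
    "context"
    "log"

    "github.com/DeBrosOfficial/network/pkg/config"
    "github.com/DeBrosOfficial/network/pkg/gateway" // assumed import path
)

func main() {
    cfg := &config.SNIConfig{
        Enabled:    true,
        ListenAddr: ":7001",                              // assumed SNI port
        CertFile:   "/home/debros/.orama/certs/node.crt", // assumed path
        KeyFile:    "/home/debros/.orama/certs/node.key", // assumed path
        Routes: map[string]string{
            "ipfs.node-1.example.org":         "127.0.0.1:4001",
            "ipfs-cluster.node-1.example.org": "127.0.0.1:9096",
            "olric.node-1.example.org":        "127.0.0.1:3320",
        },
    }
    sni, err := gateway.NewTCPSNIGateway(nil, cfg) // nil logger: the constructor builds one
    if err != nil {
        log.Fatal(err)
    }
    if err := sni.Start(context.Background()); err != nil {
        log.Fatal(err)
    }
}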
@ -1,956 +0,0 @@
// Package installer provides an interactive TUI installer for Orama Network
package installer

import (
    "encoding/json"
    "fmt"
    "net"
    "net/http"
    "os"
    "path/filepath"
    "regexp"
    "strings"
    "time"

    "github.com/charmbracelet/bubbles/textinput"
    tea "github.com/charmbracelet/bubbletea"
    "github.com/charmbracelet/lipgloss"

    "github.com/DeBrosOfficial/network/pkg/certutil"
    "github.com/DeBrosOfficial/network/pkg/tlsutil"
)

// InstallerConfig holds the configuration gathered from the TUI
type InstallerConfig struct {
    VpsIP          string
    Domain         string
    PeerDomain     string   // Domain of existing node to join
    PeerIP         string   // Resolved IP of peer domain (for Raft join)
    JoinAddress    string   // Auto-populated: {PeerIP}:7002 (direct RQLite TLS)
    Peers          []string // Auto-populated: /dns4/{PeerDomain}/tcp/4001/p2p/{PeerID}
    ClusterSecret  string
    SwarmKeyHex    string   // 64-hex IPFS swarm key (for joining private network)
    IPFSPeerID     string   // IPFS peer ID (auto-discovered from peer domain)
    IPFSSwarmAddrs []string // IPFS swarm addresses (auto-discovered from peer domain)
    // IPFS Cluster peer info for cluster discovery
    IPFSClusterPeerID string   // IPFS Cluster peer ID (auto-discovered from peer domain)
    IPFSClusterAddrs  []string // IPFS Cluster addresses (auto-discovered from peer domain)
    Branch      string
    IsFirstNode bool
    NoPull      bool
}

// Step represents a step in the installation wizard
type Step int

const (
    StepWelcome Step = iota
    StepNodeType
    StepVpsIP
    StepDomain
    StepPeerDomain // Domain of existing node to join (replaces StepJoinAddress)
    StepClusterSecret
    StepSwarmKey // 64-hex swarm key for IPFS private network
    StepBranch
    StepNoPull
    StepConfirm
    StepInstalling
    StepDone
)

// Model is the bubbletea model for the installer
type Model struct {
    step           Step
    config         InstallerConfig
    textInput      textinput.Model
    err            error
    width          int
    height         int
    installing     bool
    installOutput  []string
    cursor         int    // For selection menus
    discovering    bool   // Whether domain discovery is in progress
    discoveryInfo  string // Info message during discovery
    discoveredPeer string // Discovered peer ID from domain
    sniWarning     string // Warning about missing SNI DNS records (non-blocking)
}

// Styles
var (
    titleStyle = lipgloss.NewStyle().
            Bold(true).
            Foreground(lipgloss.Color("#00D4AA")).
            MarginBottom(1)

    subtitleStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#888888")).
            MarginBottom(1)

    focusedStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#00D4AA"))

    blurredStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#666666"))

    cursorStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#00D4AA"))

    helpStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#626262")).
            MarginTop(1)

    errorStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#FF6B6B")).
            Bold(true)

    successStyle = lipgloss.NewStyle().
            Foreground(lipgloss.Color("#00D4AA")).
            Bold(true)

    boxStyle = lipgloss.NewStyle().
            Border(lipgloss.RoundedBorder()).
            BorderForeground(lipgloss.Color("#00D4AA")).
            Padding(1, 2)
)

// NewModel creates a new installer model
func NewModel() Model {
    ti := textinput.New()
    ti.Focus()
    ti.CharLimit = 256
    ti.Width = 50

    return Model{
        step:      StepWelcome,
        textInput: ti,
        config: InstallerConfig{
            Branch: "main",
        },
    }
}

// Init initializes the model
func (m Model) Init() tea.Cmd {
    return textinput.Blink
}

// Update handles messages
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    switch msg := msg.(type) {
    case tea.WindowSizeMsg:
        m.width = msg.Width
        m.height = msg.Height
        return m, nil

    case installCompleteMsg:
        m.step = StepDone
        return m, nil

    case tea.KeyMsg:
        switch msg.String() {
        case "ctrl+c", "q":
            if m.step != StepInstalling {
                return m, tea.Quit
            }

        case "enter":
            return m.handleEnter()

        case "up", "k":
            if m.step == StepNodeType || m.step == StepBranch || m.step == StepNoPull {
                if m.cursor > 0 {
                    m.cursor--
                }
            }

        case "down", "j":
            if m.step == StepNodeType || m.step == StepBranch || m.step == StepNoPull {
                if m.cursor < 1 {
                    m.cursor++
                }
            }

        case "esc":
            if m.step > StepWelcome && m.step < StepInstalling {
                m.step--
                m.err = nil
                m.setupStepInput()
            }
        }
    }

    // Update text input for input steps
    if m.step == StepVpsIP || m.step == StepDomain || m.step == StepPeerDomain || m.step == StepClusterSecret || m.step == StepSwarmKey {
        var cmd tea.Cmd
        m.textInput, cmd = m.textInput.Update(msg)
        return m, cmd
    }

    return m, nil
}

func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
    switch m.step {
    case StepWelcome:
        m.step = StepNodeType
        m.cursor = 0

    case StepNodeType:
        m.config.IsFirstNode = m.cursor == 0
        m.step = StepVpsIP
        m.setupStepInput()

    case StepVpsIP:
        ip := strings.TrimSpace(m.textInput.Value())
        if err := validateIP(ip); err != nil {
            m.err = err
            return m, nil
        }
        m.config.VpsIP = ip
        m.err = nil
        m.step = StepDomain
        m.setupStepInput()

    case StepDomain:
        domain := strings.TrimSpace(m.textInput.Value())
        if err := validateDomain(domain); err != nil {
            m.err = err
            return m, nil
        }

        // Check SNI DNS records for this domain (non-blocking warning)
        m.discovering = true
        m.discoveryInfo = "Checking SNI DNS records for " + domain + "..."

        if warning := validateSNIDNSRecords(domain); warning != "" {
            // Log warning but continue - SNI DNS is optional for single-node setups
            m.sniWarning = warning
        }

        m.discovering = false
        m.config.Domain = domain
        m.err = nil

        // Auto-generate self-signed certificates for this domain
        m.discovering = true
        m.discoveryInfo = "Generating SSL certificates for " + domain + "..."

        if err := ensureCertificatesForDomain(domain); err != nil {
            m.discovering = false
            m.err = fmt.Errorf("failed to generate certificates: %w", err)
            return m, nil
        }

        m.discovering = false

        if m.config.IsFirstNode {
            m.step = StepBranch
            m.cursor = 0
        } else {
            m.step = StepPeerDomain
            m.setupStepInput()
        }

    case StepPeerDomain:
        peerDomain := strings.TrimSpace(m.textInput.Value())
        if err := validateDomain(peerDomain); err != nil {
            m.err = err
            return m, nil
        }

        // Check SNI DNS records for peer domain (non-blocking warning)
        m.discovering = true
        m.discoveryInfo = "Checking SNI DNS records for " + peerDomain + "..."

        if warning := validateSNIDNSRecords(peerDomain); warning != "" {
            // Log warning but continue - peer might have different DNS setup
            m.sniWarning = warning
        }

        // Discover peer info from domain (try HTTPS first, then HTTP)
        m.discovering = true
        m.discoveryInfo = "Discovering peer from " + peerDomain + "..."

        discovery, err := discoverPeerFromDomain(peerDomain)
        m.discovering = false

        if err != nil {
            m.err = fmt.Errorf("failed to discover peer: %w", err)
            return m, nil
        }

        // Store discovered info
        m.config.PeerDomain = peerDomain
        m.discoveredPeer = discovery.PeerID

        // Resolve peer domain to IP for direct RQLite TLS connection
        // RQLite uses native TLS on port 7002 (not SNI gateway on 7001)
        peerIPs, err := net.LookupIP(peerDomain)
        if err != nil || len(peerIPs) == 0 {
            m.err = fmt.Errorf("failed to resolve peer domain %s to IP: %w", peerDomain, err)
            return m, nil
        }
        // Prefer IPv4
        var peerIP string
        for _, ip := range peerIPs {
            if ip.To4() != nil {
                peerIP = ip.String()
                break
            }
        }
        if peerIP == "" {
            peerIP = peerIPs[0].String()
        }
        m.config.PeerIP = peerIP

        // Auto-populate join address (direct RQLite TLS on port 7002) and bootstrap peers
        m.config.JoinAddress = fmt.Sprintf("%s:7002", peerIP)
        m.config.Peers = []string{
            fmt.Sprintf("/dns4/%s/tcp/4001/p2p/%s", peerDomain, discovery.PeerID),
        }

        // Store IPFS peer info for Peering.Peers configuration
        if discovery.IPFSPeerID != "" {
            m.config.IPFSPeerID = discovery.IPFSPeerID
            m.config.IPFSSwarmAddrs = discovery.IPFSSwarmAddrs
        }

        // Store IPFS Cluster peer info for cluster peer_addresses configuration
        if discovery.IPFSClusterPeerID != "" {
            m.config.IPFSClusterPeerID = discovery.IPFSClusterPeerID
            m.config.IPFSClusterAddrs = discovery.IPFSClusterAddrs
        }

        m.err = nil
        m.step = StepClusterSecret
        m.setupStepInput()

    case StepClusterSecret:
        secret := strings.TrimSpace(m.textInput.Value())
        if err := validateClusterSecret(secret); err != nil {
            m.err = err
            return m, nil
        }
        m.config.ClusterSecret = secret
        m.err = nil
        m.step = StepSwarmKey
        m.setupStepInput()

    case StepSwarmKey:
        swarmKey := strings.TrimSpace(m.textInput.Value())
        if err := validateSwarmKey(swarmKey); err != nil {
            m.err = err
            return m, nil
        }
        m.config.SwarmKeyHex = swarmKey
        m.err = nil
        m.step = StepBranch
        m.cursor = 0

    case StepBranch:
        if m.cursor == 0 {
            m.config.Branch = "main"
        } else {
            m.config.Branch = "nightly"
        }
        m.cursor = 0 // Reset cursor for next step
        m.step = StepNoPull

    case StepNoPull:
        if m.cursor == 0 {
            m.config.NoPull = false
        } else {
            m.config.NoPull = true
        }
        m.step = StepConfirm

    case StepConfirm:
        m.step = StepInstalling
        return m, m.startInstallation()

    case StepDone:
        return m, tea.Quit
    }

    return m, nil
}

func (m *Model) setupStepInput() {
    m.textInput.Reset()
    m.textInput.Focus()
    m.textInput.EchoMode = textinput.EchoNormal // Reset echo mode

    switch m.step {
    case StepVpsIP:
        m.textInput.Placeholder = "e.g., 203.0.113.1"
        // Try to auto-detect public IP
        if ip := detectPublicIP(); ip != "" {
            m.textInput.SetValue(ip)
        }
    case StepDomain:
        m.textInput.Placeholder = "e.g., node-1.orama.network"
    case StepPeerDomain:
        m.textInput.Placeholder = "e.g., node-123.orama.network"
    case StepClusterSecret:
        m.textInput.Placeholder = "64 hex characters"
        m.textInput.EchoMode = textinput.EchoPassword
    case StepSwarmKey:
        m.textInput.Placeholder = "64 hex characters"
        m.textInput.EchoMode = textinput.EchoPassword
    }
}

func (m Model) startInstallation() tea.Cmd {
    return func() tea.Msg {
        // This would trigger the actual installation
        // For now, we return the config for the CLI to handle
        return installCompleteMsg{config: m.config}
    }
}

type installCompleteMsg struct {
    config InstallerConfig
}

// View renders the UI
func (m Model) View() string {
    var s strings.Builder

    // Header
    s.WriteString(renderHeader())
    s.WriteString("\n\n")

    switch m.step {
    case StepWelcome:
        s.WriteString(m.viewWelcome())
    case StepNodeType:
        s.WriteString(m.viewNodeType())
    case StepVpsIP:
        s.WriteString(m.viewVpsIP())
    case StepDomain:
        s.WriteString(m.viewDomain())
    case StepPeerDomain:
        s.WriteString(m.viewPeerDomain())
    case StepClusterSecret:
        s.WriteString(m.viewClusterSecret())
    case StepSwarmKey:
        s.WriteString(m.viewSwarmKey())
    case StepBranch:
        s.WriteString(m.viewBranch())
    case StepNoPull:
        s.WriteString(m.viewNoPull())
    case StepConfirm:
        s.WriteString(m.viewConfirm())
    case StepInstalling:
        s.WriteString(m.viewInstalling())
    case StepDone:
        s.WriteString(m.viewDone())
    }

    return s.String()
}

func renderHeader() string {
    logo := `
  ___  ____      _    __  __    _
 / _ \|  _ \    / \  |  \/  |  / \
| | | | |_) |  / _ \ | |\/| | / _ \
| |_| |  _ <  / ___ \| |  | |/ ___ \
 \___/|_| \_\/_/   \_\_|  |_/_/   \_\
`
    return titleStyle.Render(logo) + "\n" + subtitleStyle.Render("Network Installation Wizard")
}

func (m Model) viewWelcome() string {
    var s strings.Builder
    s.WriteString(boxStyle.Render(
        titleStyle.Render("Welcome to Orama Network!") + "\n\n" +
            "This wizard will guide you through setting up your node.\n\n" +
            "You'll need:\n" +
            "  • A public IP address for your server\n" +
            "  • A domain name (e.g., node-1.orama.network)\n" +
            "  • For joining: cluster secret from existing node\n",
    ))
    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Press Enter to continue • q to quit"))
    return s.String()
}

func (m Model) viewNodeType() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Node Type") + "\n\n")
    s.WriteString("Is this the first node in a new cluster?\n\n")

    options := []string{"Yes, create new cluster", "No, join existing cluster"}
    for i, opt := range options {
        if i == m.cursor {
            s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
        } else {
            s.WriteString("  " + blurredStyle.Render(opt) + "\n")
        }
    }

    s.WriteString("\n")
    s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewVpsIP() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Server IP Address") + "\n\n")
    s.WriteString("Enter your server's public IP address:\n\n")
    s.WriteString(m.textInput.View())

    if m.err != nil {
        s.WriteString("\n\n" + errorStyle.Render("✗ "+m.err.Error()))
    }

    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewDomain() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Domain Name") + "\n\n")
    s.WriteString("Enter the domain for this node:\n\n")
    s.WriteString(m.textInput.View())

    if m.err != nil {
        s.WriteString("\n\n" + errorStyle.Render("✗ "+m.err.Error()))
    }

    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewPeerDomain() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Existing Node Domain") + "\n\n")
    s.WriteString("Enter the domain of an existing node to join:\n")
    s.WriteString(subtitleStyle.Render("The installer will auto-discover peer info via HTTPS/HTTP") + "\n\n")
    s.WriteString(m.textInput.View())

    if m.discovering {
        s.WriteString("\n\n" + subtitleStyle.Render("🔍 "+m.discoveryInfo))
    }

    if m.discoveredPeer != "" && m.err == nil {
        s.WriteString("\n\n" + successStyle.Render("✓ Discovered peer: "+m.discoveredPeer[:12]+"..."))
    }

    if m.err != nil {
        s.WriteString("\n\n" + errorStyle.Render("✗ "+m.err.Error()))
    }

    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Enter to discover & continue • Esc to go back"))
    return s.String()
}

func (m Model) viewClusterSecret() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Cluster Secret") + "\n\n")
    s.WriteString("Enter the cluster secret from an existing node:\n")
    s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/cluster-secret") + "\n\n")
    s.WriteString(m.textInput.View())

    if m.err != nil {
        s.WriteString("\n\n" + errorStyle.Render("✗ "+m.err.Error()))
    }

    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewSwarmKey() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("IPFS Swarm Key") + "\n\n")
    s.WriteString("Enter the swarm key from an existing node:\n")
    s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/swarm.key | tail -1") + "\n\n")
    s.WriteString(m.textInput.View())

    if m.err != nil {
        s.WriteString("\n\n" + errorStyle.Render("✗ "+m.err.Error()))
    }

    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewBranch() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Release Channel") + "\n\n")
    s.WriteString("Select the release channel:\n\n")

    options := []string{"main (stable)", "nightly (latest features)"}
    for i, opt := range options {
        if i == m.cursor {
            s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
        } else {
            s.WriteString("  " + blurredStyle.Render(opt) + "\n")
        }
    }

    s.WriteString("\n")
    s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewNoPull() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Git Repository") + "\n\n")
    s.WriteString("Pull latest changes from repository?\n\n")

    options := []string{"Pull latest (recommended)", "Skip git pull (use existing source)"}
    for i, opt := range options {
        if i == m.cursor {
            s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
        } else {
            s.WriteString("  " + blurredStyle.Render(opt) + "\n")
        }
    }

    s.WriteString("\n")
    s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
    return s.String()
}

func (m Model) viewConfirm() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Confirm Installation") + "\n\n")

    noPullStr := "Pull latest"
    if m.config.NoPull {
        noPullStr = "Skip git pull"
    }

    config := fmt.Sprintf(
        "  VPS IP:     %s\n"+
            "  Domain:     %s\n"+
            "  Branch:     %s\n"+
            "  Git Pull:   %s\n"+
            "  Node Type:  %s\n",
        m.config.VpsIP,
        m.config.Domain,
        m.config.Branch,
        noPullStr,
        map[bool]string{true: "First node (new cluster)", false: "Join existing cluster"}[m.config.IsFirstNode],
    )

    if !m.config.IsFirstNode {
        config += fmt.Sprintf("  Peer Node:  %s\n", m.config.PeerDomain)
        config += fmt.Sprintf("  Join Addr:  %s\n", m.config.JoinAddress)
        if len(m.config.Peers) > 0 {
            config += fmt.Sprintf("  Bootstrap:  %s...\n", m.config.Peers[0][:40])
        }
        if len(m.config.ClusterSecret) >= 8 {
            config += fmt.Sprintf("  Secret:     %s...\n", m.config.ClusterSecret[:8])
        }
        if len(m.config.SwarmKeyHex) >= 8 {
            config += fmt.Sprintf("  Swarm Key:  %s...\n", m.config.SwarmKeyHex[:8])
        }
        if m.config.IPFSPeerID != "" {
            config += fmt.Sprintf("  IPFS Peer:  %s...\n", m.config.IPFSPeerID[:16])
        }
    }

    s.WriteString(boxStyle.Render(config))

    // Show SNI DNS warning if present
    if m.sniWarning != "" {
        s.WriteString("\n\n")
        s.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#FFA500")).Render(m.sniWarning))
    }

    s.WriteString("\n\n")
    s.WriteString(helpStyle.Render("Press Enter to install • Esc to go back"))
    return s.String()
}

func (m Model) viewInstalling() string {
    var s strings.Builder
    s.WriteString(titleStyle.Render("Installing...") + "\n\n")
    s.WriteString("Please wait while the node is being configured.\n\n")
    for _, line := range m.installOutput {
        s.WriteString(line + "\n")
    }
    return s.String()
}

func (m Model) viewDone() string {
    var s strings.Builder
    s.WriteString(successStyle.Render("✓ Installation Complete!") + "\n\n")
    s.WriteString("Your node is now running.\n\n")
    s.WriteString("Useful commands:\n")
    s.WriteString("  orama status        - Check service status\n")
    s.WriteString("  orama logs node     - View node logs\n")
    s.WriteString("  orama logs gateway  - View gateway logs\n")
    s.WriteString("\n")
    s.WriteString(helpStyle.Render("Press Enter or q to exit"))
    return s.String()
}

// GetConfig returns the installer configuration after the TUI completes
func (m Model) GetConfig() InstallerConfig {
    return m.config
}

// Validation helpers

func validateIP(ip string) error {
    if ip == "" {
        return fmt.Errorf("IP address is required")
    }
    if net.ParseIP(ip) == nil {
        return fmt.Errorf("invalid IP address format")
    }
    return nil
}

func validateDomain(domain string) error {
    if domain == "" {
        return fmt.Errorf("domain is required")
    }
    // Basic domain validation
    domainRegex := regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?)*$`)
    if !domainRegex.MatchString(domain) {
        return fmt.Errorf("invalid domain format")
    }
    return nil
}

// DiscoveryResult contains all information discovered from a peer node
type DiscoveryResult struct {
    PeerID         string   // LibP2P peer ID
    IPFSPeerID     string   // IPFS peer ID
    IPFSSwarmAddrs []string // IPFS swarm addresses
    // IPFS Cluster info for cluster peer discovery
    IPFSClusterPeerID string   // IPFS Cluster peer ID
    IPFSClusterAddrs  []string // IPFS Cluster multiaddresses
}

// discoverPeerFromDomain queries an existing node to get its peer ID and IPFS info
// Tries HTTPS first, then falls back to HTTP
// Respects DEBROS_TRUSTED_TLS_DOMAINS and DEBROS_CA_CERT_PATH environment variables for certificate verification
func discoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
    // Use centralized TLS configuration that respects CA certificates and trusted domains
    client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)

    // Try HTTPS first
    url := fmt.Sprintf("https://%s/v1/network/status", domain)
    resp, err := client.Get(url)

    // If HTTPS fails, try HTTP
    if err != nil {
        // Finally try plain HTTP
        url = fmt.Sprintf("http://%s/v1/network/status", domain)
        resp, err = client.Get(url)
        if err != nil {
            return nil, fmt.Errorf("could not connect to %s (tried HTTPS and HTTP): %w", domain, err)
        }
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status from %s: %s", domain, resp.Status)
    }

    // Parse response including IPFS and IPFS Cluster info
    var status struct {
        PeerID string `json:"peer_id"`
        NodeID string `json:"node_id"` // fallback for backward compatibility
        IPFS   *struct {
            PeerID         string   `json:"peer_id"`
            SwarmAddresses []string `json:"swarm_addresses"`
        } `json:"ipfs,omitempty"`
        IPFSCluster *struct {
            PeerID    string   `json:"peer_id"`
            Addresses []string `json:"addresses"`
        } `json:"ipfs_cluster,omitempty"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
        return nil, fmt.Errorf("failed to parse response from %s: %w", domain, err)
    }

    // Use peer_id if available, otherwise fall back to node_id for backward compatibility
    peerID := status.PeerID
    if peerID == "" {
        peerID = status.NodeID
    }

    if peerID == "" {
        return nil, fmt.Errorf("no peer_id or node_id in response from %s", domain)
    }

    result := &DiscoveryResult{
        PeerID: peerID,
    }

    // Include IPFS info if available
    if status.IPFS != nil {
        result.IPFSPeerID = status.IPFS.PeerID
        result.IPFSSwarmAddrs = status.IPFS.SwarmAddresses
    }

    // Include IPFS Cluster info if available
    if status.IPFSCluster != nil {
        result.IPFSClusterPeerID = status.IPFSCluster.PeerID
        result.IPFSClusterAddrs = status.IPFSCluster.Addresses
    }

    return result, nil
}

func validateClusterSecret(secret string) error {
    if len(secret) != 64 {
        return fmt.Errorf("cluster secret must be 64 hex characters")
    }
    secretRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
    if !secretRegex.MatchString(secret) {
        return fmt.Errorf("cluster secret must be valid hexadecimal")
    }
    return nil
}

func validateSwarmKey(key string) error {
    if len(key) != 64 {
        return fmt.Errorf("swarm key must be 64 hex characters")
    }
    keyRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
    if !keyRegex.MatchString(key) {
        return fmt.Errorf("swarm key must be valid hexadecimal")
    }
    return nil
}

// ensureCertificatesForDomain generates self-signed certificates for the domain
func ensureCertificatesForDomain(domain string) error {
    // Get home directory
    home, err := os.UserHomeDir()
    if err != nil {
        return fmt.Errorf("failed to get home directory: %w", err)
    }

    // Create cert directory
    certDir := filepath.Join(home, ".orama", "certs")
    if err := os.MkdirAll(certDir, 0700); err != nil {
        return fmt.Errorf("failed to create cert directory: %w", err)
    }

    // Create certificate manager
    cm := certutil.NewCertificateManager(certDir)

    // Ensure CA certificate exists
    caCertPEM, caKeyPEM, err := cm.EnsureCACertificate()
    if err != nil {
        return fmt.Errorf("failed to ensure CA certificate: %w", err)
    }

    // Ensure node certificate exists for the domain
    _, _, err = cm.EnsureNodeCertificate(domain, caCertPEM, caKeyPEM)
    if err != nil {
        return fmt.Errorf("failed to ensure node certificate: %w", err)
    }

    // Also create wildcard certificate if domain is not already wildcard
    if !strings.HasPrefix(domain, "*.") {
        wildcardDomain := "*." + domain
        _, _, err = cm.EnsureNodeCertificate(wildcardDomain, caCertPEM, caKeyPEM)
        if err != nil {
            return fmt.Errorf("failed to ensure wildcard certificate: %w", err)
        }
    }

    return nil
}

func detectPublicIP() string {
    // Try to detect public IP from common interfaces
    addrs, err := net.InterfaceAddrs()
    if err != nil {
        return ""
    }
    for _, addr := range addrs {
        if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
            if ipnet.IP.To4() != nil && !ipnet.IP.IsPrivate() {
                return ipnet.IP.String()
            }
        }
    }
    return ""
}

// validateSNIDNSRecords checks if the required SNI DNS records exist
// It tries to resolve the key SNI hostnames for IPFS, IPFS Cluster, and Olric
// Note: Raft no longer uses SNI - it uses direct RQLite TLS on port 7002
// All should resolve to the same IP (the node's public IP or domain)
// Returns a warning string if records are missing (empty string if all OK)
func validateSNIDNSRecords(domain string) string {
    // List of SNI services that need DNS records
    // Note: raft.domain is NOT included - RQLite uses direct TLS on port 7002
    sniServices := []string{
        fmt.Sprintf("ipfs.%s", domain),
        fmt.Sprintf("ipfs-cluster.%s", domain),
        fmt.Sprintf("olric.%s", domain),
    }

    // Try to resolve the main domain first to get baseline
    mainIPs, err := net.LookupHost(domain)
    if err != nil {
        // Main domain doesn't resolve - this is just a warning now
        return fmt.Sprintf("Warning: could not resolve main domain %s: %v", domain, err)
    }

    if len(mainIPs) == 0 {
        return fmt.Sprintf("Warning: main domain %s resolved to no IP addresses", domain)
    }

    // Check each SNI service
    var unresolvedServices []string
    for _, service := range sniServices {
        ips, err := net.LookupHost(service)
        if err != nil || len(ips) == 0 {
            unresolvedServices = append(unresolvedServices, service)
        }
    }

    if len(unresolvedServices) > 0 {
        serviceList := strings.Join(unresolvedServices, ", ")
        return fmt.Sprintf(
            "⚠️  SNI DNS records not found for: %s\n"+
                "   For multi-node clustering, add wildcard CNAME: *.%s -> %s\n"+
                "   (Continuing anyway - single-node setup will work)",
            serviceList, domain, domain,
        )
    }

    return ""
}

// Run starts the TUI installer and returns the configuration
func Run() (*InstallerConfig, error) {
    // Check if running as root
    if os.Geteuid() != 0 {
        return nil, fmt.Errorf("installer must be run as root (use sudo)")
    }

    model := NewModel()
    p := tea.NewProgram(&model, tea.WithAltScreen())
    finalModel, err := p.Run()
    if err != nil {
        return nil, err
    }

    m := finalModel.(*Model)
    if m.step == StepInstalling || m.step == StepDone {
        config := m.GetConfig()
        return &config, nil
    }

    return nil, fmt.Errorf("installation cancelled")
}
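A minimal sketch of a CLI entrypoint driving the installer above; the installer import path and the printed summary are assumptions about how the returned config might be consumed:

package main

import (
    "fmt"
    "os"

    "github.com/DeBrosOfficial/network/pkg/installer" // assumed import path
)

func main() {
    cfg, err := installer.Run() // must run as root; returns the gathered InstallerConfig
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    nodeType := "joining"
    if cfg.IsFirstNode {
        nodeType = "first"
    }
    fmt.Printf("installing %s node for %s (branch %s)\n", nodeType, cfg.Domain, cfg.Branch)
}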
@ -7,7 +7,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
@@ -19,9 +18,6 @@ import (
"go.uber.org/zap"

"github.com/DeBrosOfficial/network/pkg/config"
- "github.com/DeBrosOfficial/network/pkg/tlsutil"
- "github.com/libp2p/go-libp2p/core/host"
- "github.com/multiformats/go-multiaddr"
)

// ClusterConfigManager manages IPFS Cluster configuration files
@@ -87,9 +83,9 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
}

// Determine cluster path based on data directory structure
- // Check if dataDir contains specific node names (e.g., ~/.orama/node-1, ~/.orama/node-2, etc.)
+ // Check if dataDir contains specific node names (e.g., ~/.debros/bootstrap, ~/.debros/bootstrap2, ~/.debros/node2-4)
clusterPath := filepath.Join(dataDir, "ipfs-cluster")
- nodeNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
+ nodeNames := []string{"bootstrap", "bootstrap2", "node2", "node3", "node4"}
for _, nodeName := range nodeNames {
if strings.Contains(dataDir, nodeName) {
// Check if this is a direct child
@@ -103,17 +99,12 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
}

// Load or generate cluster secret
- // Always use ~/.orama/secrets/cluster-secret (new standard location)
secretPath := filepath.Join(dataDir, "..", "cluster-secret")
- if strings.Contains(dataDir, ".orama") {
+ if strings.Contains(dataDir, ".debros") {
- // Use the secrets directory for proper file organization
+ // Try to find cluster-secret in ~/.debros
home, err := os.UserHomeDir()
if err == nil {
- secretsDir := filepath.Join(home, ".orama", "secrets")
+ secretPath = filepath.Join(home, ".debros", "cluster-secret")
- // Ensure secrets directory exists
- if err := os.MkdirAll(secretsDir, 0700); err == nil {
- secretPath = filepath.Join(secretsDir, "cluster-secret")
- }
}
}

@@ -150,35 +141,26 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
return fmt.Errorf("failed to parse IPFS API URL: %w", err)
}

- // Determine node name from ID or DataDir
+ // Determine node name
- nodeName := "node-1" // Default fallback
+ nodeName := cm.cfg.Node.Type
- possibleNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
+ if nodeName == "node" || nodeName == "bootstrap" {
+ // Try to extract from data dir or ID
+ possibleNames := []string{"bootstrap", "bootstrap2", "node2", "node3", "node4"}
for _, name := range possibleNames {
if strings.Contains(cm.cfg.Node.DataDir, name) || strings.Contains(cm.cfg.Node.ID, name) {
nodeName = name
break
}
}
- // If ID contains a node identifier, use it
+ if nodeName == "node" || nodeName == "bootstrap" {
- if cm.cfg.Node.ID != "" {
+ nodeName = cm.cfg.Node.Type
- for _, name := range possibleNames {
- if strings.Contains(cm.cfg.Node.ID, name) {
- nodeName = name
- break
- }
- }
}
}

// Calculate ports based on pattern
- // REST API: 9094
+ proxyPort := clusterPort - 1
- // Proxy: 9094 - 1 = 9093 (NOT USED - keeping for reference)
+ pinSvcPort := clusterPort + 1
- // PinSvc: 9094 + 1 = 9095
+ clusterListenPort := clusterPort + 2
- // Proxy API: 9094 + 1 = 9095 (actual proxy port)
- // PinSvc API: 9094 + 3 = 9097
- // Cluster LibP2P: 9094 + 4 = 9098
- proxyPort := clusterPort + 1 // 9095 (IPFSProxy API)
- pinSvcPort := clusterPort + 3 // 9097 (PinSvc API)
- clusterListenPort := clusterPort + 4 // 9098 (Cluster LibP2P)

// If config doesn't exist, initialize it with ipfs-cluster-service init
// This ensures we have all required sections (datastore, informer, etc.)
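Note: the port arithmetic changed in this hunk. A sketch of the main-branch layout described in the removed comments, using the default 9094 cluster REST API port as the base (illustration only, not code from either branch):

restAPI := 9094           // cluster REST API (base port)
proxyAPI := restAPI + 1   // 9095, IPFS proxy API
pinSvcAPI := restAPI + 3  // 9097, pinning service API
clusterP2P := restAPI + 4 // 9098, cluster LibP2P listener

The nightly build instead derived proxy, pinsvc, and LibP2P ports as clusterPort-1, +1, and +2.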
@@ -228,570 +210,64 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
return nil
}

- // UpdatePeerAddresses updates peer_addresses and peerstore with peer information
+ // UpdateBootstrapPeers updates peer_addresses and peerstore with bootstrap peer information
- // Returns true if update was successful, false if peer is not available yet (non-fatal)
+ func (cm *ClusterConfigManager) UpdateBootstrapPeers(bootstrapAPIURL string) error {
- func (cm *ClusterConfigManager) UpdatePeerAddresses(peerAPIURL string) (bool, error) {
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
- return false, nil // IPFS not configured
+ return nil // IPFS not configured
}

- // Skip if this is the first node (creates the cluster, no join address)
+ // Skip if this is the bootstrap node itself
- if cm.cfg.Database.RQLiteJoinAddress == "" {
+ if cm.cfg.Node.Type == "bootstrap" {
- return false, nil
+ return nil
}

- // Query peer cluster API to get peer ID
+ // Query bootstrap cluster API to get peer ID
- peerID, err := getPeerID(peerAPIURL)
+ peerID, err := getBootstrapPeerID(bootstrapAPIURL)
if err != nil {
- // Non-fatal: peer might not be available yet
+ return fmt.Errorf("failed to get bootstrap peer ID: %w", err)
- cm.logger.Debug("Peer not available yet, will retry",
- zap.String("peer_api", peerAPIURL),
- zap.Error(err))
- return false, nil
}

if peerID == "" {
- cm.logger.Debug("Peer ID not available yet")
+ cm.logger.Warn("Bootstrap peer ID not available yet")
- return false, nil
+ return nil
}

- // Extract peer host and cluster port from URL
+ // Extract bootstrap cluster port from URL
- peerHost, clusterPort, err := parsePeerHostAndPort(peerAPIURL)
+ _, clusterPort, err := parseClusterPorts(bootstrapAPIURL)
if err != nil {
- return false, fmt.Errorf("failed to parse peer cluster API URL: %w", err)
+ return fmt.Errorf("failed to parse bootstrap cluster API URL: %w", err)
}

- // Peer cluster LibP2P listens on clusterPort + 4
+ // Bootstrap listens on clusterPort + 2 (same pattern)
- // (REST API is 9094, LibP2P is 9098 = 9094 + 4)
+ bootstrapClusterPort := clusterPort + 2
- peerClusterPort := clusterPort + 4
+ bootstrapPeerAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", bootstrapClusterPort, peerID)

- // Determine IP protocol (ip4 or ip6) based on the host
- var ipProtocol string
- if net.ParseIP(peerHost).To4() != nil {
- ipProtocol = "ip4"
- } else {
- ipProtocol = "ip6"
- }
-
- peerAddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, peerHost, peerClusterPort, peerID)

// Load current config
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
if err != nil {
- return false, fmt.Errorf("failed to load config: %w", err)
+ return fmt.Errorf("failed to load config: %w", err)
- }
-
- // CRITICAL: Always update peerstore file to ensure no stale addresses remain
- // Stale addresses (e.g., from old port configurations) cause LibP2P dial backoff,
- // preventing cluster peers from connecting even if the correct address is present.
- // We must clean and rewrite the peerstore on every update to avoid this.
- peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
-
- // Check if peerstore needs updating (avoid unnecessary writes but always clean stale entries)
- needsUpdate := true
- if peerstoreData, err := os.ReadFile(peerstorePath); err == nil {
- // Only skip update if peerstore contains EXACTLY the correct address and nothing else
- existingAddrs := strings.Split(strings.TrimSpace(string(peerstoreData)), "\n")
- if len(existingAddrs) == 1 && strings.TrimSpace(existingAddrs[0]) == peerAddr {
- cm.logger.Debug("Peer address already correct in peerstore", zap.String("addr", peerAddr))
- needsUpdate = false
- }
- }
-
- if needsUpdate {
- // Write ONLY the correct peer address, removing any stale entries
- if err := os.WriteFile(peerstorePath, []byte(peerAddr+"\n"), 0644); err != nil {
- return false, fmt.Errorf("failed to write peerstore: %w", err)
- }
- cm.logger.Info("Updated peerstore with peer (cleaned stale entries)",
- zap.String("addr", peerAddr),
- zap.String("peerstore_path", peerstorePath))
- }
-
- // Then sync service.json from peerstore to keep them in sync
- cfg.Cluster.PeerAddresses = []string{peerAddr}
-
- // Save config
- if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
- return false, fmt.Errorf("failed to save config: %w", err)
- }
-
- cm.logger.Info("Updated peer configuration",
- zap.String("peer_addr", peerAddr),
- zap.String("peerstore_path", peerstorePath))
-
- return true, nil
- }
-
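Note: the peer multiaddr written above always follows the same shape: protocol, host, cluster LibP2P port, peer ID. A minimal sketch of how such an address is assembled under the main-branch port pattern; host, port, and peer ID values here are hypothetical placeholders, not taken from the diff:

host := "203.0.113.10"            // hypothetical peer host
clusterPort := 9094               // REST API port parsed from the peer URL
peerID := "12D3KooWExamplePeerID" // hypothetical cluster peer ID
proto := "ip4"
if net.ParseIP(host).To4() == nil {
	proto = "ip6"
}
addr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", proto, host, clusterPort+4, peerID)
// addr == "/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWExamplePeerID"

The peerstore file itself is plain text with one such multiaddr per line, which is why rewriting it with a single correct entry clears stale addresses.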
- // UpdateAllClusterPeers discovers all cluster peers from the local cluster API
- // and updates peer_addresses in service.json. This allows IPFS Cluster to automatically
- // connect to all discovered peers in the cluster.
- // Returns true if update was successful, false if cluster is not available yet (non-fatal)
- func (cm *ClusterConfigManager) UpdateAllClusterPeers() (bool, error) {
- if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
- return false, nil // IPFS not configured
- }
-
- // Query local cluster API to get all peers
- client := newStandardHTTPClient()
- peersURL := fmt.Sprintf("%s/peers", cm.cfg.Database.IPFS.ClusterAPIURL)
- resp, err := client.Get(peersURL)
- if err != nil {
- // Non-fatal: cluster might not be available yet
- cm.logger.Debug("Cluster API not available yet, will retry",
- zap.String("peers_url", peersURL),
- zap.Error(err))
- return false, nil
- }
-
- // Parse NDJSON response
- dec := json.NewDecoder(bytes.NewReader(resp))
- var allPeerAddresses []string
- seenPeers := make(map[string]bool)
- peerIDToAddresses := make(map[string][]string)
-
- // First pass: collect all peer IDs and their addresses
- for {
- var peerInfo struct {
- ID string `json:"id"`
- Addresses []string `json:"addresses"`
- ClusterPeers []string `json:"cluster_peers"`
- ClusterPeersAddresses []string `json:"cluster_peers_addresses"`
- }
-
- err := dec.Decode(&peerInfo)
- if err != nil {
- if err == io.EOF {
- break
- }
- cm.logger.Debug("Failed to decode peer info", zap.Error(err))
- continue
- }
-
- // Store this peer's addresses
- if peerInfo.ID != "" {
- peerIDToAddresses[peerInfo.ID] = peerInfo.Addresses
- }
-
- // Also collect cluster peers addresses if available
- // These are addresses of all peers in the cluster
- for _, addr := range peerInfo.ClusterPeersAddresses {
- if ma, err := multiaddr.NewMultiaddr(addr); err == nil {
- // Validate it has p2p component (peer ID)
- if _, err := ma.ValueForProtocol(multiaddr.P_P2P); err == nil {
- addrStr := ma.String()
- if !seenPeers[addrStr] {
- allPeerAddresses = append(allPeerAddresses, addrStr)
- seenPeers[addrStr] = true
- }
- }
- }
- }
- }
-
- // If we didn't get cluster_peers_addresses, try to construct them from peer IDs and addresses
- if len(allPeerAddresses) == 0 && len(peerIDToAddresses) > 0 {
- // Get cluster listen port from config
- serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
- cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
- if err == nil && len(cfg.Cluster.ListenMultiaddress) > 0 {
- // Extract port from listen_multiaddress (e.g., "/ip4/0.0.0.0/tcp/9098")
- listenAddr := cfg.Cluster.ListenMultiaddress[0]
- if ma, err := multiaddr.NewMultiaddr(listenAddr); err == nil {
- if port, err := ma.ValueForProtocol(multiaddr.P_TCP); err == nil {
- // For each peer ID, try to find its IP address and construct cluster multiaddr
- for peerID, addresses := range peerIDToAddresses {
- // Try to find an IP address in the peer's addresses
- for _, addrStr := range addresses {
- if ma, err := multiaddr.NewMultiaddr(addrStr); err == nil {
- // Extract IP address (IPv4 or IPv6)
- if ip, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ip != "" {
- clusterAddr := fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", ip, port, peerID)
- if !seenPeers[clusterAddr] {
- allPeerAddresses = append(allPeerAddresses, clusterAddr)
- seenPeers[clusterAddr] = true
- }
- break
- } else if ip, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ip != "" {
- clusterAddr := fmt.Sprintf("/ip6/%s/tcp/%s/p2p/%s", ip, port, peerID)
- if !seenPeers[clusterAddr] {
- allPeerAddresses = append(allPeerAddresses, clusterAddr)
- seenPeers[clusterAddr] = true
- }
- break
- }
- }
- }
- }
- }
- }
- }
- }
-
- if len(allPeerAddresses) == 0 {
- cm.logger.Debug("No cluster peer addresses found in API response")
- return false, nil
- }
-
- // Load current config
- serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
- cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
- if err != nil {
- return false, fmt.Errorf("failed to load config: %w", err)
- }
-
- // Check if peer addresses have changed
- addressesChanged := false
- if len(cfg.Cluster.PeerAddresses) != len(allPeerAddresses) {
- addressesChanged = true
- } else {
- // Check if addresses are different
- currentAddrs := make(map[string]bool)
- for _, addr := range cfg.Cluster.PeerAddresses {
- currentAddrs[addr] = true
- }
- for _, addr := range allPeerAddresses {
- if !currentAddrs[addr] {
- addressesChanged = true
- break
- }
- }
- }
-
- if !addressesChanged {
- cm.logger.Debug("Cluster peer addresses already up to date",
- zap.Int("peer_count", len(allPeerAddresses)))
- return true, nil
- }
-
- // Update peerstore file FIRST - this is what IPFS Cluster reads for bootstrapping
- // Peerstore is the source of truth, service.json is just for our tracking
- peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
- peerstoreContent := strings.Join(allPeerAddresses, "\n") + "\n"
- if err := os.WriteFile(peerstorePath, []byte(peerstoreContent), 0644); err != nil {
- cm.logger.Warn("Failed to update peerstore file", zap.Error(err))
- // Non-fatal, continue
- }
-
- // Then sync service.json from peerstore to keep them in sync
- cfg.Cluster.PeerAddresses = allPeerAddresses
-
- // Save config
- if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
- return false, fmt.Errorf("failed to save config: %w", err)
- }
-
- cm.logger.Info("Updated cluster peer addresses",
- zap.Int("peer_count", len(allPeerAddresses)),
- zap.Strings("peer_addresses", allPeerAddresses))
-
- return true, nil
- }
-
- // RepairPeerConfiguration automatically discovers and repairs peer configuration
- // Tries multiple methods: gateway /v1/network/status, config-based discovery, peer multiaddr
- func (cm *ClusterConfigManager) RepairPeerConfiguration() (bool, error) {
- if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
- return false, nil // IPFS not configured
- }
-
- // Method 1: Try to discover cluster peers via /v1/network/status endpoint
- // This is the most reliable method as it uses the HTTPS gateway
- if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
- success, err := cm.DiscoverClusterPeersFromGateway()
- if err != nil {
- cm.logger.Debug("Gateway discovery failed, trying direct API", zap.Error(err))
- } else if success {
- cm.logger.Info("Successfully discovered cluster peers from gateway")
- return true, nil
- }
- }
-
- // Skip direct API method if this is the first node (creates the cluster, no join address)
- if cm.cfg.Database.RQLiteJoinAddress == "" {
- return false, nil
- }
-
- // Method 2: Try direct cluster API (fallback)
- var peerAPIURL string
-
- // Try to extract from peers multiaddr
- if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
- if ip := extractIPFromMultiaddrForCluster(cm.cfg.Discovery.BootstrapPeers[0]); ip != "" {
- // Default cluster API port is 9094
- peerAPIURL = fmt.Sprintf("http://%s:9094", ip)
- cm.logger.Debug("Inferred peer cluster API from peer",
- zap.String("peer_api", peerAPIURL))
- }
- }
-
- // Fallback to localhost if nothing found (for local development)
- if peerAPIURL == "" {
- peerAPIURL = "http://localhost:9094"
- cm.logger.Debug("Using localhost fallback for peer cluster API")
- }
-
- // Try to update peers
- success, err := cm.UpdatePeerAddresses(peerAPIURL)
- if err != nil {
- return false, err
- }
-
- if success {
- cm.logger.Info("Successfully repaired peer configuration via direct API")
- return true, nil
- }
-
- // If update failed (peer not available), return false but no error
- // This allows retries later
- return false, nil
- }
-
- // DiscoverClusterPeersFromGateway queries bootstrap peers' /v1/network/status endpoint
- // to discover IPFS Cluster peer information and updates the local service.json
- func (cm *ClusterConfigManager) DiscoverClusterPeersFromGateway() (bool, error) {
- if len(cm.cfg.Discovery.BootstrapPeers) == 0 {
- cm.logger.Debug("No bootstrap peers configured, skipping gateway discovery")
- return false, nil
- }
-
- var discoveredPeers []string
- seenPeers := make(map[string]bool)
-
- for _, peerAddr := range cm.cfg.Discovery.BootstrapPeers {
- // Extract domain or IP from multiaddr
- domain := extractDomainFromMultiaddr(peerAddr)
- if domain == "" {
- continue
- }
-
- // Query /v1/network/status endpoint
- statusURL := fmt.Sprintf("https://%s/v1/network/status", domain)
- cm.logger.Debug("Querying peer network status", zap.String("url", statusURL))
-
- // Use TLS-aware HTTP client (handles staging certs for *.debros.network)
- client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)
- resp, err := client.Get(statusURL)
- if err != nil {
- // Try HTTP fallback
- statusURL = fmt.Sprintf("http://%s/v1/network/status", domain)
- resp, err = client.Get(statusURL)
- if err != nil {
- cm.logger.Debug("Failed to query peer status", zap.String("domain", domain), zap.Error(err))
- continue
- }
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- cm.logger.Debug("Peer returned non-OK status", zap.String("domain", domain), zap.Int("status", resp.StatusCode))
- continue
- }
-
- // Parse response
- var status struct {
- IPFSCluster *struct {
- PeerID string `json:"peer_id"`
- Addresses []string `json:"addresses"`
- } `json:"ipfs_cluster"`
- }
- if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
- cm.logger.Debug("Failed to decode peer status", zap.String("domain", domain), zap.Error(err))
- continue
- }
-
- if status.IPFSCluster == nil || status.IPFSCluster.PeerID == "" {
- cm.logger.Debug("Peer has no IPFS Cluster info", zap.String("domain", domain))
- continue
- }
-
- // Extract IP from domain or addresses
- peerIP := extractIPFromMultiaddrForCluster(peerAddr)
- if peerIP == "" {
- // Try to resolve domain
- ips, err := net.LookupIP(domain)
- if err == nil && len(ips) > 0 {
- for _, ip := range ips {
- if ip.To4() != nil {
- peerIP = ip.String()
- break
- }
- }
- }
- }
-
- if peerIP == "" {
- cm.logger.Debug("Could not determine peer IP", zap.String("domain", domain))
- continue
- }
-
- // Construct cluster multiaddr
- // IPFS Cluster listens on port 9098 (REST API port 9094 + 4)
- clusterAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, status.IPFSCluster.PeerID)
- if !seenPeers[clusterAddr] {
- discoveredPeers = append(discoveredPeers, clusterAddr)
- seenPeers[clusterAddr] = true
- cm.logger.Info("Discovered cluster peer from gateway",
- zap.String("domain", domain),
- zap.String("peer_id", status.IPFSCluster.PeerID),
- zap.String("cluster_addr", clusterAddr))
- }
- }
-
- if len(discoveredPeers) == 0 {
- cm.logger.Debug("No cluster peers discovered from gateway")
- return false, nil
- }
-
- // Load current config
- serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
- cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
- if err != nil {
- return false, fmt.Errorf("failed to load config: %w", err)
- }
-
- // Update peerstore file
- peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
- peerstoreContent := strings.Join(discoveredPeers, "\n") + "\n"
- if err := os.WriteFile(peerstorePath, []byte(peerstoreContent), 0644); err != nil {
- cm.logger.Warn("Failed to update peerstore file", zap.Error(err))
- }
-
- // Update peer_addresses in config
- cfg.Cluster.PeerAddresses = discoveredPeers
-
- // Save config
- if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
- return false, fmt.Errorf("failed to save config: %w", err)
- }
-
- cm.logger.Info("Updated cluster peer addresses from gateway discovery",
- zap.Int("peer_count", len(discoveredPeers)),
- zap.Strings("peer_addresses", discoveredPeers))
-
- return true, nil
- }
-
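Note: the addresses produced by this discovery path can be sanity-checked by round-tripping them through the go-multiaddr package, which is what the surrounding code already depends on. A minimal sketch; the address value is a hypothetical placeholder:

addr := "/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWExamplePeerID" // hypothetical
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
	// not a syntactically valid multiaddr
}
if _, err := ma.ValueForProtocol(multiaddr.P_P2P); err != nil {
	// missing the /p2p/<peer-id> component that IPFS Cluster requires
}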
- // extractDomainFromMultiaddr extracts domain or IP from a multiaddr string
- // Handles formats like /dns4/domain/tcp/port/p2p/id or /ip4/ip/tcp/port/p2p/id
- func extractDomainFromMultiaddr(multiaddrStr string) string {
- ma, err := multiaddr.NewMultiaddr(multiaddrStr)
- if err != nil {
- return ""
- }
-
- // Try DNS4 first (domain name)
- if domain, err := ma.ValueForProtocol(multiaddr.P_DNS4); err == nil && domain != "" {
- return domain
- }
-
- // Try DNS6
- if domain, err := ma.ValueForProtocol(multiaddr.P_DNS6); err == nil && domain != "" {
- return domain
- }
-
- // Try IP4
- if ip, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ip != "" {
- return ip
- }
-
- // Try IP6
- if ip, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ip != "" {
- return ip
- }
-
- return ""
- }
-
- // DiscoverClusterPeersFromLibP2P loads IPFS cluster peer addresses from the peerstore file.
- // If peerstore is empty, it means there are no peers to connect to.
- // Returns true if peers were loaded and configured, false otherwise (non-fatal)
- func (cm *ClusterConfigManager) DiscoverClusterPeersFromLibP2P(host host.Host) (bool, error) {
- if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
- return false, nil // IPFS not configured
- }
-
- // Load peer addresses from peerstore file
- peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
- peerstoreData, err := os.ReadFile(peerstorePath)
- if err != nil {
- // Peerstore file doesn't exist or can't be read - no peers to connect to
- cm.logger.Debug("Peerstore file not found or empty - no cluster peers to connect to",
- zap.String("peerstore_path", peerstorePath))
- return false, nil
- }
-
- var allPeerAddresses []string
- seenPeers := make(map[string]bool)
-
- // Parse peerstore file (one multiaddr per line)
- lines := strings.Split(strings.TrimSpace(string(peerstoreData)), "\n")
- for _, line := range lines {
- line = strings.TrimSpace(line)
- if line != "" && strings.HasPrefix(line, "/") {
- // Validate it's a proper multiaddr with p2p component
- if ma, err := multiaddr.NewMultiaddr(line); err == nil {
- if _, err := ma.ValueForProtocol(multiaddr.P_P2P); err == nil {
- if !seenPeers[line] {
- allPeerAddresses = append(allPeerAddresses, line)
- seenPeers[line] = true
- cm.logger.Debug("Loaded cluster peer address from peerstore",
- zap.String("addr", line))
- }
- }
- }
- }
- }
-
- if len(allPeerAddresses) == 0 {
- cm.logger.Debug("Peerstore file is empty - no cluster peers to connect to")
- return false, nil
- }
-
- // Get config to update peer_addresses
- serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
- cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
- if err != nil {
- return false, fmt.Errorf("failed to load config: %w", err)
- }
-
- // Check if peer addresses have changed
- addressesChanged := false
- if len(cfg.Cluster.PeerAddresses) != len(allPeerAddresses) {
- addressesChanged = true
- } else {
- currentAddrs := make(map[string]bool)
- for _, addr := range cfg.Cluster.PeerAddresses {
- currentAddrs[addr] = true
- }
- for _, addr := range allPeerAddresses {
- if !currentAddrs[addr] {
- addressesChanged = true
- break
- }
- }
- }
-
- if !addressesChanged {
- cm.logger.Debug("Cluster peer addresses already up to date",
- zap.Int("peer_count", len(allPeerAddresses)))
- return true, nil
}

// Update peer_addresses
- cfg.Cluster.PeerAddresses = allPeerAddresses
+ cfg.Cluster.PeerAddresses = []string{bootstrapPeerAddr}

// Save config
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
- return false, fmt.Errorf("failed to save config: %w", err)
+ return fmt.Errorf("failed to save config: %w", err)
}

- cm.logger.Info("Loaded cluster peer addresses from peerstore",
+ // Write to peerstore file
- zap.Int("peer_count", len(allPeerAddresses)),
+ peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
- zap.Strings("peer_addresses", allPeerAddresses))
+ if err := os.WriteFile(peerstorePath, []byte(bootstrapPeerAddr+"\n"), 0644); err != nil {
+ return fmt.Errorf("failed to write peerstore: %w", err)
+ }

- return true, nil
+ cm.logger.Info("Updated bootstrap peer configuration",
+ zap.String("bootstrap_peer_addr", bootstrapPeerAddr),
+ zap.String("peerstore_path", peerstorePath))

+ return nil
}

// loadOrCreateConfig loads existing service.json or creates a template
@@ -991,38 +467,6 @@ func ensureRequiredSection(parent map[string]interface{}, key string, defaults m
}
}

- // parsePeerHostAndPort extracts host and REST API port from peer API URL
- func parsePeerHostAndPort(peerAPIURL string) (host string, restAPIPort int, err error) {
- u, err := url.Parse(peerAPIURL)
- if err != nil {
- return "", 0, err
- }
-
- host = u.Hostname()
- if host == "" {
- return "", 0, fmt.Errorf("no host in URL: %s", peerAPIURL)
- }
-
- portStr := u.Port()
- if portStr == "" {
- // Default port based on scheme
- if u.Scheme == "http" {
- portStr = "9094"
- } else if u.Scheme == "https" {
- portStr = "443"
- } else {
- return "", 0, fmt.Errorf("unknown scheme: %s", u.Scheme)
- }
- }
-
- _, err = fmt.Sscanf(portStr, "%d", &restAPIPort)
- if err != nil {
- return "", 0, fmt.Errorf("invalid port: %s", portStr)
- }
-
- return host, restAPIPort, nil
- }
-
// parseClusterPorts extracts cluster port and REST API port from ClusterAPIURL
func parseClusterPorts(clusterAPIURL string) (clusterPort, restAPIPort int, err error) {
u, err := url.Parse(clusterAPIURL)
@@ -1047,9 +491,8 @@ func parseClusterPorts(clusterAPIURL string) (clusterPort, restAPIPort int, err
return 0, 0, fmt.Errorf("invalid port: %s", portStr)
}

- // clusterPort is used as the base port for calculations
+ // Cluster listen port is typically REST API port + 2
- // The actual cluster LibP2P listen port is calculated as clusterPort + 4
+ clusterPort = restAPIPort + 2
- clusterPort = restAPIPort

return clusterPort, restAPIPort, nil
}
@@ -1082,17 +525,17 @@ func parseIPFSPort(apiURL string) (int, error) {
return port, nil
}

- // getPeerID queries the cluster API to get the peer ID
+ // getBootstrapPeerID queries the bootstrap cluster API to get the peer ID
- func getPeerID(apiURL string) (string, error) {
+ func getBootstrapPeerID(apiURL string) (string, error) {
// Simple HTTP client to query /peers endpoint
- client := newStandardHTTPClient()
+ client := &standardHTTPClient{}
resp, err := client.Get(fmt.Sprintf("%s/peers", apiURL))
if err != nil {
return "", err
}

// The /peers endpoint returns NDJSON (newline-delimited JSON)
- // We need to read the first peer object to get the peer ID
+ // We need to read the first peer object to get the bootstrap peer ID
dec := json.NewDecoder(bytes.NewReader(resp))
var firstPeer struct {
ID string `json:"id"`
@@ -1137,19 +580,11 @@ func generateRandomSecret(length int) string {
return hex.EncodeToString(bytes)
}

- // standardHTTPClient implements HTTP client using net/http with centralized TLS configuration
+ // standardHTTPClient implements HTTP client using net/http
- type standardHTTPClient struct {
+ type standardHTTPClient struct{}
- client *http.Client
- }
-
- func newStandardHTTPClient() *standardHTTPClient {
- return &standardHTTPClient{
- client: tlsutil.NewHTTPClient(30 * time.Second),
- }
- }

func (c *standardHTTPClient) Get(url string) ([]byte, error) {
- resp, err := c.client.Get(url)
+ resp, err := http.Get(url)
if err != nil {
return nil, err
}
@@ -1167,28 +602,6 @@ func (c *standardHTTPClient) Get(url string) ([]byte, error) {
return data, nil
}

- // extractIPFromMultiaddrForCluster extracts IP address from a LibP2P multiaddr string
- // Used for inferring bootstrap cluster API URL
- func extractIPFromMultiaddrForCluster(multiaddrStr string) string {
- // Parse multiaddr
- ma, err := multiaddr.NewMultiaddr(multiaddrStr)
- if err != nil {
- return ""
- }
-
- // Try to extract IPv4 address
- if ipv4, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ipv4 != "" {
- return ipv4
- }
-
- // Try to extract IPv6 address
- if ipv6, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ipv6 != "" {
- return ipv6
- }
-
- return ""
- }
-
// FixIPFSConfigAddresses fixes localhost addresses in IPFS config to use 127.0.0.1
// This is necessary because IPFS doesn't accept "localhost" as a valid IP address in multiaddrs
// This function always ensures the config is correct, regardless of current state
@@ -1208,15 +621,15 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
}

// Try to find IPFS repo path
- // Check common locations: dataDir/ipfs/repo, dataDir/node-1/ipfs/repo, etc.
+ // Check common locations: dataDir/ipfs/repo, or dataDir/bootstrap/ipfs/repo, etc.
possiblePaths := []string{
filepath.Join(dataDir, "ipfs", "repo"),
- filepath.Join(dataDir, "node-1", "ipfs", "repo"),
+ filepath.Join(dataDir, "bootstrap", "ipfs", "repo"),
- filepath.Join(dataDir, "node-2", "ipfs", "repo"),
+ filepath.Join(dataDir, "node2", "ipfs", "repo"),
- filepath.Join(dataDir, "node-3", "ipfs", "repo"),
+ filepath.Join(dataDir, "node3", "ipfs", "repo"),
- filepath.Join(filepath.Dir(dataDir), "node-1", "ipfs", "repo"),
+ filepath.Join(filepath.Dir(dataDir), "bootstrap", "ipfs", "repo"),
- filepath.Join(filepath.Dir(dataDir), "node-2", "ipfs", "repo"),
+ filepath.Join(filepath.Dir(dataDir), "node2", "ipfs", "repo"),
- filepath.Join(filepath.Dir(dataDir), "node-3", "ipfs", "repo"),
+ filepath.Join(filepath.Dir(dataDir), "node3", "ipfs", "repo"),
}

var ipfsRepoPath string
@@ -1238,7 +651,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
return fmt.Errorf("failed to parse IPFS API URL: %w", err)
}

- // Determine gateway port (typically API port + 3079, or 8080 for node-1, 8081 for node-2, etc.)
+ // Determine gateway port (typically API port + 3079, or 8080 for bootstrap, 8081 for node2, etc.)
gatewayPort := 8080
if strings.Contains(dataDir, "node2") {
gatewayPort = 8081
@@ -1251,7 +664,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
}

// Always ensure API address is correct (don't just check, always set it)
- correctAPIAddr := fmt.Sprintf(`["/ip4/0.0.0.0/tcp/%d"]`, ipfsPort)
+ correctAPIAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, ipfsPort)
cm.logger.Info("Ensuring IPFS API address is correct",
zap.String("repo", ipfsRepoPath),
zap.Int("port", ipfsPort),
@@ -1265,7 +678,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
}

// Always ensure Gateway address is correct
- correctGatewayAddr := fmt.Sprintf(`["/ip4/0.0.0.0/tcp/%d"]`, gatewayPort)
+ correctGatewayAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, gatewayPort)
cm.logger.Info("Ensuring IPFS Gateway address is correct",
zap.String("repo", ipfsRepoPath),
zap.Int("port", gatewayPort),
@@ -54,7 +54,6 @@ const (
ComponentClient Component = "CLIENT"
ComponentGeneral Component = "GENERAL"
ComponentAnyone Component = "ANYONE"
- ComponentGateway Component = "GATEWAY"
)

// getComponentColor returns the color for a specific component
@@ -76,8 +75,6 @@ func getComponentColor(component Component) string {
return Yellow
case ComponentAnyone:
return Cyan
- case ComponentGateway:
- return BrightGreen
default:
return White
}
@@ -104,10 +101,8 @@ func getLevelColor(level zapcore.Level) string {
// coloredConsoleEncoder creates a custom encoder with colors
func coloredConsoleEncoder(enableColors bool) zapcore.Encoder {
config := zap.NewDevelopmentEncoderConfig()

- // Ultra-short timestamp: HH:MM:SS (no milliseconds, no date, no timezone)
config.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
- timeStr := t.Format("15:04:05")
+ timeStr := t.Format("2006-01-02T15:04:05.000Z0700")
if enableColors {
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, timeStr, Reset))
} else {
@@ -115,41 +110,21 @@ func coloredConsoleEncoder(enableColors bool) zapcore.Encoder {
}
}

- // Single letter level: D, I, W, E
config.EncodeLevel = func(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
- levelMap := map[zapcore.Level]string{
+ levelStr := strings.ToUpper(level.String())
- zapcore.DebugLevel: "D",
- zapcore.InfoLevel:  "I",
- zapcore.WarnLevel:  "W",
- zapcore.ErrorLevel: "E",
- }
- levelStr := levelMap[level]
- if levelStr == "" {
- levelStr = "?"
- }
if enableColors {
color := getLevelColor(level)
- enc.AppendString(fmt.Sprintf("%s%s%s%s", color, Bold, levelStr, Reset))
+ enc.AppendString(fmt.Sprintf("%s%s%-5s%s", color, Bold, levelStr, Reset))
} else {
- enc.AppendString(levelStr)
+ enc.AppendString(fmt.Sprintf("%-5s", levelStr))
}
}

- // Just filename, no line number for cleaner output
config.EncodeCaller = func(caller zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) {
- file := caller.File
- // Extract just the filename from the path
- if idx := strings.LastIndex(file, "/"); idx >= 0 {
- file = file[idx+1:]
- }
- // Remove .go extension for even more compact format
- if strings.HasSuffix(file, ".go") {
- file = file[:len(file)-3]
- }
if enableColors {
- enc.AppendString(fmt.Sprintf("%s%s%s", Dim, file, Reset))
+ enc.AppendString(fmt.Sprintf("%s%s%s", Dim, caller.TrimmedPath(), Reset))
} else {
- enc.AppendString(file)
+ enc.AppendString(caller.TrimmedPath())
}
}

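Note: the practical effect of this hunk is the log-line prefix. The main branch emits a compact HH:MM:SS timestamp, a single-letter level (D/I/W/E, "?" for anything else), and a bare file name with the ".go" suffix stripped; the nightly build used an ISO-style timestamp, a width-padded level word, and zap's trimmed caller path. The sample lines below are illustrative only, with made-up messages and file names:

15:04:05 I cluster_config  Updated cluster peer addresses          (main)
2025-01-02T15:04:05.000+0000 INFO  ipfs/cluster_config.go:123 ...   (v0.68.1-nightly)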
@@ -182,33 +157,6 @@ func NewDefaultLogger(component Component) (*ColoredLogger, error) {
return NewColoredLogger(component, true)
}

- // NewFileLogger creates a logger that writes to a file
- func NewFileLogger(component Component, filePath string, enableColors bool) (*ColoredLogger, error) {
- // Create encoder
- encoder := coloredConsoleEncoder(enableColors)
-
- // Create file writer
- file, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
- if err != nil {
- return nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
- }
-
- // Create core
- core := zapcore.NewCore(
- encoder,
- zapcore.AddSync(file),
- zapcore.DebugLevel,
- )
-
- // Create logger with caller information
- logger := zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
-
- return &ColoredLogger{
- Logger: logger,
- enableColors: enableColors,
- }, nil
- }
-
// Component-specific logging methods
func (l *ColoredLogger) ComponentInfo(component Component, msg string, fields ...zap.Field) {
if l.enableColors {
@@ -10,8 +10,6 @@ import (
"github.com/mackerelio/go-osstat/cpu"
"github.com/mackerelio/go-osstat/memory"
"go.uber.org/zap"
-
- "github.com/DeBrosOfficial/network/pkg/logging"
)

func logPeerStatus(n *Node, currentPeerCount int, lastPeerCount int, firstCheck bool) (int, bool) {
@@ -212,38 +210,6 @@ func (n *Node) startConnectionMonitoring() {
if err := announceMetrics(n, peers, cpuUsage, mem); err != nil {
n.logger.Error("Failed to announce metrics", zap.Error(err))
}
-
- // Periodically update IPFS Cluster peer addresses
- // This discovers all cluster peers and updates peer_addresses in service.json
- // so IPFS Cluster can automatically connect to all discovered peers
- if n.clusterConfigManager != nil {
- // First try to discover from LibP2P connections (works even if cluster peers aren't connected yet)
- // This runs every minute to discover peers automatically via LibP2P discovery
- if time.Now().Unix()%60 == 0 {
- if success, err := n.clusterConfigManager.DiscoverClusterPeersFromLibP2P(n.host); err != nil {
- n.logger.ComponentWarn(logging.ComponentNode, "Failed to discover cluster peers from LibP2P", zap.Error(err))
- } else if success {
- n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses discovered from LibP2P")
- }
- }
-
- // Also try to update from cluster API (works once peers are connected)
- // Update all cluster peers every 2 minutes to discover new peers
- if time.Now().Unix()%120 == 0 {
- if success, err := n.clusterConfigManager.UpdateAllClusterPeers(); err != nil {
- n.logger.ComponentWarn(logging.ComponentNode, "Failed to update cluster peers during monitoring", zap.Error(err))
- } else if success {
- n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses updated during monitoring")
- }
-
- // Try to repair peer configuration
- if success, err := n.clusterConfigManager.RepairPeerConfiguration(); err != nil {
- n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair peer addresses during monitoring", zap.Error(err))
- } else if success {
- n.logger.ComponentInfo(logging.ComponentNode, "Peer configuration repaired during monitoring")
- }
- }
- }
}
}()

722 pkg/node/node.go
@@ -2,13 +2,8 @@ package node

import (
"context"
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
"fmt"
mathrand "math/rand"
- "net"
- "net/http"
"os"
"path/filepath"
"strings"
@@ -23,13 +18,10 @@ import (
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
- "golang.org/x/crypto/acme"
- "golang.org/x/crypto/acme/autocert"

"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/discovery"
"github.com/DeBrosOfficial/network/pkg/encryption"
- "github.com/DeBrosOfficial/network/pkg/gateway"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/pubsub"
@@ -47,7 +39,7 @@ type Node struct {
clusterDiscovery *database.ClusterDiscoveryService

// Peer discovery
- peerDiscoveryCancel context.CancelFunc
+ bootstrapCancel context.CancelFunc

// PubSub
pubsub *pubsub.ClientAdapter
@@ -57,20 +49,6 @@ type Node struct {

// IPFS Cluster config manager
clusterConfigManager *ipfs.ClusterConfigManager
-
- // Full gateway (for API, auth, pubsub, and internal service routing)
- apiGateway *gateway.Gateway
- apiGatewayServer *http.Server
-
- // SNI gateway (for TCP routing of raft, ipfs, olric, etc.)
- sniGateway *gateway.TCPSNIGateway
-
- // Shared certificate manager for HTTPS and SNI
- certManager *autocert.Manager
-
- // Certificate ready signal - closed when TLS certificates are extracted and ready for use
- // Used to coordinate RQLite node-to-node TLS startup with certificate provisioning
- certReady chan struct{}
}

// NewNode creates a new network node
@@ -91,26 +69,24 @@ func NewNode(cfg *config.Config) (*Node, error) {
func (n *Node) startRQLite(ctx context.Context) error {
n.logger.Info("Starting RQLite database")

- // Determine node identifier for log filename - use node ID for unique filenames
- nodeID := n.config.Node.ID
- if nodeID == "" {
- // Default to "node" if ID is not set
- nodeID = "node"
- }
-
// Create RQLite manager
n.rqliteManager = database.NewRQLiteManager(&n.config.Database, &n.config.Discovery, n.config.Node.DataDir, n.logger.Logger)
- n.rqliteManager.SetNodeType(nodeID)

// Initialize cluster discovery service if LibP2P host is available
if n.host != nil && n.discoveryManager != nil {
- // Create cluster discovery service (all nodes are unified)
+ // Determine node type
+ nodeType := "node"
+ if n.config.Node.Type == "bootstrap" {
+ nodeType = "bootstrap"
+ }
+
+ // Create cluster discovery service
n.clusterDiscovery = database.NewClusterDiscoveryService(
n.host,
n.discoveryManager,
n.rqliteManager,
n.config.Node.ID,
- "node", // Unified node type
+ nodeType,
n.config.Discovery.RaftAdvAddress,
n.config.Discovery.HttpAdvAddress,
n.config.Node.DataDir,
@@ -133,25 +109,6 @@ func (n *Node) startRQLite(ctx context.Context) error {
n.logger.Info("Cluster discovery service started (waiting for RQLite)")
}

- // If node-to-node TLS is configured, wait for certificates to be provisioned
- // This ensures RQLite can start with TLS when joining through the SNI gateway
- if n.config.Database.NodeCert != "" && n.config.Database.NodeKey != "" && n.certReady != nil {
- n.logger.Info("RQLite node TLS configured, waiting for certificates to be provisioned...",
- zap.String("node_cert", n.config.Database.NodeCert),
- zap.String("node_key", n.config.Database.NodeKey))
-
- // Wait for certificate ready signal with timeout
- certTimeout := 5 * time.Minute
- select {
- case <-n.certReady:
- n.logger.Info("Certificates ready, proceeding with RQLite startup")
- case <-time.After(certTimeout):
- return fmt.Errorf("timeout waiting for TLS certificates after %v - ensure HTTPS is configured and ports 80/443 are accessible for ACME challenges", certTimeout)
- case <-ctx.Done():
- return fmt.Errorf("context cancelled while waiting for certificates: %w", ctx.Err())
- }
- }
-
// Start RQLite FIRST before updating metadata
if err := n.rqliteManager.Start(ctx); err != nil {
return err
@@ -174,70 +131,25 @@ func (n *Node) startRQLite(ctx context.Context) error {
return nil
}

- // extractIPFromMultiaddr extracts the IP address from a peer multiaddr
+ // bootstrapPeerSource returns a PeerSource that yields peers from BootstrapPeers.
- // Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols
+ func bootstrapPeerSource(bootstrapAddrs []string, logger *zap.Logger) func(context.Context, int) <-chan peer.AddrInfo {
- func extractIPFromMultiaddr(multiaddrStr string) string {
- ma, err := multiaddr.NewMultiaddr(multiaddrStr)
- if err != nil {
- return ""
- }
-
- // First, try to extract direct IP address
- var ip string
- var dnsName string
- multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
- switch c.Protocol().Code {
- case multiaddr.P_IP4, multiaddr.P_IP6:
- ip = c.Value()
- return false // Stop iteration - found IP
- case multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_DNSADDR:
- dnsName = c.Value()
- // Continue to check for IP, but remember DNS name as fallback
- }
- return true
- })
-
- // If we found a direct IP, return it
- if ip != "" {
- return ip
- }
-
- // If we found a DNS name, try to resolve it
- if dnsName != "" {
- if resolvedIPs, err := net.LookupIP(dnsName); err == nil && len(resolvedIPs) > 0 {
- // Prefer IPv4 addresses, but accept IPv6 if that's all we have
- for _, resolvedIP := range resolvedIPs {
- if resolvedIP.To4() != nil {
- return resolvedIP.String()
- }
- }
- // Return first IPv6 address if no IPv4 found
- return resolvedIPs[0].String()
- }
- }
-
- return ""
- }
-
- // peerSource returns a PeerSource that yields peers from configured peers.
- func peerSource(peerAddrs []string, logger *zap.Logger) func(context.Context, int) <-chan peer.AddrInfo {
return func(ctx context.Context, num int) <-chan peer.AddrInfo {
out := make(chan peer.AddrInfo, num)
go func() {
defer close(out)
count := 0
- for _, s := range peerAddrs {
+ for _, s := range bootstrapAddrs {
if count >= num {
return
}
ma, err := multiaddr.NewMultiaddr(s)
if err != nil {
- logger.Debug("invalid peer multiaddr", zap.String("addr", s), zap.Error(err))
+ logger.Debug("invalid bootstrap multiaddr", zap.String("addr", s), zap.Error(err))
continue
}
ai, err := peer.AddrInfoFromP2pAddr(ma)
if err != nil {
- logger.Debug("failed to parse peer address", zap.String("addr", s), zap.Error(err))
+ logger.Debug("failed to parse bootstrap peer", zap.String("addr", s), zap.Error(err))
continue
}
select {
@ -252,8 +164,8 @@ func peerSource(peerAddrs []string, logger *zap.Logger) func(context.Context, in
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// hasPeerConnections checks if we're connected to any peers
|
// hasBootstrapConnections checks if we're connected to any bootstrap peers
|
||||||
func (n *Node) hasPeerConnections() bool {
|
func (n *Node) hasBootstrapConnections() bool {
|
||||||
if n.host == nil || len(n.config.Discovery.BootstrapPeers) == 0 {
|
if n.host == nil || len(n.config.Discovery.BootstrapPeers) == 0 {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -263,10 +175,10 @@ func (n *Node) hasPeerConnections() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse peer IDs
|
// Parse bootstrap peer IDs
|
||||||
peerIDs := make(map[peer.ID]bool)
|
bootstrapPeerIDs := make(map[peer.ID]bool)
|
||||||
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
|
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
|
||||||
ma, err := multiaddr.NewMultiaddr(peerAddr)
|
ma, err := multiaddr.NewMultiaddr(bootstrapAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -274,12 +186,12 @@ func (n *Node) hasPeerConnections() bool {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
peerIDs[peerInfo.ID] = true
|
bootstrapPeerIDs[peerInfo.ID] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if any connected peer is in our peer list
|
// Check if any connected peer is a bootstrap peer
|
||||||
for _, peerID := range connectedPeers {
|
for _, peerID := range connectedPeers {
|
||||||
if peerIDs[peerID] {
|
if bootstrapPeerIDs[peerID] {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -314,8 +226,8 @@ func addJitter(interval time.Duration) time.Duration {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// connectToPeerAddr connects to a single peer address
|
// connectToBootstrapPeer connects to a single bootstrap peer
|
||||||
func (n *Node) connectToPeerAddr(ctx context.Context, addr string) error {
|
func (n *Node) connectToBootstrapPeer(ctx context.Context, addr string) error {
|
||||||
ma, err := multiaddr.NewMultiaddr(addr)
|
ma, err := multiaddr.NewMultiaddr(addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid multiaddr: %w", err)
|
return fmt.Errorf("invalid multiaddr: %w", err)
|
||||||
@ -327,16 +239,16 @@ func (n *Node) connectToPeerAddr(ctx context.Context, addr string) error {
|
|||||||
return fmt.Errorf("failed to extract peer info: %w", err)
|
return fmt.Errorf("failed to extract peer info: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Avoid dialing ourselves: if the address resolves to our own peer ID, skip.
|
// Avoid dialing ourselves: if the bootstrap address resolves to our own peer ID, skip.
|
||||||
if n.host != nil && peerInfo.ID == n.host.ID() {
|
if n.host != nil && peerInfo.ID == n.host.ID() {
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Skipping peer address because it resolves to self",
|
n.logger.ComponentDebug(logging.ComponentNode, "Skipping bootstrap address because it resolves to self",
|
||||||
zap.String("addr", addr),
|
zap.String("addr", addr),
|
||||||
zap.String("peer_id", peerInfo.ID.String()))
|
zap.String("peer_id", peerInfo.ID.String()))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Log resolved peer info prior to connect
|
// Log resolved peer info prior to connect
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Resolved peer",
|
n.logger.ComponentDebug(logging.ComponentNode, "Resolved bootstrap peer",
|
||||||
zap.String("peer_id", peerInfo.ID.String()),
|
zap.String("peer_id", peerInfo.ID.String()),
|
||||||
zap.String("addr", addr),
|
zap.String("addr", addr),
|
||||||
zap.Int("addr_count", len(peerInfo.Addrs)),
|
zap.Int("addr_count", len(peerInfo.Addrs)),
|
||||||
@ -347,28 +259,28 @@ func (n *Node) connectToPeerAddr(ctx context.Context, addr string) error {
|
|||||||
return fmt.Errorf("failed to connect to peer: %w", err)
|
return fmt.Errorf("failed to connect to peer: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
n.logger.Info("Connected to peer",
|
n.logger.Info("Connected to bootstrap peer",
|
||||||
zap.String("peer", peerInfo.ID.String()),
|
zap.String("peer", peerInfo.ID.String()),
|
||||||
zap.String("addr", addr))
|
zap.String("addr", addr))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// connectToPeers connects to configured LibP2P peers
|
// connectToBootstrapPeers connects to configured LibP2P bootstrap peers
|
||||||
func (n *Node) connectToPeers(ctx context.Context) error {
|
func (n *Node) connectToBootstrapPeers(ctx context.Context) error {
|
||||||
if len(n.config.Discovery.BootstrapPeers) == 0 {
|
if len(n.config.Discovery.BootstrapPeers) == 0 {
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "No peers configured")
|
n.logger.ComponentDebug(logging.ComponentNode, "No bootstrap peers configured")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use passed context with a reasonable timeout for peer connections
|
// Use passed context with a reasonable timeout for bootstrap connections
|
||||||
connectCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
connectCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
|
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
|
||||||
if err := n.connectToPeerAddr(connectCtx, peerAddr); err != nil {
|
if err := n.connectToBootstrapPeer(connectCtx, bootstrapAddr); err != nil {
|
||||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to peer",
|
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to bootstrap peer",
|
||||||
zap.String("addr", peerAddr),
|
zap.String("addr", bootstrapAddr),
|
||||||
zap.Error(err))
|
zap.Error(err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -414,7 +326,7 @@ func (n *Node) startLibP2P() error {
|
|||||||
// For production, these would be enabled
|
// For production, these would be enabled
|
||||||
isLocalhost := len(n.config.Node.ListenAddresses) > 0 &&
|
isLocalhost := len(n.config.Node.ListenAddresses) > 0 &&
|
||||||
(strings.Contains(n.config.Node.ListenAddresses[0], "localhost") ||
|
(strings.Contains(n.config.Node.ListenAddresses[0], "localhost") ||
|
||||||
strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1"))
|
strings.Contains(n.config.Node.ListenAddresses[0], "localhost"))
|
||||||
|
|
||||||
if isLocalhost {
|
if isLocalhost {
|
||||||
n.logger.ComponentInfo(logging.ComponentLibP2P, "Localhost detected - disabling NAT services for local development")
|
n.logger.ComponentInfo(logging.ComponentLibP2P, "Localhost detected - disabling NAT services for local development")
|
||||||
@ -427,7 +339,7 @@ func (n *Node) startLibP2P() error {
|
|||||||
libp2p.EnableRelay(),
|
libp2p.EnableRelay(),
|
||||||
libp2p.NATPortMap(),
|
libp2p.NATPortMap(),
|
||||||
libp2p.EnableAutoRelayWithPeerSource(
|
libp2p.EnableAutoRelayWithPeerSource(
|
||||||
peerSource(n.config.Discovery.BootstrapPeers, n.logger.Logger),
|
bootstrapPeerSource(n.config.Discovery.BootstrapPeers, n.logger.Logger),
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -453,59 +365,59 @@ func (n *Node) startLibP2P() error {
|
|||||||
n.pubsub = pubsub.NewClientAdapter(ps, n.config.Discovery.NodeNamespace)
|
n.pubsub = pubsub.NewClientAdapter(ps, n.config.Discovery.NodeNamespace)
|
||||||
n.logger.Info("Initialized pubsub adapter on namespace", zap.String("namespace", n.config.Discovery.NodeNamespace))
|
n.logger.Info("Initialized pubsub adapter on namespace", zap.String("namespace", n.config.Discovery.NodeNamespace))
|
||||||
|
|
||||||
// Log configured peers
|
// Log configured bootstrap peers
|
||||||
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Configured peers",
|
n.logger.ComponentInfo(logging.ComponentNode, "Configured bootstrap peers",
|
||||||
zap.Strings("peers", n.config.Discovery.BootstrapPeers))
|
zap.Strings("peers", n.config.Discovery.BootstrapPeers))
|
||||||
} else {
|
} else {
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "No peers configured")
|
n.logger.ComponentDebug(logging.ComponentNode, "No bootstrap peers configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Connect to LibP2P peers if configured
|
// Connect to LibP2P bootstrap peers if configured
|
||||||
if err := n.connectToPeers(context.Background()); err != nil {
|
if err := n.connectToBootstrapPeers(context.Background()); err != nil {
|
||||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to peers", zap.Error(err))
|
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to bootstrap peers", zap.Error(err))
|
||||||
// Don't fail - continue without peer connections
|
// Don't fail - continue without bootstrap connections
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start exponential backoff reconnection for peers
|
// Start exponential backoff reconnection for bootstrap peers
|
||||||
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||||
peerCtx, cancel := context.WithCancel(context.Background())
|
bootstrapCtx, cancel := context.WithCancel(context.Background())
|
||||||
n.peerDiscoveryCancel = cancel
|
n.bootstrapCancel = cancel
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
interval := 5 * time.Second
|
interval := 5 * time.Second
|
||||||
consecutiveFailures := 0
|
consecutiveFailures := 0
|
||||||
|
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Starting peer reconnection with exponential backoff",
|
n.logger.ComponentInfo(logging.ComponentNode, "Starting bootstrap peer reconnection with exponential backoff",
|
||||||
zap.Duration("initial_interval", interval),
|
zap.Duration("initial_interval", interval),
|
||||||
zap.Duration("max_interval", 10*time.Minute))
|
zap.Duration("max_interval", 10*time.Minute))
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-peerCtx.Done():
|
case <-bootstrapCtx.Done():
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Peer reconnection loop stopped")
|
n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap reconnection loop stopped")
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we need to attempt connection
|
// Check if we need to attempt connection
|
||||||
if !n.hasPeerConnections() {
|
if !n.hasBootstrapConnections() {
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Attempting peer connection",
|
n.logger.ComponentDebug(logging.ComponentNode, "Attempting bootstrap peer connection",
|
||||||
zap.Duration("current_interval", interval),
|
zap.Duration("current_interval", interval),
|
||||||
zap.Int("consecutive_failures", consecutiveFailures))
|
zap.Int("consecutive_failures", consecutiveFailures))
|
||||||
|
|
||||||
if err := n.connectToPeers(context.Background()); err != nil {
|
if err := n.connectToBootstrapPeers(context.Background()); err != nil {
|
||||||
consecutiveFailures++
|
consecutiveFailures++
|
||||||
// Calculate next backoff interval
|
// Calculate next backoff interval
|
||||||
jitteredInterval := addJitter(interval)
|
jitteredInterval := addJitter(interval)
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Peer connection failed, backing off",
|
n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap connection failed, backing off",
|
||||||
zap.Error(err),
|
zap.Error(err),
|
||||||
zap.Duration("next_attempt_in", jitteredInterval),
|
zap.Duration("next_attempt_in", jitteredInterval),
|
||||||
zap.Int("consecutive_failures", consecutiveFailures))
|
zap.Int("consecutive_failures", consecutiveFailures))
|
||||||
|
|
||||||
// Sleep with jitter
|
// Sleep with jitter
|
||||||
select {
|
select {
|
||||||
case <-peerCtx.Done():
|
case <-bootstrapCtx.Done():
|
||||||
return
|
return
|
||||||
case <-time.After(jitteredInterval):
|
case <-time.After(jitteredInterval):
|
||||||
}
|
}
|
||||||
@ -515,14 +427,14 @@ func (n *Node) startLibP2P() error {
|
|||||||
|
|
||||||
// Log interval increases occasionally to show progress
|
// Log interval increases occasionally to show progress
|
||||||
if consecutiveFailures%5 == 0 {
|
if consecutiveFailures%5 == 0 {
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Peer connection still failing",
|
n.logger.ComponentInfo(logging.ComponentNode, "Bootstrap connection still failing",
|
||||||
zap.Int("consecutive_failures", consecutiveFailures),
|
zap.Int("consecutive_failures", consecutiveFailures),
|
||||||
zap.Duration("current_interval", interval))
|
zap.Duration("current_interval", interval))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Success! Reset interval and counters
|
// Success! Reset interval and counters
|
||||||
if consecutiveFailures > 0 {
|
if consecutiveFailures > 0 {
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Successfully connected to peers",
|
n.logger.ComponentInfo(logging.ComponentNode, "Successfully connected to bootstrap peers",
|
||||||
zap.Int("failures_overcome", consecutiveFailures))
|
zap.Int("failures_overcome", consecutiveFailures))
|
||||||
}
|
}
|
||||||
interval = 5 * time.Second
|
interval = 5 * time.Second
|
||||||
@ -530,15 +442,15 @@ func (n *Node) startLibP2P() error {
|
|||||||
|
|
||||||
// Wait 30 seconds before checking connection again
|
// Wait 30 seconds before checking connection again
|
||||||
select {
|
select {
|
||||||
case <-peerCtx.Done():
|
case <-bootstrapCtx.Done():
|
||||||
return
|
return
|
||||||
case <-time.After(30 * time.Second):
|
case <-time.After(30 * time.Second):
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// We have peer connections, just wait and check periodically
|
// We have bootstrap connections, just wait and check periodically
|
||||||
select {
|
select {
|
||||||
case <-peerCtx.Done():
|
case <-bootstrapCtx.Done():
|
||||||
return
|
return
|
||||||
case <-time.After(30 * time.Second):
|
case <-time.After(30 * time.Second):
|
||||||
}
|
}
|
||||||
@ -547,15 +459,15 @@ func (n *Node) startLibP2P() error {
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add peers to peerstore for peer exchange
|
// Add bootstrap peers to peerstore for peer exchange
|
||||||
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Adding peers to peerstore")
|
n.logger.ComponentInfo(logging.ComponentNode, "Adding bootstrap peers to peerstore")
|
||||||
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
|
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
|
||||||
if ma, err := multiaddr.NewMultiaddr(peerAddr); err == nil {
|
if ma, err := multiaddr.NewMultiaddr(bootstrapAddr); err == nil {
|
||||||
if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
|
if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
|
||||||
// Add to peerstore with longer TTL for peer exchange
|
// Add to peerstore with longer TTL for peer exchange
|
||||||
n.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
|
n.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Added peer to peerstore",
|
n.logger.ComponentDebug(logging.ComponentNode, "Added bootstrap peer to peerstore",
|
||||||
zap.String("peer", peerInfo.ID.String()))
|
zap.String("peer", peerInfo.ID.String()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -668,33 +580,14 @@ func (n *Node) stopPeerDiscovery() {
|
|||||||
func (n *Node) Stop() error {
|
func (n *Node) Stop() error {
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Stopping network node")
|
n.logger.ComponentInfo(logging.ComponentNode, "Stopping network node")
|
||||||
|
|
||||||
// Stop HTTP Gateway server
|
|
||||||
if n.apiGatewayServer != nil {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
_ = n.apiGatewayServer.Shutdown(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close Gateway client
|
|
||||||
if n.apiGateway != nil {
|
|
||||||
n.apiGateway.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop SNI Gateway
|
|
||||||
if n.sniGateway != nil {
|
|
||||||
if err := n.sniGateway.Stop(); err != nil {
|
|
||||||
n.logger.ComponentWarn(logging.ComponentNode, "SNI Gateway stop error", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop cluster discovery
|
// Stop cluster discovery
|
||||||
if n.clusterDiscovery != nil {
|
if n.clusterDiscovery != nil {
|
||||||
n.clusterDiscovery.Stop()
|
n.clusterDiscovery.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop peer reconnection loop
|
// Stop bootstrap reconnection loop
|
||||||
if n.peerDiscoveryCancel != nil {
|
if n.bootstrapCancel != nil {
|
||||||
n.peerDiscoveryCancel()
|
n.bootstrapCancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop peer discovery
|
// Stop peer discovery
|
||||||
@ -717,457 +610,6 @@ func (n *Node) Stop() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// loadNodePeerIDFromIdentity safely loads the node's peer ID from its identity file
|
|
||||||
// This is needed before the host is initialized, so we read directly from the file
|
|
||||||
func loadNodePeerIDFromIdentity(dataDir string) string {
|
|
||||||
identityFile := filepath.Join(os.ExpandEnv(dataDir), "identity.key")
|
|
||||||
|
|
||||||
// Expand ~ in path
|
|
||||||
if strings.HasPrefix(identityFile, "~") {
|
|
||||||
home, err := os.UserHomeDir()
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
identityFile = filepath.Join(home, identityFile[1:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load identity from file
|
|
||||||
if info, err := encryption.LoadIdentity(identityFile); err == nil {
|
|
||||||
return info.PeerID.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return "" // Return empty string if can't load (gateway will work without it)
|
|
||||||
}
|
|
||||||
|
|
||||||
// startHTTPGateway initializes and starts the full API gateway with auth, pubsub, and API endpoints
|
|
||||||
func (n *Node) startHTTPGateway(ctx context.Context) error {
|
|
||||||
if !n.config.HTTPGateway.Enabled {
|
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "HTTP Gateway disabled in config")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create separate logger for gateway
|
|
||||||
logFile := filepath.Join(os.ExpandEnv(n.config.Node.DataDir), "..", "logs", "gateway.log")
|
|
||||||
|
|
||||||
// Ensure logs directory exists
|
|
||||||
logsDir := filepath.Dir(logFile)
|
|
||||||
if err := os.MkdirAll(logsDir, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
gatewayLogger, err := logging.NewFileLogger(logging.ComponentGeneral, logFile, false)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create gateway logger: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create full API Gateway for auth, pubsub, rqlite, and API endpoints
|
|
||||||
// This replaces both the old reverse proxy gateway and the standalone gateway
|
|
||||||
gwCfg := &gateway.Config{
|
|
||||||
ListenAddr: n.config.HTTPGateway.ListenAddr,
|
|
||||||
ClientNamespace: n.config.HTTPGateway.ClientNamespace,
|
|
||||||
BootstrapPeers: n.config.Discovery.BootstrapPeers,
|
|
||||||
NodePeerID: loadNodePeerIDFromIdentity(n.config.Node.DataDir), // Load the node's actual peer ID from its identity file
|
|
||||||
RQLiteDSN: n.config.HTTPGateway.RQLiteDSN,
|
|
||||||
OlricServers: n.config.HTTPGateway.OlricServers,
|
|
||||||
OlricTimeout: n.config.HTTPGateway.OlricTimeout,
|
|
||||||
IPFSClusterAPIURL: n.config.HTTPGateway.IPFSClusterAPIURL,
|
|
||||||
IPFSAPIURL: n.config.HTTPGateway.IPFSAPIURL,
|
|
||||||
IPFSTimeout: n.config.HTTPGateway.IPFSTimeout,
|
|
||||||
// HTTPS/TLS configuration
|
|
||||||
EnableHTTPS: n.config.HTTPGateway.HTTPS.Enabled,
|
|
||||||
DomainName: n.config.HTTPGateway.HTTPS.Domain,
|
|
||||||
TLSCacheDir: n.config.HTTPGateway.HTTPS.CacheDir,
|
|
||||||
}
|
|
||||||
|
|
||||||
apiGateway, err := gateway.New(gatewayLogger, gwCfg)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create full API gateway: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
n.apiGateway = apiGateway
|
|
||||||
|
|
||||||
// Check if HTTPS is enabled and set up certManager BEFORE starting goroutine
|
|
||||||
// This ensures n.certManager is set before SNI gateway initialization checks it
|
|
||||||
var certManager *autocert.Manager
|
|
||||||
var tlsCacheDir string
|
|
||||||
if gwCfg.EnableHTTPS && gwCfg.DomainName != "" {
|
|
||||||
tlsCacheDir = gwCfg.TLSCacheDir
|
|
||||||
if tlsCacheDir == "" {
|
|
||||||
tlsCacheDir = "/home/debros/.orama/tls-cache"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure TLS cache directory exists and is writable
|
|
||||||
if err := os.MkdirAll(tlsCacheDir, 0700); err != nil {
|
|
||||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to create TLS cache directory",
|
|
||||||
zap.String("dir", tlsCacheDir),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "TLS cache directory ready",
|
|
||||||
zap.String("dir", tlsCacheDir),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create TLS configuration with Let's Encrypt autocert
|
|
||||||
// Using STAGING environment to avoid rate limits during development/testing
|
|
||||||
// TODO: Switch to production when ready (remove Client field)
|
|
||||||
certManager = &autocert.Manager{
|
|
||||||
Prompt: autocert.AcceptTOS,
|
|
||||||
HostPolicy: autocert.HostWhitelist(gwCfg.DomainName),
|
|
||||||
Cache: autocert.DirCache(tlsCacheDir),
|
|
||||||
Email: fmt.Sprintf("admin@%s", gwCfg.DomainName),
|
|
||||||
Client: &acme.Client{
|
|
||||||
DirectoryURL: "https://acme-staging-v02.api.letsencrypt.org/directory",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store certificate manager for use by SNI gateway
|
|
||||||
n.certManager = certManager
|
|
||||||
|
|
||||||
// Initialize certificate ready channel - will be closed when certs are extracted
|
|
||||||
// This allows RQLite to wait for certificates before starting with node TLS
|
|
||||||
n.certReady = make(chan struct{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Channel to signal when HTTP server is ready for ACME challenges
|
|
||||||
httpReady := make(chan struct{})
|
|
||||||
|
|
||||||
// Start API Gateway in a goroutine
|
|
||||||
go func() {
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Starting full API gateway",
|
|
||||||
zap.String("listen_addr", gwCfg.ListenAddr),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Check if HTTPS is enabled
|
|
||||||
if gwCfg.EnableHTTPS && gwCfg.DomainName != "" && certManager != nil {
|
|
||||||
// Start HTTPS server with automatic certificate provisioning
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTPS enabled, starting secure gateway",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Determine HTTPS and HTTP ports
|
|
||||||
httpsPort := 443
|
|
||||||
httpPort := 80
|
|
||||||
|
|
||||||
// Start HTTP server for ACME challenges and redirects
|
|
||||||
// certManager.HTTPHandler() must be the main handler, with a fallback for other requests
|
|
||||||
httpServer := &http.Server{
|
|
||||||
Addr: fmt.Sprintf(":%d", httpPort),
|
|
||||||
Handler: certManager.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// Fallback for non-ACME requests: redirect to HTTPS
|
|
||||||
target := fmt.Sprintf("https://%s%s", r.Host, r.URL.RequestURI())
|
|
||||||
http.Redirect(w, r, target, http.StatusMovedPermanently)
|
|
||||||
})),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create HTTP listener first to ensure port 80 is bound before signaling ready
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Binding HTTP listener for ACME challenges",
|
|
||||||
zap.Int("port", httpPort),
|
|
||||||
)
|
|
||||||
httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", httpPort))
|
|
||||||
if err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to bind HTTP listener for ACME", zap.Error(err))
|
|
||||||
close(httpReady) // Signal even on failure so SNI goroutine doesn't hang
|
|
||||||
return
|
|
||||||
}
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP server ready for ACME challenges",
|
|
||||||
zap.Int("port", httpPort),
|
|
||||||
zap.String("tls_cache_dir", tlsCacheDir),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Start HTTP server in background for ACME challenges
|
|
||||||
go func() {
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP server serving ACME challenges",
|
|
||||||
zap.String("addr", httpServer.Addr),
|
|
||||||
)
|
|
||||||
if err := httpServer.Serve(httpListener); err != nil && err != http.ErrServerClosed {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "HTTP server error", zap.Error(err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Pre-provision the certificate BEFORE starting HTTPS server
|
|
||||||
// This ensures we don't accept HTTPS connections without a valid certificate
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Pre-provisioning TLS certificate...",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Use a timeout context for certificate provisioning
|
|
||||||
// If Let's Encrypt is rate-limited or unreachable, don't block forever
|
|
||||||
certCtx, certCancel := context.WithTimeout(context.Background(), 30*time.Second)
|
|
||||||
defer certCancel()
|
|
||||||
|
|
||||||
certReq := &tls.ClientHelloInfo{
|
|
||||||
ServerName: gwCfg.DomainName,
|
|
||||||
}
|
|
||||||
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Initiating certificate request to Let's Encrypt",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
zap.String("acme_environment", "staging"),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Try to get certificate with timeout
|
|
||||||
certProvisionChan := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "GetCertificate goroutine started")
|
|
||||||
_, err := certManager.GetCertificate(certReq)
|
|
||||||
if err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "GetCertificate returned error",
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "GetCertificate succeeded")
|
|
||||||
}
|
|
||||||
certProvisionChan <- err
|
|
||||||
}()
|
|
||||||
|
|
||||||
var certErr error
|
|
||||||
select {
|
|
||||||
case err := <-certProvisionChan:
|
|
||||||
certErr = err
|
|
||||||
if certErr != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Certificate provisioning failed",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
zap.Error(certErr),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
case <-certCtx.Done():
|
|
||||||
certErr = fmt.Errorf("certificate provisioning timeout (Let's Encrypt may be rate-limited or unreachable)")
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Certificate provisioning timeout",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
zap.Duration("timeout", 30*time.Second),
|
|
||||||
zap.Error(certErr),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
if certErr != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to provision TLS certificate - HTTPS disabled",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
zap.Error(certErr),
|
|
||||||
zap.String("http_server_status", "running on port 80 for HTTP fallback"),
|
|
||||||
)
|
|
||||||
// Signal ready for SNI goroutine (even though we're failing)
|
|
||||||
close(httpReady)
|
|
||||||
|
|
||||||
// HTTP server on port 80 is already running, but it's configured to redirect to HTTPS
|
|
||||||
// Replace its handler to serve the gateway directly instead of redirecting
|
|
||||||
httpServer.Handler = apiGateway.Routes()
|
|
||||||
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP gateway available on port 80 only",
|
|
||||||
zap.String("port", "80"),
|
|
||||||
)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "TLS certificate provisioned successfully",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Signal that HTTP server is ready for ACME challenges
|
|
||||||
close(httpReady)
|
|
||||||
|
|
||||||
tlsConfig := &tls.Config{
|
|
||||||
MinVersion: tls.VersionTLS12,
|
|
||||||
GetCertificate: certManager.GetCertificate,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start HTTPS server
|
|
||||||
httpsServer := &http.Server{
|
|
||||||
Addr: fmt.Sprintf(":%d", httpsPort),
|
|
||||||
TLSConfig: tlsConfig,
|
|
||||||
Handler: apiGateway.Routes(),
|
|
||||||
}
|
|
||||||
|
|
||||||
n.apiGatewayServer = httpsServer
|
|
||||||
|
|
||||||
listener, err := tls.Listen("tcp", fmt.Sprintf(":%d", httpsPort), tlsConfig)
|
|
||||||
if err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to create TLS listener", zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTPS gateway listener bound",
|
|
||||||
zap.String("domain", gwCfg.DomainName),
|
|
||||||
zap.Int("port", httpsPort),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Serve HTTPS
|
|
||||||
if err := httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "HTTPS Gateway error", zap.Error(err))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// No HTTPS - signal ready immediately (no ACME needed)
|
|
||||||
close(httpReady)
|
|
||||||
|
|
||||||
// Start plain HTTP server
|
|
||||||
server := &http.Server{
|
|
||||||
Addr: gwCfg.ListenAddr,
|
|
||||||
Handler: apiGateway.Routes(),
|
|
||||||
}
|
|
||||||
|
|
||||||
n.apiGatewayServer = server
|
|
||||||
|
|
||||||
// Try to bind listener
|
|
||||||
ln, err := net.Listen("tcp", gwCfg.ListenAddr)
|
|
||||||
if err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to bind API gateway listener", zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "API gateway listener bound", zap.String("listen_addr", ln.Addr().String()))
|
|
||||||
|
|
||||||
// Serve HTTP
|
|
||||||
if err := server.Serve(ln); err != nil && err != http.ErrServerClosed {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "API Gateway error", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Initialize and start SNI gateway if HTTPS is enabled and SNI is configured
|
|
||||||
// This runs in a separate goroutine that waits for HTTP server to be ready
|
|
||||||
if n.config.HTTPGateway.SNI.Enabled && n.certManager != nil {
|
|
||||||
go func() {
|
|
||||||
// Wait for HTTP server to be ready for ACME challenges
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Waiting for HTTP server before SNI initialization...")
|
|
||||||
<-httpReady
|
|
||||||
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Initializing SNI gateway",
|
|
||||||
zap.String("listen_addr", n.config.HTTPGateway.SNI.ListenAddr),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Provision the certificate from Let's Encrypt cache
|
|
||||||
// This ensures the certificate file is downloaded and cached
|
|
||||||
domain := n.config.HTTPGateway.HTTPS.Domain
|
|
||||||
if domain != "" {
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Provisioning certificate for SNI",
|
|
||||||
zap.String("domain", domain))
|
|
||||||
|
|
||||||
certReq := &tls.ClientHelloInfo{
|
|
||||||
ServerName: domain,
|
|
||||||
}
|
|
||||||
if tlsCert, err := n.certManager.GetCertificate(certReq); err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to provision certificate for SNI",
|
|
||||||
zap.String("domain", domain), zap.Error(err))
|
|
||||||
return // Can't start SNI without certificate
|
|
||||||
} else {
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Certificate provisioned for SNI",
|
|
||||||
zap.String("domain", domain))
|
|
||||||
|
|
||||||
// Extract certificate to PEM files for SNI gateway
|
|
||||||
// SNI gateway needs standard PEM cert files, not autocert cache format
|
|
||||||
tlsCacheDir := n.config.HTTPGateway.HTTPS.CacheDir
|
|
||||||
if tlsCacheDir == "" {
|
|
||||||
tlsCacheDir = "/home/debros/.orama/tls-cache"
|
|
||||||
}
|
|
||||||
|
|
||||||
certPath := filepath.Join(tlsCacheDir, domain+".crt")
|
|
||||||
keyPath := filepath.Join(tlsCacheDir, domain+".key")
|
|
||||||
|
|
||||||
if err := extractPEMFromTLSCert(tlsCert, certPath, keyPath); err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to extract PEM from TLS cert for SNI",
|
|
||||||
zap.Error(err))
|
|
||||||
return // Can't start SNI without PEM files
|
|
||||||
}
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "PEM certificates extracted for SNI",
|
|
||||||
zap.String("cert_path", certPath), zap.String("key_path", keyPath))
|
|
||||||
|
|
||||||
// Signal that certificates are ready for RQLite node-to-node TLS
|
|
||||||
if n.certReady != nil {
|
|
||||||
close(n.certReady)
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Certificate ready signal sent for RQLite node TLS")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "No domain configured for SNI certificate")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create SNI config with certificate files
|
|
||||||
sniCfg := n.config.HTTPGateway.SNI
|
|
||||||
|
|
||||||
// Use the same gateway logger for SNI gateway (writes to gateway.log)
|
|
||||||
sniGateway, err := gateway.NewTCPSNIGateway(gatewayLogger, &sniCfg)
|
|
||||||
if err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to initialize SNI gateway", zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
n.sniGateway = sniGateway
|
|
||||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "SNI gateway initialized, starting...")
|
|
||||||
|
|
||||||
// Start SNI gateway (this blocks until shutdown)
|
|
||||||
if err := n.sniGateway.Start(ctx); err != nil {
|
|
||||||
gatewayLogger.ComponentError(logging.ComponentGateway, "SNI Gateway error", zap.Error(err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractPEMFromTLSCert extracts certificate and private key from tls.Certificate to PEM files
|
|
||||||
func extractPEMFromTLSCert(tlsCert *tls.Certificate, certPath, keyPath string) error {
|
|
||||||
if tlsCert == nil || len(tlsCert.Certificate) == 0 {
|
|
||||||
return fmt.Errorf("invalid tls certificate")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write certificate chain to PEM file
|
|
||||||
certFile, err := os.Create(certPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create cert file: %w", err)
|
|
||||||
}
|
|
||||||
defer certFile.Close()
|
|
||||||
|
|
||||||
// Write all certificates in the chain
|
|
||||||
for _, certBytes := range tlsCert.Certificate {
|
|
||||||
if err := pem.Encode(certFile, &pem.Block{
|
|
||||||
Type: "CERTIFICATE",
|
|
||||||
Bytes: certBytes,
|
|
||||||
}); err != nil {
|
|
||||||
return fmt.Errorf("failed to encode certificate: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write private key to PEM file
|
|
||||||
if tlsCert.PrivateKey == nil {
|
|
||||||
return fmt.Errorf("private key is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
keyFile, err := os.Create(keyPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create key file: %w", err)
|
|
||||||
}
|
|
||||||
defer keyFile.Close()
|
|
||||||
|
|
||||||
// Handle different key types
|
|
||||||
var keyBytes []byte
|
|
||||||
switch key := tlsCert.PrivateKey.(type) {
|
|
||||||
case *x509.Certificate:
|
|
||||||
keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal private key: %w", err)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// Try to marshal as PKCS8
|
|
||||||
keyBytes, err = x509.MarshalPKCS8PrivateKey(tlsCert.PrivateKey)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal private key: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := pem.Encode(keyFile, &pem.Block{
|
|
||||||
Type: "PRIVATE KEY",
|
|
||||||
Bytes: keyBytes,
|
|
||||||
}); err != nil {
|
|
||||||
return fmt.Errorf("failed to encode private key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set proper permissions
|
|
||||||
os.Chmod(certPath, 0644)
|
|
||||||
os.Chmod(keyPath, 0600)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Starts the network node
|
// Starts the network node
|
||||||
func (n *Node) Start(ctx context.Context) error {
|
func (n *Node) Start(ctx context.Context) error {
|
||||||
n.logger.Info("Starting network node", zap.String("data_dir", n.config.Node.DataDir))
|
n.logger.Info("Starting network node", zap.String("data_dir", n.config.Node.DataDir))
|
||||||
@ -1188,12 +630,6 @@ func (n *Node) Start(ctx context.Context) error {
|
|||||||
return fmt.Errorf("failed to create data directory: %w", err)
|
return fmt.Errorf("failed to create data directory: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start HTTP Gateway first (doesn't depend on other services)
|
|
||||||
if err := n.startHTTPGateway(ctx); err != nil {
|
|
||||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to start HTTP Gateway", zap.Error(err))
|
|
||||||
// Don't fail node startup if gateway fails
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start LibP2P host first (needed for cluster discovery)
|
// Start LibP2P host first (needed for cluster discovery)
|
||||||
if err := n.startLibP2P(); err != nil {
|
if err := n.startLibP2P(); err != nil {
|
||||||
return fmt.Errorf("failed to start LibP2P: %w", err)
|
return fmt.Errorf("failed to start LibP2P: %w", err)
|
||||||
@ -1250,14 +686,16 @@ func (n *Node) startIPFSClusterConfig() error {
|
|||||||
return fmt.Errorf("failed to ensure cluster config: %w", err)
|
return fmt.Errorf("failed to ensure cluster config: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to repair peer configuration automatically
|
// If this is not the bootstrap node, try to update bootstrap peer info
|
||||||
// This will be retried periodically if peer is not available yet
|
if n.config.Node.Type != "bootstrap" && len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||||
if success, err := cm.RepairPeerConfiguration(); err != nil {
|
// Try to find bootstrap cluster API URL from config
|
||||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair peer configuration, will retry later", zap.Error(err))
|
// For now, we'll discover it from the first bootstrap peer
|
||||||
} else if success {
|
// In a real scenario, you might want to configure this explicitly
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "Peer configuration repaired successfully")
|
bootstrapClusterAPI := "http://localhost:9094" // Default bootstrap cluster API
|
||||||
} else {
|
if err := cm.UpdateBootstrapPeers(bootstrapClusterAPI); err != nil {
|
||||||
n.logger.ComponentDebug(logging.ComponentNode, "Peer not available yet, will retry periodically")
|
n.logger.ComponentWarn(logging.ComponentNode, "Failed to update bootstrap peers, will retry later", zap.Error(err))
|
||||||
|
// Don't fail - peers can connect later via mDNS or manual config
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized")
|
n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized")
|
||||||
|
|||||||
@ -140,7 +140,7 @@ func TestLoadOrCreateIdentity(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestHasPeerConnections(t *testing.T) {
|
func TestHashBootstrapConnections(t *testing.T) {
|
||||||
cfg := &config.Config{}
|
cfg := &config.Config{}
|
||||||
|
|
||||||
n, err := NewNode(cfg)
|
n, err := NewNode(cfg)
|
||||||
@ -148,8 +148,8 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
t.Fatalf("NewNode() error: %v", err)
|
t.Fatalf("NewNode() error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Assert: Does not have peer connections
|
// Assert: Does not have bootstrap connections
|
||||||
conns := n.hasPeerConnections()
|
conns := n.hasBootstrapConnections()
|
||||||
if conns != false {
|
if conns != false {
|
||||||
t.Fatalf("expected false, got %v", conns)
|
t.Fatalf("expected false, got %v", conns)
|
||||||
}
|
}
|
||||||
@ -162,13 +162,13 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
defer h.Close()
|
defer h.Close()
|
||||||
|
|
||||||
n.host = h
|
n.host = h
|
||||||
conns = n.hasPeerConnections()
|
conns = n.hasBootstrapConnections()
|
||||||
if conns != false {
|
if conns != false {
|
||||||
t.Fatalf("expected false, got %v", conns)
|
t.Fatalf("expected false, got %v", conns)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Assert: Return true if connected to at least one peer
|
// Assert: Return true if connected to at least one bootstrap peer
|
||||||
t.Run("returns true when connected to at least one configured peer", func(t *testing.T) {
|
t.Run("returns true when connected to at least one configured bootstrap peer", func(t *testing.T) {
|
||||||
// Fresh node and config
|
// Fresh node and config
|
||||||
cfg := &config.Config{}
|
cfg := &config.Config{}
|
||||||
n2, err := NewNode(cfg)
|
n2, err := NewNode(cfg)
|
||||||
@ -189,7 +189,7 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
}
|
}
|
||||||
defer hB.Close()
|
defer hB.Close()
|
||||||
|
|
||||||
// Build B's peer multiaddr: <one-of-B.Addrs>/p2p/<B.ID>
|
// Build B's bootstrap multiaddr: <one-of-B.Addrs>/p2p/<B.ID>
|
||||||
var base multiaddr.Multiaddr
|
var base multiaddr.Multiaddr
|
||||||
for _, a := range hB.Addrs() {
|
for _, a := range hB.Addrs() {
|
||||||
if strings.Contains(a.String(), "/tcp/") {
|
if strings.Contains(a.String(), "/tcp/") {
|
||||||
@ -204,11 +204,11 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
|
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
|
||||||
}
|
}
|
||||||
peerAddr := base.Encapsulate(pidMA).String()
|
bootstrap := base.Encapsulate(pidMA).String()
|
||||||
|
|
||||||
// Configure node A with B as a peer
|
// Configure node A with B as a bootstrap peer
|
||||||
n2.host = hA
|
n2.host = hA
|
||||||
n2.config.Discovery.BootstrapPeers = []string{peerAddr}
|
n2.config.Discovery.BootstrapPeers = []string{bootstrap}
|
||||||
|
|
||||||
// Connect A -> B
|
// Connect A -> B
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||||
@ -229,13 +229,13 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Assert: hasPeerConnections returns true
|
// Assert: hasBootstrapConnections returns true
|
||||||
if !n2.hasPeerConnections() {
|
if !n2.hasBootstrapConnections() {
|
||||||
t.Fatalf("expected hasPeerConnections() to be true")
|
t.Fatalf("expected hasBootstrapConnections() to be true")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("returns false when connected peers are not in the peer list", func(t *testing.T) {
|
t.Run("returns false when connected peers are not in the bootstrap list", func(t *testing.T) {
|
||||||
// Fresh node and config
|
// Fresh node and config
|
||||||
cfg := &config.Config{}
|
cfg := &config.Config{}
|
||||||
n2, err := NewNode(cfg)
|
n2, err := NewNode(cfg)
|
||||||
@ -262,7 +262,7 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
}
|
}
|
||||||
defer hC.Close()
|
defer hC.Close()
|
||||||
|
|
||||||
// Build C's peer multiaddr: <one-of-C.Addrs>/p2p/<C.ID>
|
// Build C's bootstrap multiaddr: <one-of-C.Addrs>/p2p/<C.ID>
|
||||||
var baseC multiaddr.Multiaddr
|
var baseC multiaddr.Multiaddr
|
||||||
for _, a := range hC.Addrs() {
|
for _, a := range hC.Addrs() {
|
||||||
if strings.Contains(a.String(), "/tcp/") {
|
if strings.Contains(a.String(), "/tcp/") {
|
||||||
@ -277,13 +277,13 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
|
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
|
||||||
}
|
}
|
||||||
peerC := baseC.Encapsulate(pidC).String()
|
bootstrapC := baseC.Encapsulate(pidC).String()
|
||||||
|
|
||||||
// Configure node A with ONLY C as a peer
|
// Configure node A with ONLY C as a bootstrap peer
|
||||||
n2.host = hA
|
n2.host = hA
|
||||||
n2.config.Discovery.BootstrapPeers = []string{peerC}
|
n2.config.Discovery.BootstrapPeers = []string{bootstrapC}
|
||||||
|
|
||||||
// Connect A -> B (but C is in the peer list, not B)
|
// Connect A -> B (but C is in the bootstrap list, not B)
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := hA.Connect(ctx, peer.AddrInfo{ID: hB.ID(), Addrs: hB.Addrs()}); err != nil {
|
if err := hA.Connect(ctx, peer.AddrInfo{ID: hB.ID(), Addrs: hB.Addrs()}); err != nil {
|
||||||
@ -302,9 +302,9 @@ func TestHasPeerConnections(t *testing.T) {
|
|||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Assert: hasPeerConnections should be false (connected peer is not in peer list)
|
// Assert: hasBootstrapConnections should be false (connected peer is not in bootstrap list)
|
||||||
if n2.hasPeerConnections() {
|
if n2.hasBootstrapConnections() {
|
||||||
t.Fatalf("expected hasPeerConnections() to be false")
|
t.Fatalf("expected hasBootstrapConnections() to be false")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@ -4,8 +4,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
|
||||||
"net/netip"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
@ -15,7 +13,6 @@ import (
|
|||||||
"github.com/DeBrosOfficial/network/pkg/discovery"
|
"github.com/DeBrosOfficial/network/pkg/discovery"
|
||||||
"github.com/libp2p/go-libp2p/core/host"
|
"github.com/libp2p/go-libp2p/core/host"
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
"github.com/multiformats/go-multiaddr"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -29,7 +26,6 @@ type ClusterDiscoveryService struct {
|
|||||||
raftAddress string
|
raftAddress string
|
||||||
httpAddress string
|
httpAddress string
|
||||||
dataDir string
|
dataDir string
|
||||||
minClusterSize int // Minimum cluster size required
|
|
||||||
|
|
||||||
knownPeers map[string]*discovery.RQLiteNodeMetadata // NodeID -> Metadata
|
knownPeers map[string]*discovery.RQLiteNodeMetadata // NodeID -> Metadata
|
||||||
peerHealth map[string]*PeerHealth // NodeID -> Health
|
peerHealth map[string]*PeerHealth // NodeID -> Health
|
||||||
@ -55,11 +51,6 @@ func NewClusterDiscoveryService(
|
|||||||
dataDir string,
|
dataDir string,
|
||||||
logger *zap.Logger,
|
logger *zap.Logger,
|
||||||
) *ClusterDiscoveryService {
|
) *ClusterDiscoveryService {
|
||||||
minClusterSize := 1
|
|
||||||
if rqliteManager != nil && rqliteManager.config != nil {
|
|
||||||
minClusterSize = rqliteManager.config.MinClusterSize
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ClusterDiscoveryService{
|
return &ClusterDiscoveryService{
|
||||||
host: h,
|
host: h,
|
||||||
discoveryMgr: discoveryMgr,
|
discoveryMgr: discoveryMgr,
|
||||||
@ -69,7 +60,6 @@ func NewClusterDiscoveryService(
|
|||||||
raftAddress: raftAddress,
|
raftAddress: raftAddress,
|
||||||
httpAddress: httpAddress,
|
httpAddress: httpAddress,
|
||||||
dataDir: dataDir,
|
dataDir: dataDir,
|
||||||
minClusterSize: minClusterSize,
|
|
||||||
knownPeers: make(map[string]*discovery.RQLiteNodeMetadata),
|
knownPeers: make(map[string]*discovery.RQLiteNodeMetadata),
|
||||||
peerHealth: make(map[string]*PeerHealth),
|
peerHealth: make(map[string]*PeerHealth),
|
||||||
updateInterval: 30 * time.Second,
|
updateInterval: 30 * time.Second,
|
||||||
@ -166,34 +156,21 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
|
|||||||
connectedPeers := c.host.Network().Peers()
|
connectedPeers := c.host.Network().Peers()
|
||||||
var metadata []*discovery.RQLiteNodeMetadata
|
var metadata []*discovery.RQLiteNodeMetadata
|
||||||
|
|
||||||
// Metadata collection is routine - no need to log every occurrence
|
c.logger.Debug("Collecting peer metadata from LibP2P",
|
||||||
|
zap.Int("connected_libp2p_peers", len(connectedPeers)))
|
||||||
c.mu.RLock()
|
|
||||||
currentRaftAddr := c.raftAddress
|
|
||||||
currentHTTPAddr := c.httpAddress
|
|
||||||
c.mu.RUnlock()
|
|
||||||
|
|
||||||
// Add ourselves
|
// Add ourselves
|
||||||
ourMetadata := &discovery.RQLiteNodeMetadata{
|
ourMetadata := &discovery.RQLiteNodeMetadata{
|
||||||
NodeID: currentRaftAddr, // RQLite uses raft address as node ID
|
NodeID: c.raftAddress, // RQLite uses raft address as node ID
|
||||||
RaftAddress: currentRaftAddr,
|
RaftAddress: c.raftAddress,
|
||||||
HTTPAddress: currentHTTPAddr,
|
HTTPAddress: c.httpAddress,
|
||||||
NodeType: c.nodeType,
|
NodeType: c.nodeType,
|
||||||
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
|
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
|
||||||
LastSeen: time.Now(),
|
LastSeen: time.Now(),
|
||||||
ClusterVersion: "1.0",
|
ClusterVersion: "1.0",
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.adjustSelfAdvertisedAddresses(ourMetadata) {
|
|
||||||
c.logger.Debug("Adjusted self-advertised RQLite addresses",
|
|
||||||
zap.String("raft_address", ourMetadata.RaftAddress),
|
|
||||||
zap.String("http_address", ourMetadata.HTTPAddress))
|
|
||||||
}
|
|
||||||
|
|
||||||
metadata = append(metadata, ourMetadata)
|
metadata = append(metadata, ourMetadata)
|
||||||
|
|
||||||
staleNodeIDs := make([]string, 0)
|
|
||||||
|
|
||||||
// Query connected peers for their RQLite metadata
|
// Query connected peers for their RQLite metadata
|
||||||
// For now, we'll use a simple approach - store metadata in peer metadata store
|
// For now, we'll use a simple approach - store metadata in peer metadata store
|
||||||
// In a full implementation, this would use a custom protocol to exchange RQLite metadata
|
// In a full implementation, this would use a custom protocol to exchange RQLite metadata
|
||||||
@ -204,9 +181,6 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
|
|||||||
if jsonData, ok := val.([]byte); ok {
|
if jsonData, ok := val.([]byte); ok {
|
||||||
var peerMeta discovery.RQLiteNodeMetadata
|
var peerMeta discovery.RQLiteNodeMetadata
|
||||||
if err := json.Unmarshal(jsonData, &peerMeta); err == nil {
|
if err := json.Unmarshal(jsonData, &peerMeta); err == nil {
|
||||||
if updated, stale := c.adjustPeerAdvertisedAddresses(peerID, &peerMeta); updated && stale != "" {
|
|
||||||
staleNodeIDs = append(staleNodeIDs, stale)
|
|
||||||
}
|
|
||||||
peerMeta.LastSeen = time.Now()
|
peerMeta.LastSeen = time.Now()
|
||||||
metadata = append(metadata, &peerMeta)
|
metadata = append(metadata, &peerMeta)
|
||||||
}
|
}
|
||||||
@ -214,16 +188,6 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clean up stale entries if NodeID changed
|
|
||||||
if len(staleNodeIDs) > 0 {
|
|
||||||
c.mu.Lock()
|
|
||||||
for _, id := range staleNodeIDs {
|
|
||||||
delete(c.knownPeers, id)
|
|
||||||
delete(c.peerHealth, id)
|
|
||||||
}
|
|
||||||
c.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
return metadata
|
return metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -239,6 +203,9 @@ type membershipUpdateResult struct {
 func (c *ClusterDiscoveryService) updateClusterMembership() {
 	metadata := c.collectPeerMetadata()
+
+	c.logger.Debug("Collected peer metadata",
+		zap.Int("metadata_count", len(metadata)))

 	// Compute membership changes while holding lock
 	c.mu.Lock()
 	result := c.computeMembershipChangesLocked(metadata)
@@ -248,30 +215,35 @@ func (c *ClusterDiscoveryService) updateClusterMembership() {
 	if result.changed {
 		// Log state changes (peer added/removed) at Info level
 		if len(result.added) > 0 || len(result.updated) > 0 {
-			c.logger.Info("Membership changed",
+			c.logger.Info("Cluster membership changed",
 				zap.Int("added", len(result.added)),
 				zap.Int("updated", len(result.updated)),
-				zap.Strings("added", result.added),
-				zap.Strings("updated", result.updated))
+				zap.Strings("added_ids", result.added),
+				zap.Strings("updated_ids", result.updated))
 		}

 		// Write peers.json without holding lock
 		if err := c.writePeersJSONWithData(result.peersJSON); err != nil {
-			c.logger.Error("Failed to write peers.json",
+			c.logger.Error("CRITICAL: Failed to write peers.json",
 				zap.Error(err),
 				zap.String("data_dir", c.dataDir),
-				zap.Int("peers", len(result.peersJSON)))
+				zap.Int("peer_count", len(result.peersJSON)))
 		} else {
 			c.logger.Debug("peers.json updated",
-				zap.Int("peers", len(result.peersJSON)))
+				zap.Int("peer_count", len(result.peersJSON)))
 		}

 		// Update lastUpdate timestamp
 		c.mu.Lock()
 		c.lastUpdate = time.Now()
 		c.mu.Unlock()
+	} else {
+		c.mu.RLock()
+		totalPeers := len(c.knownPeers)
+		c.mu.RUnlock()
+		c.logger.Debug("No changes to cluster membership",
+			zap.Int("total_peers", totalPeers))
 	}
-	// No changes - don't log (reduces noise)
 }

 // computeMembershipChangesLocked computes membership changes and returns snapshot data
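A sketch of the result type implied by the code above; the field set is inferred from how the value is used, and the exact declaration order is an assumption:

type membershipUpdateResult struct {
	changed   bool                     // true when peers were added, updated, or the first sync ran
	peersJSON []map[string]interface{} // snapshot used to write peers.json outside the lock
	added     []string                 // node IDs discovered in this pass
	updated   []string                 // node IDs whose metadata changed
}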
@@ -296,10 +268,10 @@ func (c *ClusterDiscoveryService) computeMembershipChangesLocked(metadata []*dis
 		} else {
 			// New peer discovered
 			added = append(added, meta.NodeID)
-			c.logger.Info("Node added",
-				zap.String("node", meta.NodeID),
-				zap.String("raft", meta.RaftAddress),
-				zap.String("type", meta.NodeType),
+			c.logger.Info("Node added to cluster",
+				zap.String("node_id", meta.NodeID),
+				zap.String("raft_address", meta.RaftAddress),
+				zap.String("node_type", meta.NodeType),
 				zap.Uint64("log_index", meta.RaftLogIndex))
 		}

@@ -321,56 +293,18 @@ func (c *ClusterDiscoveryService) computeMembershipChangesLocked(metadata []*dis
 		}
 	}

-	// CRITICAL FIX: Count remote peers (excluding self)
-	remotePeerCount := 0
-	for _, peer := range c.knownPeers {
-		if peer.NodeID != c.raftAddress {
-			remotePeerCount++
-		}
-	}
-
-	// Get peers JSON snapshot (for checking if it would be empty)
-	peers := c.getPeersJSONUnlocked()
-
 	// Determine if we should write peers.json
 	shouldWrite := len(added) > 0 || len(updated) > 0 || c.lastUpdate.IsZero()

-	// CRITICAL FIX: Don't write peers.json until we have minimum cluster size
-	// This prevents RQLite from starting as a single-node cluster
-	// For min_cluster_size=3, we need at least 2 remote peers (plus self = 3 total)
 	if shouldWrite {
-		// For initial sync, wait until we have at least (MinClusterSize - 1) remote peers
-		// This ensures peers.json contains enough peers for proper cluster formation
-		if c.lastUpdate.IsZero() {
-			requiredRemotePeers := c.minClusterSize - 1
-
-			if remotePeerCount < requiredRemotePeers {
-				c.logger.Info("Waiting for peers",
-					zap.Int("have", remotePeerCount),
-					zap.Int("need", requiredRemotePeers),
-					zap.Int("min_size", c.minClusterSize))
-				return membershipUpdateResult{
-					changed: false,
-				}
-			}
-		}
-
-		// Additional safety check: don't write empty peers.json (would cause single-node cluster)
-		if len(peers) == 0 && c.lastUpdate.IsZero() {
-			c.logger.Info("No remote peers - waiting")
-			return membershipUpdateResult{
-				changed: false,
-			}
-		}
-
 		// Log initial sync if this is the first time
 		if c.lastUpdate.IsZero() {
-			c.logger.Info("Initial sync",
-				zap.Int("total", len(c.knownPeers)),
-				zap.Int("remote", remotePeerCount),
-				zap.Int("in_json", len(peers)))
+			c.logger.Info("Initial cluster membership sync",
+				zap.Int("total_peers", len(c.knownPeers)))
 		}
 	}

+	// Get peers JSON snapshot
+	peers := c.getPeersJSONUnlocked()
 	return membershipUpdateResult{
 		peersJSON: peers,
 		added:     added,
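The gate removed in this hunk reduces to a small predicate; an illustrative sketch, where the field names follow the surrounding diff but the helper itself is hypothetical:

// hasMinClusterQuorum reports whether enough remote peers are known to write
// peers.json without risking a single-node cluster. Hypothetical helper that
// mirrors the logic removed above.
func (c *ClusterDiscoveryService) hasMinClusterQuorum() bool {
	remote := 0
	for _, peer := range c.knownPeers {
		if peer.NodeID != c.raftAddress { // exclude ourselves
			remote++
		}
	}
	// For min_cluster_size=3 we need at least 2 remote peers (plus self = 3 total).
	return remote >= c.minClusterSize-1
}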
@@ -397,8 +331,8 @@ func (c *ClusterDiscoveryService) removeInactivePeers() {

 		if inactiveDuration > c.inactivityLimit {
 			// Mark as inactive and remove
-			c.logger.Warn("Node removed",
-				zap.String("node", nodeID),
+			c.logger.Warn("Node removed from cluster",
+				zap.String("node_id", nodeID),
 				zap.String("reason", "inactive"),
 				zap.Duration("inactive_duration", inactiveDuration))

@@ -410,9 +344,9 @@ func (c *ClusterDiscoveryService) removeInactivePeers() {

 	// Regenerate peers.json if any peers were removed
 	if len(removed) > 0 {
-		c.logger.Info("Removed inactive",
-			zap.Int("count", len(removed)),
-			zap.Strings("nodes", removed))
+		c.logger.Info("Removed inactive nodes, regenerating peers.json",
+			zap.Int("removed", len(removed)),
+			zap.Strings("node_ids", removed))

 		if err := c.writePeersJSON(); err != nil {
 			c.logger.Error("Failed to write peers.json after cleanup", zap.Error(err))
@@ -432,11 +366,6 @@ func (c *ClusterDiscoveryService) getPeersJSONUnlocked() []map[string]interface{
 	peers := make([]map[string]interface{}, 0, len(c.knownPeers))

 	for _, peer := range c.knownPeers {
-		// CRITICAL FIX: Include ALL peers (including self) in peers.json
-		// When using expect configuration with recovery, RQLite needs the complete
-		// expected cluster configuration to properly form consensus.
-		// The peers.json file is used by RQLite's recovery mechanism to know
-		// what the full cluster membership should be, including the local node.
 		peerEntry := map[string]interface{}{
 			"id":      peer.RaftAddress, // RQLite uses raft address as node ID
 			"address": peer.RaftAddress,
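For reference, the entries assembled above end up as a JSON array under the raft directory; a self-contained sketch that prints such a file, where the addresses are made up and the "non_voter" field is an assumption about RQLite's recovery format:

// Illustrative only: builds two peers.json entries like the ones assembled above
// and prints the JSON that RQLite's recovery mechanism would read.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	peers := []map[string]interface{}{
		{"id": "10.0.0.1:7001", "address": "10.0.0.1:7001", "non_voter": false},
		{"id": "10.0.0.2:7001", "address": "10.0.0.2:7001", "non_voter": false},
	}
	data, _ := json.MarshalIndent(peers, "", "  ")
	fmt.Println(string(data)) // written atomically to <data_dir>/rqlite/raft/peers.json
}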
@@ -472,7 +401,11 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
 	// Get the RQLite raft directory
 	rqliteDir := filepath.Join(dataDir, "rqlite", "raft")

-	// Writing peers.json - routine operation, no need to log details
+	c.logger.Debug("Writing peers.json",
+		zap.String("data_dir", c.dataDir),
+		zap.String("expanded_path", dataDir),
+		zap.String("raft_dir", rqliteDir),
+		zap.Int("peer_count", len(peers)))

 	if err := os.MkdirAll(rqliteDir, 0755); err != nil {
 		return fmt.Errorf("failed to create raft directory %s: %w", rqliteDir, err)
@@ -483,7 +416,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte

 	// Backup existing peers.json if it exists
 	if _, err := os.Stat(peersFile); err == nil {
-		// Backup existing peers.json if it exists - routine operation
+		c.logger.Debug("Backing up existing peers.json", zap.String("backup_file", backupFile))
 		data, err := os.ReadFile(peersFile)
 		if err == nil {
 			_ = os.WriteFile(backupFile, data, 0644)
@@ -496,7 +429,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
 		return fmt.Errorf("failed to marshal peers.json: %w", err)
 	}

-	// Marshaled peers.json - routine operation
+	c.logger.Debug("Marshaled peers.json", zap.Int("data_size", len(data)))

 	// Write atomically using temp file + rename
 	tempFile := peersFile + ".tmp"
@@ -516,8 +449,9 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
 	}

 	c.logger.Info("peers.json written",
-		zap.Int("peers", len(peers)),
-		zap.Strings("nodes", nodeIDs))
+		zap.String("file", peersFile),
+		zap.Int("node_count", len(peers)),
+		zap.Strings("node_ids", nodeIDs))

 	return nil
 }
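The "temp file + rename" step referenced above is the usual way to make the write atomic on POSIX filesystems, so readers never observe a half-written peers.json; a self-contained sketch, with the package and helper name chosen for illustration only:

package peersjson // illustrative package name

import (
	"fmt"
	"os"
)

// writeFileAtomic writes data to path via a temporary file followed by a rename.
// Rename is atomic within a filesystem and replaces any existing file.
func writeFileAtomic(path string, data []byte, perm os.FileMode) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, perm); err != nil {
		return fmt.Errorf("write temp file: %w", err)
	}
	if err := os.Rename(tmp, path); err != nil {
		_ = os.Remove(tmp) // best-effort cleanup on failure
		return fmt.Errorf("rename temp file: %w", err)
	}
	return nil
}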
@@ -584,34 +518,25 @@ func (c *ClusterDiscoveryService) HasRecentPeersJSON() bool {
 	return time.Since(c.lastUpdate) < 5*time.Minute
 }

-// FindJoinTargets discovers join targets via LibP2P
+// FindJoinTargets discovers join targets via LibP2P, prioritizing bootstrap nodes
 func (c *ClusterDiscoveryService) FindJoinTargets() []string {
 	c.mu.RLock()
 	defer c.mu.RUnlock()

 	targets := []string{}

-	// All nodes are equal - prioritize by Raft log index (more advanced = better)
-	type nodeWithIndex struct {
-		address  string
-		logIndex uint64
-	}
-	var nodes []nodeWithIndex
+	// Prioritize bootstrap nodes
 	for _, peer := range c.knownPeers {
-		nodes = append(nodes, nodeWithIndex{peer.RaftAddress, peer.RaftLogIndex})
-	}
-
-	// Sort by log index descending (higher log index = more up-to-date)
-	for i := 0; i < len(nodes)-1; i++ {
-		for j := i + 1; j < len(nodes); j++ {
-			if nodes[j].logIndex > nodes[i].logIndex {
-				nodes[i], nodes[j] = nodes[j], nodes[i]
-			}
+		if peer.NodeType == "bootstrap" {
+			targets = append(targets, peer.RaftAddress)
 		}
 	}

-	for _, n := range nodes {
-		targets = append(targets, n.address)
+	// Add other nodes as fallback
+	for _, peer := range c.knownPeers {
+		if peer.NodeType != "bootstrap" {
+			targets = append(targets, peer.RaftAddress)
+		}
 	}

 	return targets
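The hand-rolled ordering on the main side (highest Raft log index first, since the most up-to-date node is the best join target) can be expressed more compactly with the standard library; a runnable sketch with made-up addresses:

package main

import (
	"fmt"
	"sort"
)

type nodeWithIndex struct {
	address  string
	logIndex uint64
}

func main() {
	nodes := []nodeWithIndex{
		{"10.0.0.2:7001", 120},
		{"10.0.0.3:7001", 340},
		{"10.0.0.1:7001", 95},
	}
	// Higher log index first: the most advanced node is preferred as join target.
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].logIndex > nodes[j].logIndex })
	for _, n := range nodes {
		fmt.Println(n.address, n.logIndex)
	}
}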
@ -642,54 +567,17 @@ func (c *ClusterDiscoveryService) WaitForDiscoverySettling(ctx context.Context)
|
|||||||
|
|
||||||
// TriggerSync manually triggers a cluster membership sync
|
// TriggerSync manually triggers a cluster membership sync
|
||||||
func (c *ClusterDiscoveryService) TriggerSync() {
|
func (c *ClusterDiscoveryService) TriggerSync() {
|
||||||
// All nodes use the same discovery timing for consistency
|
c.logger.Info("Manually triggering cluster membership sync")
|
||||||
|
|
||||||
|
// For bootstrap nodes, wait a bit for peer discovery to stabilize
|
||||||
|
if c.nodeType == "bootstrap" {
|
||||||
|
c.logger.Info("Bootstrap node: waiting for peer discovery to complete")
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
}
|
||||||
|
|
||||||
c.updateClusterMembership()
|
c.updateClusterMembership()
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForceWritePeersJSON forces writing peers.json regardless of membership changes
|
|
||||||
// This is useful after clearing raft state when we need to recreate peers.json
|
|
||||||
func (c *ClusterDiscoveryService) ForceWritePeersJSON() error {
|
|
||||||
c.logger.Info("Force writing peers.json")
|
|
||||||
|
|
||||||
// First, collect latest peer metadata to ensure we have current information
|
|
||||||
metadata := c.collectPeerMetadata()
|
|
||||||
|
|
||||||
// Update known peers with latest metadata (without writing file yet)
|
|
||||||
c.mu.Lock()
|
|
||||||
for _, meta := range metadata {
|
|
||||||
c.knownPeers[meta.NodeID] = meta
|
|
||||||
// Update health tracking for remote peers
|
|
||||||
if meta.NodeID != c.raftAddress {
|
|
||||||
if _, ok := c.peerHealth[meta.NodeID]; !ok {
|
|
||||||
c.peerHealth[meta.NodeID] = &PeerHealth{
|
|
||||||
LastSeen: time.Now(),
|
|
||||||
LastSuccessful: time.Now(),
|
|
||||||
Status: "active",
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
c.peerHealth[meta.NodeID].LastSeen = time.Now()
|
|
||||||
c.peerHealth[meta.NodeID].Status = "active"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
peers := c.getPeersJSONUnlocked()
|
|
||||||
c.mu.Unlock()
|
|
||||||
|
|
||||||
// Now force write the file
|
|
||||||
if err := c.writePeersJSONWithData(peers); err != nil {
|
|
||||||
c.logger.Error("Failed to force write peers.json",
|
|
||||||
zap.Error(err),
|
|
||||||
zap.String("data_dir", c.dataDir),
|
|
||||||
zap.Int("peers", len(peers)))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
c.logger.Info("peers.json written",
|
|
||||||
zap.Int("peers", len(peers)))
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TriggerPeerExchange actively exchanges peer information with connected peers
|
// TriggerPeerExchange actively exchanges peer information with connected peers
|
||||||
// This populates the peerstore with RQLite metadata from other nodes
|
// This populates the peerstore with RQLite metadata from other nodes
|
||||||
func (c *ClusterDiscoveryService) TriggerPeerExchange(ctx context.Context) error {
|
func (c *ClusterDiscoveryService) TriggerPeerExchange(ctx context.Context) error {
|
||||||
@ -697,36 +585,25 @@ func (c *ClusterDiscoveryService) TriggerPeerExchange(ctx context.Context) error
|
|||||||
return fmt.Errorf("discovery manager not available")
|
return fmt.Errorf("discovery manager not available")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.logger.Info("Triggering peer exchange via discovery manager")
|
||||||
collected := c.discoveryMgr.TriggerPeerExchange(ctx)
|
collected := c.discoveryMgr.TriggerPeerExchange(ctx)
|
||||||
c.logger.Debug("Exchange completed", zap.Int("with_metadata", collected))
|
c.logger.Info("Peer exchange completed", zap.Int("peers_with_metadata", collected))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UpdateOwnMetadata updates our own RQLite metadata in the peerstore
|
// UpdateOwnMetadata updates our own RQLite metadata in the peerstore
|
||||||
func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
|
func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
|
||||||
c.mu.RLock()
|
|
||||||
currentRaftAddr := c.raftAddress
|
|
||||||
currentHTTPAddr := c.httpAddress
|
|
||||||
c.mu.RUnlock()
|
|
||||||
|
|
||||||
metadata := &discovery.RQLiteNodeMetadata{
|
metadata := &discovery.RQLiteNodeMetadata{
|
||||||
NodeID: currentRaftAddr, // RQLite uses raft address as node ID
|
NodeID: c.raftAddress, // RQLite uses raft address as node ID
|
||||||
RaftAddress: currentRaftAddr,
|
RaftAddress: c.raftAddress,
|
||||||
HTTPAddress: currentHTTPAddr,
|
HTTPAddress: c.httpAddress,
|
||||||
NodeType: c.nodeType,
|
NodeType: c.nodeType,
|
||||||
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
|
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
|
||||||
LastSeen: time.Now(),
|
LastSeen: time.Now(),
|
||||||
ClusterVersion: "1.0",
|
ClusterVersion: "1.0",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adjust addresses if needed
|
|
||||||
if c.adjustSelfAdvertisedAddresses(metadata) {
|
|
||||||
c.logger.Debug("Adjusted self-advertised RQLite addresses in UpdateOwnMetadata",
|
|
||||||
zap.String("raft_address", metadata.RaftAddress),
|
|
||||||
zap.String("http_address", metadata.HTTPAddress))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store in our own peerstore for peer exchange
|
// Store in our own peerstore for peer exchange
|
||||||
data, err := json.Marshal(metadata)
|
data, err := json.Marshal(metadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -739,28 +616,13 @@ func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.logger.Debug("Metadata updated",
|
c.logger.Debug("Updated own RQLite metadata",
|
||||||
zap.String("node", metadata.NodeID),
|
zap.String("node_id", metadata.NodeID),
|
||||||
zap.Uint64("log_index", metadata.RaftLogIndex))
|
zap.Uint64("log_index", metadata.RaftLogIndex))
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreRemotePeerMetadata stores metadata received from a remote peer
|
// StoreRemotePeerMetadata stores metadata received from a remote peer
|
||||||
func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metadata *discovery.RQLiteNodeMetadata) error {
|
func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metadata *discovery.RQLiteNodeMetadata) error {
|
||||||
if metadata == nil {
|
|
||||||
return fmt.Errorf("metadata is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adjust addresses if needed (replace localhost with actual IP)
|
|
||||||
if updated, stale := c.adjustPeerAdvertisedAddresses(peerID, metadata); updated && stale != "" {
|
|
||||||
// Clean up stale entry if NodeID changed
|
|
||||||
c.mu.Lock()
|
|
||||||
delete(c.knownPeers, stale)
|
|
||||||
delete(c.peerHealth, stale)
|
|
||||||
c.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
metadata.LastSeen = time.Now()
|
|
||||||
|
|
||||||
data, err := json.Marshal(metadata)
|
data, err := json.Marshal(metadata)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to marshal metadata: %w", err)
|
return fmt.Errorf("failed to marshal metadata: %w", err)
|
||||||
@ -770,245 +632,9 @@ func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metada
|
|||||||
return fmt.Errorf("failed to store metadata: %w", err)
|
return fmt.Errorf("failed to store metadata: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.logger.Debug("Metadata stored",
|
c.logger.Debug("Stored remote peer metadata",
|
||||||
zap.String("peer", shortPeerID(peerID)),
|
zap.String("peer_id", peerID.String()[:8]+"..."),
|
||||||
zap.String("node", metadata.NodeID))
|
zap.String("node_id", metadata.NodeID))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// adjustPeerAdvertisedAddresses adjusts peer metadata addresses by replacing localhost/loopback
|
|
||||||
// with the actual IP address from LibP2P connection. Returns (updated, staleNodeID).
|
|
||||||
// staleNodeID is non-empty if NodeID changed (indicating old entry should be cleaned up).
|
|
||||||
func (c *ClusterDiscoveryService) adjustPeerAdvertisedAddresses(peerID peer.ID, meta *discovery.RQLiteNodeMetadata) (bool, string) {
|
|
||||||
ip := c.selectPeerIP(peerID)
|
|
||||||
if ip == "" {
|
|
||||||
return false, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
changed, stale := rewriteAdvertisedAddresses(meta, ip, true)
|
|
||||||
if changed {
|
|
||||||
c.logger.Debug("Addresses normalized",
|
|
||||||
zap.String("peer", shortPeerID(peerID)),
|
|
||||||
zap.String("raft", meta.RaftAddress),
|
|
||||||
zap.String("http_address", meta.HTTPAddress))
|
|
||||||
}
|
|
||||||
return changed, stale
|
|
||||||
}
|
|
||||||
|
|
||||||
// adjustSelfAdvertisedAddresses adjusts our own metadata addresses by replacing localhost/loopback
|
|
||||||
// with the actual IP address from LibP2P host. Updates internal state if changed.
|
|
||||||
func (c *ClusterDiscoveryService) adjustSelfAdvertisedAddresses(meta *discovery.RQLiteNodeMetadata) bool {
|
|
||||||
ip := c.selectSelfIP()
|
|
||||||
if ip == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
changed, _ := rewriteAdvertisedAddresses(meta, ip, true)
|
|
||||||
if !changed {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update internal state with corrected addresses
|
|
||||||
c.mu.Lock()
|
|
||||||
c.raftAddress = meta.RaftAddress
|
|
||||||
c.httpAddress = meta.HTTPAddress
|
|
||||||
c.mu.Unlock()
|
|
||||||
|
|
||||||
if c.rqliteManager != nil {
|
|
||||||
c.rqliteManager.UpdateAdvertisedAddresses(meta.RaftAddress, meta.HTTPAddress)
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// selectPeerIP selects the best IP address for a peer from LibP2P connections.
|
|
||||||
// Prefers public IPs, falls back to private IPs if no public IP is available.
|
|
||||||
func (c *ClusterDiscoveryService) selectPeerIP(peerID peer.ID) string {
|
|
||||||
var fallback string
|
|
||||||
|
|
||||||
// First, try to get IP from active connections
|
|
||||||
for _, conn := range c.host.Network().ConnsToPeer(peerID) {
|
|
||||||
if ip, public := ipFromMultiaddr(conn.RemoteMultiaddr()); ip != "" {
|
|
||||||
if shouldReplaceHost(ip) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if public {
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
if fallback == "" {
|
|
||||||
fallback = ip
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback to peerstore addresses
|
|
||||||
for _, addr := range c.host.Peerstore().Addrs(peerID) {
|
|
||||||
if ip, public := ipFromMultiaddr(addr); ip != "" {
|
|
||||||
if shouldReplaceHost(ip) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if public {
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
if fallback == "" {
|
|
||||||
fallback = ip
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
// selectSelfIP selects the best IP address for ourselves from LibP2P host addresses.
|
|
||||||
// Prefers public IPs, falls back to private IPs if no public IP is available.
|
|
||||||
func (c *ClusterDiscoveryService) selectSelfIP() string {
|
|
||||||
var fallback string
|
|
||||||
|
|
||||||
for _, addr := range c.host.Addrs() {
|
|
||||||
if ip, public := ipFromMultiaddr(addr); ip != "" {
|
|
||||||
if shouldReplaceHost(ip) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if public {
|
|
||||||
return ip
|
|
||||||
}
|
|
||||||
if fallback == "" {
|
|
||||||
fallback = ip
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
// rewriteAdvertisedAddresses rewrites RaftAddress and HTTPAddress in metadata,
|
|
||||||
// replacing localhost/loopback addresses with the provided IP.
|
|
||||||
// Returns (changed, staleNodeID). staleNodeID is non-empty if NodeID changed.
|
|
||||||
func rewriteAdvertisedAddresses(meta *discovery.RQLiteNodeMetadata, newHost string, allowNodeIDRewrite bool) (bool, string) {
|
|
||||||
if meta == nil || newHost == "" {
|
|
||||||
return false, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
originalNodeID := meta.NodeID
|
|
||||||
changed := false
|
|
||||||
nodeIDChanged := false
|
|
||||||
|
|
||||||
// Replace host in RaftAddress if it's localhost/loopback
|
|
||||||
if newAddr, replaced := replaceAddressHost(meta.RaftAddress, newHost); replaced {
|
|
||||||
if meta.RaftAddress != newAddr {
|
|
||||||
meta.RaftAddress = newAddr
|
|
||||||
changed = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Replace host in HTTPAddress if it's localhost/loopback
|
|
||||||
if newAddr, replaced := replaceAddressHost(meta.HTTPAddress, newHost); replaced {
|
|
||||||
if meta.HTTPAddress != newAddr {
|
|
||||||
meta.HTTPAddress = newAddr
|
|
||||||
changed = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update NodeID to match RaftAddress if it changed
|
|
||||||
if allowNodeIDRewrite {
|
|
||||||
if meta.RaftAddress != "" && (meta.NodeID == "" || meta.NodeID == originalNodeID || shouldReplaceHost(hostFromAddress(meta.NodeID))) {
|
|
||||||
if meta.NodeID != meta.RaftAddress {
|
|
||||||
meta.NodeID = meta.RaftAddress
|
|
||||||
nodeIDChanged = meta.NodeID != originalNodeID
|
|
||||||
if nodeIDChanged {
|
|
||||||
changed = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if nodeIDChanged {
|
|
||||||
return changed, originalNodeID
|
|
||||||
}
|
|
||||||
return changed, ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// replaceAddressHost replaces the host part of an address if it's localhost/loopback.
|
|
||||||
// Returns (newAddress, replaced). replaced is true if host was replaced.
|
|
||||||
func replaceAddressHost(address, newHost string) (string, bool) {
|
|
||||||
if address == "" || newHost == "" {
|
|
||||||
return address, false
|
|
||||||
}
|
|
||||||
|
|
||||||
host, port, err := net.SplitHostPort(address)
|
|
||||||
if err != nil {
|
|
||||||
return address, false
|
|
||||||
}
|
|
||||||
|
|
||||||
if !shouldReplaceHost(host) {
|
|
||||||
return address, false
|
|
||||||
}
|
|
||||||
|
|
||||||
return net.JoinHostPort(newHost, port), true
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldReplaceHost returns true if the host should be replaced (localhost, loopback, etc.)
|
|
||||||
func shouldReplaceHost(host string) bool {
|
|
||||||
if host == "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if strings.EqualFold(host, "localhost") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if it's a loopback or unspecified address
|
|
||||||
if addr, err := netip.ParseAddr(host); err == nil {
|
|
||||||
if addr.IsLoopback() || addr.IsUnspecified() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// hostFromAddress extracts the host part from a host:port address
|
|
||||||
func hostFromAddress(address string) string {
|
|
||||||
host, _, err := net.SplitHostPort(address)
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return host
|
|
||||||
}
|
|
||||||
|
|
||||||
// ipFromMultiaddr extracts an IP address from a multiaddr and returns (ip, isPublic)
|
|
||||||
func ipFromMultiaddr(addr multiaddr.Multiaddr) (string, bool) {
|
|
||||||
if addr == nil {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
if v4, err := addr.ValueForProtocol(multiaddr.P_IP4); err == nil {
|
|
||||||
return v4, isPublicIP(v4)
|
|
||||||
}
|
|
||||||
if v6, err := addr.ValueForProtocol(multiaddr.P_IP6); err == nil {
|
|
||||||
return v6, isPublicIP(v6)
|
|
||||||
}
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isPublicIP returns true if the IP is a public (non-private, non-loopback) address
|
|
||||||
func isPublicIP(ip string) bool {
|
|
||||||
addr, err := netip.ParseAddr(ip)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// Exclude loopback, unspecified, link-local, multicast, and private addresses
|
|
||||||
if addr.IsLoopback() || addr.IsUnspecified() || addr.IsLinkLocalUnicast() || addr.IsLinkLocalMulticast() || addr.IsPrivate() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// shortPeerID returns a shortened version of a peer ID for logging
|
|
||||||
func shortPeerID(id peer.ID) string {
|
|
||||||
s := id.String()
|
|
||||||
if len(s) <= 8 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return s[:8] + "..."
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -35,11 +35,11 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
 			inactiveCount++
 		}

-		// Try to determine leader (highest log index is likely the leader)
+		// Try to determine leader
 		if peer, ok := c.knownPeers[nodeID]; ok {
 			// We'd need to check the actual leader status from RQLite
-			// For now, use highest log index as heuristic
-			if currentLeader == "" || peer.RaftLogIndex > c.knownPeers[currentLeader].RaftLogIndex {
+			// For now, bootstrap nodes are more likely to be leader
+			if peer.NodeType == "bootstrap" && currentLeader == "" {
 				currentLeader = nodeID
 			}
 		}
@@ -71,3 +71,4 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
 		AveragePeerHealth: averageHealth,
 	}
 }
+
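The main-branch heuristic above treats the peer with the highest Raft log index as the presumed leader when actual leader status is not queried from RQLite; as a standalone sketch over the RQLiteNodeMetadata shape assumed earlier (the helper is illustrative, not part of the project API):

// presumedLeader returns the node ID with the highest Raft log index.
func presumedLeader(peers map[string]*RQLiteNodeMetadata) string {
	leader := ""
	var best uint64
	for id, p := range peers {
		if leader == "" || p.RaftLogIndex > best {
			leader, best = id, p.RaftLogIndex
		}
	}
	return leader
}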
@@ -18,7 +18,6 @@ import (
 	"go.uber.org/zap"

 	"github.com/DeBrosOfficial/network/pkg/config"
-	"github.com/DeBrosOfficial/network/pkg/tlsutil"
 )

 // RQLiteManager manages an RQLite node instance
@@ -26,7 +25,6 @@ type RQLiteManager struct {
 	config         *config.DatabaseConfig
 	discoverConfig *config.DiscoveryConfig
 	dataDir        string
-	nodeType       string // Node type identifier
 	logger         *zap.Logger
 	cmd            *exec.Cmd
 	connection     *gorqlite.Connection
@@ -35,6 +33,11 @@ type RQLiteManager struct {

 // waitForSQLAvailable waits until a simple query succeeds, indicating a leader is known and queries can be served.
 func (r *RQLiteManager) waitForSQLAvailable(ctx context.Context) error {
+	if r.connection == nil {
+		r.logger.Error("No rqlite connection")
+		return errors.New("no rqlite connection")
+	}
+
 	ticker := time.NewTicker(1 * time.Second)
 	defer ticker.Stop()

@@ -44,16 +47,6 @@ func (r *RQLiteManager) waitForSQLAvailable(ctx context.Context) error {
 		case <-ctx.Done():
 			return ctx.Err()
 		case <-ticker.C:
-			// Check for nil connection inside the loop to handle cases where
-			// connection becomes nil during restart/recovery operations
-			if r.connection == nil {
-				attempts++
-				if attempts%5 == 0 { // log every ~5s to reduce noise
-					r.logger.Debug("Waiting for RQLite connection to be established")
-				}
-				continue
-			}
-
 			attempts++
 			_, err := r.connection.QueryOne("SELECT 1")
 			if err == nil {
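The polling pattern shown above (a ticker combined with context cancellation) works for any readiness probe; a minimal self-contained sketch with a placeholder probe function, not tied to the project's types:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitUntil polls probe once per second until it succeeds or ctx is cancelled,
// mirroring the ticker/select structure of waitForSQLAvailable above.
func waitUntil(ctx context.Context, probe func() error) error {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			if err := probe(); err == nil {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	start := time.Now()
	err := waitUntil(ctx, func() error {
		if time.Since(start) > 2*time.Second {
			return nil // pretend the service became ready
		}
		return errors.New("not ready")
	})
	fmt.Println("result:", err)
}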
@ -82,31 +75,6 @@ func (r *RQLiteManager) SetDiscoveryService(service *ClusterDiscoveryService) {
|
|||||||
r.discoveryService = service
|
r.discoveryService = service
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetNodeType sets the node type for this RQLite manager
|
|
||||||
func (r *RQLiteManager) SetNodeType(nodeType string) {
|
|
||||||
if nodeType != "" {
|
|
||||||
r.nodeType = nodeType
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateAdvertisedAddresses overrides the discovery advertised addresses when cluster discovery
|
|
||||||
// infers a better host than what was provided via configuration (e.g. replacing localhost).
|
|
||||||
func (r *RQLiteManager) UpdateAdvertisedAddresses(raftAddr, httpAddr string) {
|
|
||||||
if r == nil || r.discoverConfig == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if raftAddr != "" && r.discoverConfig.RaftAdvAddress != raftAddr {
|
|
||||||
r.logger.Info("Updating Raft advertised address", zap.String("addr", raftAddr))
|
|
||||||
r.discoverConfig.RaftAdvAddress = raftAddr
|
|
||||||
}
|
|
||||||
|
|
||||||
if httpAddr != "" && r.discoverConfig.HttpAdvAddress != httpAddr {
|
|
||||||
r.logger.Info("Updating HTTP advertised address", zap.String("addr", httpAddr))
|
|
||||||
r.discoverConfig.HttpAdvAddress = httpAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start starts the RQLite node
|
// Start starts the RQLite node
|
||||||
func (r *RQLiteManager) Start(ctx context.Context) error {
|
func (r *RQLiteManager) Start(ctx context.Context) error {
|
||||||
rqliteDataDir, err := r.prepareDataDir()
|
rqliteDataDir, err := r.prepareDataDir()
|
||||||
@@ -118,25 +86,6 @@ func (r *RQLiteManager) Start(ctx context.Context) error {
 		return fmt.Errorf("discovery config HttpAdvAddress is empty")
 	}

-	// CRITICAL FIX: Ensure peers.json exists with minimum cluster size BEFORE starting RQLite
-	// This prevents split-brain where each node starts as a single-node cluster
-	// We NEVER start as a single-node cluster - we wait indefinitely until minimum cluster size is met
-	// This applies to ALL nodes (with or without join addresses)
-	if r.discoveryService != nil {
-		r.logger.Info("Ensuring peers.json exists with minimum cluster size before RQLite startup",
-			zap.String("policy", "will wait indefinitely - never start as single-node cluster"),
-			zap.Bool("has_join_address", r.config.RQLiteJoinAddress != ""))
-
-		// Wait for peer discovery to find minimum cluster size - NO TIMEOUT
-		// This ensures we never start as a single-node cluster, regardless of join address
-		if err := r.waitForMinClusterSizeBeforeStart(ctx, rqliteDataDir); err != nil {
-			r.logger.Error("Failed to ensure minimum cluster size before start",
-				zap.Error(err),
-				zap.String("action", "startup aborted - will not start as single-node cluster"))
-			return fmt.Errorf("cannot start RQLite: minimum cluster size not met: %w", err)
-		}
-	}
-
 	// CRITICAL: Check if we need to do pre-start cluster discovery to build peers.json
 	// This handles the case where nodes have old cluster state and need coordinated recovery
 	if needsClusterRecovery, err := r.checkNeedsClusterRecovery(rqliteDataDir); err != nil {
@@ -158,22 +107,13 @@ func (r *RQLiteManager) Start(ctx context.Context) error {
 		return err
 	}

-	// Start periodic health monitoring for automatic recovery
-	if r.discoveryService != nil {
-		go r.startHealthMonitoring(ctx)
-	}
-
 	// Establish leadership/SQL availability
 	if err := r.establishLeadershipOrJoin(ctx, rqliteDataDir); err != nil {
 		return err
 	}

-	// Apply migrations - resolve path for production vs development
-	migrationsDir, err := r.resolveMigrationsDir()
-	if err != nil {
-		r.logger.Error("Failed to resolve migrations directory", zap.Error(err))
-		return fmt.Errorf("resolve migrations directory: %w", err)
-	}
+	// Apply migrations
+	migrationsDir := "migrations"
 	if err := r.ApplyMigrations(ctx, migrationsDir); err != nil {
 		r.logger.Error("Migrations failed", zap.Error(err), zap.String("dir", migrationsDir))
 		return fmt.Errorf("apply migrations: %w", err)
@ -199,23 +139,6 @@ func (r *RQLiteManager) rqliteDataDirPath() (string, error) {
|
|||||||
return filepath.Join(dataDir, "rqlite"), nil
|
return filepath.Join(dataDir, "rqlite"), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveMigrationsDir resolves the migrations directory path for production vs development
|
|
||||||
// In production, migrations are at /home/debros/src/migrations
|
|
||||||
// In development, migrations are relative to the project root (migrations/)
|
|
||||||
func (r *RQLiteManager) resolveMigrationsDir() (string, error) {
|
|
||||||
// Check for production path first: /home/debros/src/migrations
|
|
||||||
productionPath := "/home/debros/src/migrations"
|
|
||||||
if _, err := os.Stat(productionPath); err == nil {
|
|
||||||
r.logger.Info("Using production migrations directory", zap.String("path", productionPath))
|
|
||||||
return productionPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fall back to relative path for development
|
|
||||||
devPath := "migrations"
|
|
||||||
r.logger.Info("Using development migrations directory", zap.String("path", devPath))
|
|
||||||
return devPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// prepareDataDir expands and creates the RQLite data directory
|
// prepareDataDir expands and creates the RQLite data directory
|
||||||
func (r *RQLiteManager) prepareDataDir() (string, error) {
|
func (r *RQLiteManager) prepareDataDir() (string, error) {
|
||||||
rqliteDataDir, err := r.rqliteDataDirPath()
|
rqliteDataDir, err := r.rqliteDataDirPath()
|
||||||
@@ -241,28 +164,7 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
 		"-raft-addr", fmt.Sprintf("0.0.0.0:%d", r.config.RQLiteRaftPort),
 	}

-	// Add node-to-node TLS encryption if configured
-	// This enables TLS for Raft inter-node communication, required for SNI gateway routing
-	// See: https://rqlite.io/docs/guides/security/#encrypting-node-to-node-communication
-	if r.config.NodeCert != "" && r.config.NodeKey != "" {
-		r.logger.Info("Enabling node-to-node TLS encryption",
-			zap.String("node_cert", r.config.NodeCert),
-			zap.String("node_key", r.config.NodeKey),
-			zap.String("node_ca_cert", r.config.NodeCACert),
-			zap.Bool("node_no_verify", r.config.NodeNoVerify))
-
-		args = append(args, "-node-cert", r.config.NodeCert)
-		args = append(args, "-node-key", r.config.NodeKey)
-
-		if r.config.NodeCACert != "" {
-			args = append(args, "-node-ca-cert", r.config.NodeCACert)
-		}
-		if r.config.NodeNoVerify {
-			args = append(args, "-node-no-verify")
-		}
-	}
-
-	// All nodes follow the same join logic - either join specified address or start as single-node cluster
+	// Add join address if specified (for non-bootstrap or secondary bootstrap nodes)
 	if r.config.RQLiteJoinAddress != "" {
 		r.logger.Info("Joining RQLite cluster", zap.String("join_address", r.config.RQLiteJoinAddress))

@@ -274,24 +176,18 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
 			joinArg = strings.TrimPrefix(joinArg, "https://")
 		}

-		// Wait for join target to become reachable to avoid forming a separate cluster
-		// Use 5 minute timeout to prevent infinite waits on bad configurations
-		joinTimeout := 5 * time.Minute
-		if err := r.waitForJoinTarget(ctx, r.config.RQLiteJoinAddress, joinTimeout); err != nil {
+		// Wait for join target to become reachable to avoid forming a separate cluster (wait indefinitely)
+		if err := r.waitForJoinTarget(ctx, r.config.RQLiteJoinAddress, 0); err != nil {
 			r.logger.Warn("Join target did not become reachable within timeout; will still attempt to join",
 				zap.String("join_address", r.config.RQLiteJoinAddress),
-				zap.Duration("timeout", joinTimeout),
 				zap.Error(err))
 		}

 		// Always add the join parameter in host:port form - let rqlited handle the rest
 		// Add retry parameters to handle slow cluster startup (e.g., during recovery)
-		// Include -join-as with the raft advertise address so the leader knows which node this is
-		args = append(args, "-join", joinArg, "-join-as", r.discoverConfig.RaftAdvAddress, "-join-attempts", "30", "-join-interval", "10s")
+		args = append(args, "-join", joinArg, "-join-attempts", "30", "-join-interval", "10s")
 	} else {
-		r.logger.Info("No join address specified - starting as single-node cluster")
-		// When no join address is provided, rqlited will start as a single-node cluster
-		// This is expected for the first node in a fresh cluster
+		r.logger.Info("No join address specified - starting as new cluster")
 	}

 	// Add data directory as positional argument
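The join handling above strips any URL scheme so that only a host:port pair is passed to rqlited; a small self-contained sketch of that normalization, with the helper name chosen for illustration:

package main

import (
	"fmt"
	"strings"
)

// normalizeJoinAddr reduces a configured join address to host:port form,
// mirroring the TrimPrefix handling in launchProcess above.
func normalizeJoinAddr(addr string) string {
	addr = strings.TrimPrefix(addr, "http://")
	addr = strings.TrimPrefix(addr, "https://")
	return addr
}

func main() {
	for _, a := range []string{"https://10.0.0.2:4001", "10.0.0.3:4001"} {
		fmt.Println(normalizeJoinAddr(a))
	}
}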
@ -306,41 +202,14 @@ func (r *RQLiteManager) launchProcess(ctx context.Context, rqliteDataDir string)
|
|||||||
// Start RQLite process (not bound to ctx for graceful Stop handling)
|
// Start RQLite process (not bound to ctx for graceful Stop handling)
|
||||||
r.cmd = exec.Command("rqlited", args...)
|
r.cmd = exec.Command("rqlited", args...)
|
||||||
|
|
||||||
// Setup log file for RQLite output
|
// Enable debug logging of RQLite process to help diagnose issues
|
||||||
// Determine node type for log filename
|
r.cmd.Stdout = os.Stdout
|
||||||
nodeType := r.nodeType
|
r.cmd.Stderr = os.Stderr
|
||||||
if nodeType == "" {
|
|
||||||
nodeType = "node"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create logs directory
|
|
||||||
logsDir := filepath.Join(filepath.Dir(r.dataDir), "logs")
|
|
||||||
if err := os.MkdirAll(logsDir, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to create logs directory at %s: %w", logsDir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open log file for RQLite output
|
|
||||||
logPath := filepath.Join(logsDir, fmt.Sprintf("rqlite-%s.log", nodeType))
|
|
||||||
logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open RQLite log file at %s: %w", logPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.logger.Info("RQLite logs will be written to file",
|
|
||||||
zap.String("path", logPath))
|
|
||||||
|
|
||||||
r.cmd.Stdout = logFile
|
|
||||||
r.cmd.Stderr = logFile
|
|
||||||
|
|
||||||
if err := r.cmd.Start(); err != nil {
|
if err := r.cmd.Start(); err != nil {
|
||||||
logFile.Close()
|
|
||||||
return fmt.Errorf("failed to start RQLite: %w", err)
|
return fmt.Errorf("failed to start RQLite: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close the log file handle after process starts (the subprocess maintains its own reference)
|
|
||||||
// This allows the file to be rotated or inspected while the process is running
|
|
||||||
logFile.Close()
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -375,8 +244,8 @@ func (r *RQLiteManager) waitForReadyAndConnect(ctx context.Context) error {
|
|||||||
// Check if error is "store is not open" (recovery scenario)
|
// Check if error is "store is not open" (recovery scenario)
|
||||||
if strings.Contains(err.Error(), "store is not open") {
|
if strings.Contains(err.Error(), "store is not open") {
|
||||||
if attempt < maxConnectAttempts-1 {
|
if attempt < maxConnectAttempts-1 {
|
||||||
// Retry with exponential backoff for all nodes during recovery
|
// Only retry for joining nodes; bootstrap nodes should fail fast
|
||||||
// The store may not open immediately, especially during cluster recovery
|
if r.config.RQLiteJoinAddress != "" {
|
||||||
if attempt%3 == 0 {
|
if attempt%3 == 0 {
|
||||||
r.logger.Debug("RQLite store not yet accessible for connection, retrying...",
|
r.logger.Debug("RQLite store not yet accessible for connection, retrying...",
|
||||||
zap.Int("attempt", attempt+1), zap.Error(err))
|
zap.Int("attempt", attempt+1), zap.Error(err))
|
||||||
@ -389,6 +258,7 @@ func (r *RQLiteManager) waitForReadyAndConnect(ctx context.Context) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// For any other error or final attempt, fail
|
// For any other error or final attempt, fail
|
||||||
if r.cmd != nil && r.cmd.Process != nil {
|
if r.cmd != nil && r.cmd.Process != nil {
|
||||||
@ -413,52 +283,93 @@ func (r *RQLiteManager) waitForReadyAndConnect(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// establishLeadershipOrJoin handles post-startup cluster establishment
|
// establishLeadershipOrJoin establishes leadership (bootstrap) or waits for SQL availability (joining)
|
||||||
// All nodes follow the same pattern: wait for SQL availability
|
|
||||||
// For nodes without a join address, RQLite automatically forms a single-node cluster and becomes leader
|
|
||||||
func (r *RQLiteManager) establishLeadershipOrJoin(ctx context.Context, rqliteDataDir string) error {
|
func (r *RQLiteManager) establishLeadershipOrJoin(ctx context.Context, rqliteDataDir string) error {
|
||||||
if r.config.RQLiteJoinAddress == "" {
|
if r.config.RQLiteJoinAddress == "" {
|
||||||
// First node - no join address specified
|
// Bootstrap node logic with data safety checks
|
||||||
// RQLite will automatically form a single-node cluster and become leader
|
r.logger.Info("Bootstrap node: checking if safe to lead")
|
||||||
r.logger.Info("Starting as first node in cluster")
|
|
||||||
|
|
||||||
// Wait for SQL to be available (indicates RQLite cluster is ready)
|
// SAFETY: Check if we can safely become leader
|
||||||
|
canLead, err := r.canSafelyBecomeLeader()
|
||||||
|
if !canLead && err != nil {
|
||||||
|
r.logger.Warn("Not safe to become leader, attempting to join existing cluster",
|
||||||
|
zap.Error(err))
|
||||||
|
|
||||||
|
// Find node with highest log index and join it
|
||||||
|
if r.discoveryService != nil {
|
||||||
|
targetNode := r.discoveryService.GetNodeWithHighestLogIndex()
|
||||||
|
if targetNode != nil {
|
||||||
|
r.logger.Info("Joining node with higher data",
|
||||||
|
zap.String("target_node", targetNode.NodeID),
|
||||||
|
zap.String("raft_address", targetNode.RaftAddress),
|
||||||
|
zap.Uint64("their_index", targetNode.RaftLogIndex))
|
||||||
|
return r.joinExistingCluster(ctx, targetNode.RaftAddress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Safe to lead - attempt leadership
|
||||||
|
leadershipErr := r.waitForLeadership(ctx)
|
||||||
|
if leadershipErr == nil {
|
||||||
|
r.logger.Info("Bootstrap node successfully established leadership")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
r.logger.Warn("Initial leadership attempt failed, may need cluster recovery",
|
||||||
|
zap.Error(leadershipErr))
|
||||||
|
|
||||||
|
// Try recovery if we have peers.json from discovery
|
||||||
|
if r.discoveryService != nil {
|
||||||
|
peersPath := filepath.Join(rqliteDataDir, "raft", "peers.json")
|
||||||
|
if _, err := os.Stat(peersPath); err == nil {
|
||||||
|
r.logger.Info("Attempting cluster recovery using peers.json",
|
||||||
|
zap.String("peers_file", peersPath))
|
||||||
|
|
||||||
|
if recoveryErr := r.recoverCluster(peersPath); recoveryErr == nil {
|
||||||
|
r.logger.Info("Cluster recovery successful, retrying leadership")
|
||||||
|
leadershipErr = r.waitForLeadership(ctx)
|
||||||
|
if leadershipErr == nil {
|
||||||
|
r.logger.Info("Bootstrap node established leadership after recovery")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
r.logger.Warn("Cluster recovery failed", zap.Error(recoveryErr))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final fallback: SQL availability
|
||||||
|
r.logger.Warn("Leadership failed, trying SQL availability")
|
||||||
sqlCtx := ctx
|
sqlCtx := ctx
|
||||||
if _, hasDeadline := ctx.Deadline(); !hasDeadline {
|
if _, hasDeadline := ctx.Deadline(); !hasDeadline {
|
||||||
var cancel context.CancelFunc
|
var cancel context.CancelFunc
|
||||||
sqlCtx, cancel = context.WithTimeout(context.Background(), 2*time.Minute)
|
sqlCtx, cancel = context.WithTimeout(context.Background(), 2*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := r.waitForSQLAvailable(sqlCtx); err != nil {
|
|
||||||
if r.cmd != nil && r.cmd.Process != nil {
|
|
||||||
_ = r.cmd.Process.Kill()
|
|
||||||
}
|
|
||||||
return fmt.Errorf("SQL not available for first node: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.logger.Info("First node established successfully")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Joining node - wait for SQL availability (indicates it joined the leader)
|
|
||||||
r.logger.Info("Waiting for RQLite SQL availability (joining cluster)")
|
|
||||||
sqlCtx := ctx
|
|
||||||
if _, hasDeadline := ctx.Deadline(); !hasDeadline {
|
|
||||||
var cancel context.CancelFunc
|
|
||||||
sqlCtx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
|
|
||||||
defer cancel()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := r.waitForSQLAvailable(sqlCtx); err != nil {
|
if err := r.waitForSQLAvailable(sqlCtx); err != nil {
|
||||||
if r.cmd != nil && r.cmd.Process != nil {
|
if r.cmd != nil && r.cmd.Process != nil {
|
||||||
_ = r.cmd.Process.Kill()
|
_ = r.cmd.Process.Kill()
|
||||||
}
|
}
|
||||||
return fmt.Errorf("RQLite SQL not available: %w", err)
|
return fmt.Errorf("RQLite SQL not available: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
r.logger.Info("Node successfully joined cluster")
|
|
||||||
return nil
|
return nil
|
||||||
|
} else {
|
||||||
|
// Joining node logic
|
||||||
|
r.logger.Info("Waiting for RQLite SQL availability (leader discovery)")
|
||||||
|
sqlCtx := ctx
|
||||||
|
if _, hasDeadline := ctx.Deadline(); !hasDeadline {
|
||||||
|
var cancel context.CancelFunc
|
||||||
|
sqlCtx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
}
|
||||||
|
if err := r.waitForSQLAvailable(sqlCtx); err != nil {
|
||||||
|
if r.cmd != nil && r.cmd.Process != nil {
|
||||||
|
_ = r.cmd.Process.Kill()
|
||||||
|
}
|
||||||
|
return fmt.Errorf("RQLite SQL not available: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// hasExistingState returns true if the rqlite data directory already contains files or subdirectories.
|
// hasExistingState returns true if the rqlite data directory already contains files or subdirectories.
|
||||||
@@ -483,11 +394,18 @@ func (r *RQLiteManager) hasExistingState(rqliteDataDir string) bool {
 // For joining nodes in recovery, this may take longer (up to 3 minutes)
 func (r *RQLiteManager) waitForReady(ctx context.Context) error {
 	url := fmt.Sprintf("http://localhost:%d/status", r.config.RQLitePort)
-	client := tlsutil.NewHTTPClient(2 * time.Second)
+	client := &http.Client{Timeout: 2 * time.Second}

-	// All nodes may need time to open the store during recovery
-	// Use consistent timeout for cluster consistency
-	maxAttempts := 180 // 180 seconds (3 minutes) for all nodes
+	// Determine timeout based on whether this is a joining node
+	// Joining nodes in recovery may take longer to open the store
+	var maxAttempts int
+	if r.config.RQLiteJoinAddress != "" {
+		// Joining node: allow up to 180 seconds (3 minutes) for recovery
+		maxAttempts = 180
+	} else {
+		// Bootstrap node: allow 30 seconds
+		maxAttempts = 30
+	}

 	for i := 0; i < maxAttempts; i++ {
 		select {
@ -496,11 +414,6 @@ func (r *RQLiteManager) waitForReady(ctx context.Context) error {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use centralized TLS configuration
|
|
||||||
if client == nil {
|
|
||||||
client = tlsutil.NewHTTPClient(2 * time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := client.Get(url)
|
resp, err := client.Get(url)
|
||||||
if err == nil && resp.StatusCode == http.StatusOK {
|
if err == nil && resp.StatusCode == http.StatusOK {
|
||||||
// Parse the response to check for valid raft state
|
// Parse the response to check for valid raft state
|
||||||
@@ -545,7 +458,47 @@ func (r *RQLiteManager) waitForReady(ctx context.Context) error {
 	return fmt.Errorf("RQLite did not become ready within timeout")
 }

-// GetConnection returns the RQLite connection
+// waitForLeadership waits for RQLite to establish leadership (for bootstrap nodes)
+func (r *RQLiteManager) waitForLeadership(ctx context.Context) error {
+	r.logger.Info("Waiting for RQLite to establish leadership...")
+
+	maxAttempts := 30
+	attempt := 0
+	backoffDelay := 500 * time.Millisecond
+	maxBackoff := 5 * time.Second
+
+	for attempt < maxAttempts {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		// Try a simple query to check if leadership is established
+		if r.connection != nil {
+			_, err := r.connection.QueryOne("SELECT 1")
+			if err == nil {
+				r.logger.Info("RQLite leadership established")
+				return nil
+			}
+			// Log every 5th attempt or on first attempt to reduce noise
+			if attempt%5 == 0 || attempt == 0 {
+				r.logger.Debug("Waiting for leadership", zap.Int("attempt", attempt+1), zap.Error(err))
+			}
+		}
+
+		// Exponential backoff with jitter
+		time.Sleep(backoffDelay)
+		backoffDelay = time.Duration(float64(backoffDelay) * 1.5)
+		if backoffDelay > maxBackoff {
+			backoffDelay = maxBackoff
+		}
+		attempt++
+	}
+
+	return fmt.Errorf("RQLite failed to establish leadership within timeout")
+}
+
+// GetConnection returns the RQLite connection
 func (r *RQLiteManager) GetConnection() *gorqlite.Connection {
 	return r.connection
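The retry loop above grows its delay by 1.5x per attempt up to a cap; its comment mentions jitter although none is applied. A standalone sketch of that backoff schedule, with the random jitter included as an explicitly assumed extension:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextBackoff grows the delay by 1.5x up to max, as in waitForLeadership above;
// the random jitter term is an assumed addition, not present in the original code.
func nextBackoff(current, max time.Duration) time.Duration {
	next := time.Duration(float64(current) * 1.5)
	if next > max {
		next = max
	}
	jitter := time.Duration(rand.Int63n(int64(next) / 10)) // up to +10%
	return next + jitter
}

func main() {
	d := 500 * time.Millisecond
	for i := 0; i < 6; i++ {
		fmt.Println(i, d)
		d = nextBackoff(d, 5*time.Second)
	}
}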
@ -619,96 +572,12 @@ func (r *RQLiteManager) waitForJoinTarget(ctx context.Context, joinAddress strin
    return lastErr
}

-// waitForMinClusterSizeBeforeStart waits for minimum cluster size to be discovered
-// and ensures peers.json exists before RQLite starts
-// CRITICAL: This function waits INDEFINITELY - it will NEVER timeout
-// We never start as a single-node cluster, regardless of how long we wait
-func (r *RQLiteManager) waitForMinClusterSizeBeforeStart(ctx context.Context, rqliteDataDir string) error {
-    if r.discoveryService == nil {
-        return fmt.Errorf("discovery service not available")
-    }
-
-    requiredRemotePeers := r.config.MinClusterSize - 1
-    r.logger.Info("Waiting for minimum cluster size before RQLite startup",
-        zap.Int("min_cluster_size", r.config.MinClusterSize),
-        zap.Int("required_remote_peers", requiredRemotePeers),
-        zap.String("policy", "waiting indefinitely - will never start as single-node cluster"))
-
-    // Trigger peer exchange to collect metadata
-    if err := r.discoveryService.TriggerPeerExchange(ctx); err != nil {
-        r.logger.Warn("Peer exchange failed", zap.Error(err))
-    }
-
-    // NO TIMEOUT - wait indefinitely until minimum cluster size is met
-    // Only exit on context cancellation or when minimum cluster size is achieved
-    checkInterval := 2 * time.Second
-    lastLogTime := time.Now()
-
-    for {
-        // Check context cancellation first
-        select {
-        case <-ctx.Done():
-            return fmt.Errorf("context cancelled while waiting for minimum cluster size: %w", ctx.Err())
-        default:
-        }
-
-        // Trigger sync to update knownPeers
-        r.discoveryService.TriggerSync()
-        time.Sleep(checkInterval)
-
-        // Check if we have enough remote peers
-        allPeers := r.discoveryService.GetAllPeers()
-        remotePeerCount := 0
-        for _, peer := range allPeers {
-            if peer.NodeID != r.discoverConfig.RaftAdvAddress {
-                remotePeerCount++
-            }
-        }
-
-        if remotePeerCount >= requiredRemotePeers {
-            // Found enough peers - verify peers.json exists and contains them
-            peersPath := filepath.Join(rqliteDataDir, "raft", "peers.json")
-
-            // Trigger one more sync to ensure peers.json is written
-            r.discoveryService.TriggerSync()
-            time.Sleep(2 * time.Second)
-
-            // Verify peers.json exists and contains enough peers
-            if info, err := os.Stat(peersPath); err == nil && info.Size() > 10 {
-                // Read and verify it contains enough peers
-                data, err := os.ReadFile(peersPath)
-                if err == nil {
-                    var peers []map[string]interface{}
-                    if err := json.Unmarshal(data, &peers); err == nil && len(peers) >= requiredRemotePeers {
-                        r.logger.Info("peers.json exists with minimum cluster size, safe to start RQLite",
-                            zap.String("peers_file", peersPath),
-                            zap.Int("remote_peers_discovered", remotePeerCount),
-                            zap.Int("peers_in_json", len(peers)),
-                            zap.Int("min_cluster_size", r.config.MinClusterSize))
-                        return nil
-                    }
-                }
-            }
-        }
-
-        // Log progress every 10 seconds
-        if time.Since(lastLogTime) >= 10*time.Second {
-            r.logger.Info("Waiting for minimum cluster size (indefinitely)...",
-                zap.Int("discovered_peers", len(allPeers)),
-                zap.Int("remote_peers", remotePeerCount),
-                zap.Int("required_remote_peers", requiredRemotePeers),
-                zap.String("status", "will continue waiting until minimum cluster size is met"))
-            lastLogTime = time.Now()
-        }
-    }
-}
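The startup gate above only checks that peers.json parses as a JSON array with enough entries. As a point of reference, here is a minimal sketch of such a file and of the same check in isolation; the id/address/non_voter field names follow rqlite's documented recovery format and are an assumption here, not something shown in this diff.

// Illustrative only: waitForMinClusterSizeBeforeStart unmarshals peers.json into
// []map[string]interface{} and compares len(peers) against requiredRemotePeers.
package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Assumed file shape; keys follow rqlite's recovery format, not this repo.
    raw := []byte(`[
  {"id": "node-2:7001", "address": "10.0.0.2:7001", "non_voter": false},
  {"id": "node-3:7001", "address": "10.0.0.3:7001", "non_voter": false}
]`)
    var peers []map[string]interface{}
    if err := json.Unmarshal(raw, &peers); err != nil {
        panic(err)
    }
    fmt.Println("peers in json:", len(peers)) // the count the startup gate compares
}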
// testJoinAddress tests if a join address is reachable
func (r *RQLiteManager) testJoinAddress(joinAddress string) error {
    // Determine the HTTP status URL to probe.
    // If joinAddress contains a scheme, use it directly. Otherwise treat joinAddress
    // as host:port (Raft) and probe the standard HTTP API port 5001 on that host.
-    client := tlsutil.NewHTTPClient(5 * time.Second)
+    client := &http.Client{Timeout: 5 * time.Second}

    var statusURL string
    if strings.HasPrefix(joinAddress, "http://") || strings.HasPrefix(joinAddress, "https://") {
@ -736,6 +605,70 @@ func (r *RQLiteManager) testJoinAddress(joinAddress string) error {
    return nil
}

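The comments above describe how the probe URL is derived from a join address. A small sketch of that rule follows, assuming the /status endpoint used elsewhere in this file; the helper name is hypothetical and not part of the repository.

package main

import (
    "fmt"
    "net"
    "strings"
)

// statusURLFor is a hypothetical helper illustrating the probe-URL rule only.
func statusURLFor(joinAddress string) string {
    if strings.HasPrefix(joinAddress, "http://") || strings.HasPrefix(joinAddress, "https://") {
        return strings.TrimRight(joinAddress, "/") + "/status"
    }
    host := joinAddress
    if h, _, err := net.SplitHostPort(joinAddress); err == nil {
        host = h // joinAddress was a Raft host:port; keep only the host
    }
    return fmt.Sprintf("http://%s:5001/status", host)
}

func main() {
    fmt.Println(statusURLFor("10.0.0.5:7001"))          // http://10.0.0.5:5001/status
    fmt.Println(statusURLFor("https://db.example.com")) // https://db.example.com/status
}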
+// canSafelyBecomeLeader checks if this node can safely become leader without causing data loss
+func (r *RQLiteManager) canSafelyBecomeLeader() (bool, error) {
+    // Get our current Raft log index
+    ourLogIndex := r.getRaftLogIndex()
+
+    // If no discovery service, assume it's safe (backward compatibility)
+    if r.discoveryService == nil {
+        r.logger.Debug("No discovery service, assuming safe to lead")
+        return true, nil
+    }
+
+    // Query discovery service for other nodes
+    otherNodes := r.discoveryService.GetActivePeers()
+
+    if len(otherNodes) == 0 {
+        // No other nodes - safe to bootstrap
+        r.logger.Debug("No other nodes discovered, safe to lead",
+            zap.Uint64("our_log_index", ourLogIndex))
+        return true, nil
+    }
+
+    // Check if any other node has higher log index
+    for _, peer := range otherNodes {
+        if peer.RaftLogIndex > ourLogIndex {
+            // Other node has more data - we should join them
+            return false, fmt.Errorf(
+                "node %s has higher log index (%d > %d), should join as follower",
+                peer.NodeID, peer.RaftLogIndex, ourLogIndex)
+        }
+    }
+
+    // We have most recent data or equal - safe to lead
+    r.logger.Info("Safe to lead - we have most recent data",
+        zap.Uint64("our_log_index", ourLogIndex),
+        zap.Int("other_nodes_checked", len(otherNodes)))
+    return true, nil
+}
+
+// joinExistingCluster attempts to join an existing cluster as a follower
+func (r *RQLiteManager) joinExistingCluster(ctx context.Context, raftAddress string) error {
+    r.logger.Info("Attempting to join existing cluster",
+        zap.String("target_raft_address", raftAddress))
+
+    // Wait for the target to be reachable
+    if err := r.waitForJoinTarget(ctx, raftAddress, 2*time.Minute); err != nil {
+        return fmt.Errorf("join target not reachable: %w", err)
+    }
+
+    // Wait for SQL availability (the target should have a leader)
+    sqlCtx := ctx
+    if _, hasDeadline := ctx.Deadline(); !hasDeadline {
+        var cancel context.CancelFunc
+        sqlCtx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
+        defer cancel()
+    }
+
+    if err := r.waitForSQLAvailable(sqlCtx); err != nil {
+        return fmt.Errorf("failed to join cluster - SQL not available: %w", err)
+    }
+
+    r.logger.Info("Successfully joined existing cluster")
+    return nil
+}

// exponentialBackoff calculates exponential backoff duration with jitter
func (r *RQLiteManager) exponentialBackoff(attempt int, baseDelay time.Duration, maxDelay time.Duration) time.Duration {
    // Calculate exponential backoff: baseDelay * 2^attempt
@ -750,9 +683,7 @@ func (r *RQLiteManager) exponentialBackoff(attempt int, baseDelay time.Duration,
}

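The body of exponentialBackoff is collapsed by the hunk above. As a rough sketch of the capped baseDelay * 2^attempt calculation its comment names (the jitter term and the exact capping are assumptions, not the repository's implementation):

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// Assumed shape only: exponential growth capped at maxDelay, plus random jitter.
func backoff(attempt int, baseDelay, maxDelay time.Duration) time.Duration {
    d := baseDelay * time.Duration(1<<uint(attempt)) // baseDelay * 2^attempt
    if d > maxDelay {
        d = maxDelay
    }
    jitter := time.Duration(rand.Int63n(int64(d)/2 + 1)) // up to +50% of the delay
    return d + jitter
}

func main() {
    for i := 0; i < 5; i++ {
        fmt.Println(i, backoff(i, 500*time.Millisecond, 5*time.Second))
    }
}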
// recoverCluster restarts RQLite using the recovery.db created from peers.json
-// It reuses launchProcess and waitForReadyAndConnect to ensure all join/backoff logic
-// and proper readiness checks are applied during recovery.
-func (r *RQLiteManager) recoverCluster(ctx context.Context, peersJSONPath string) error {
+func (r *RQLiteManager) recoverCluster(peersJSONPath string) error {
    r.logger.Info("Initiating cluster recovery by restarting RQLite",
        zap.String("peers_file", peersJSONPath))

@ -765,28 +696,40 @@ func (r *RQLiteManager) recoverCluster(ctx context.Context, peersJSONPath string
    // Wait for process to fully stop
    time.Sleep(2 * time.Second)

-    // Get the data directory path
+    // Restart RQLite - it will automatically detect peers.json and perform recovery
+    r.logger.Info("Restarting RQLite (will auto-recover using peers.json)")
+
+    // Rebuild the launch arguments using the centralized path helper
    rqliteDataDir, err := r.rqliteDataDirPath()
    if err != nil {
        return fmt.Errorf("failed to resolve RQLite data directory: %w", err)
    }
-    // Restart RQLite using launchProcess to ensure all join/backoff logic is applied
-    // This includes: join address handling, join retries, expect configuration, etc.
-    r.logger.Info("Restarting RQLite (will auto-recover using peers.json)")
-    if err := r.launchProcess(ctx, rqliteDataDir); err != nil {
-        return fmt.Errorf("failed to restart RQLite process: %w", err)
+    args := []string{
+        "-http-addr", fmt.Sprintf("0.0.0.0:%d", r.config.RQLitePort),
+        "-http-adv-addr", r.discoverConfig.HttpAdvAddress,
+        "-raft-adv-addr", r.discoverConfig.RaftAdvAddress,
+        "-raft-addr", fmt.Sprintf("0.0.0.0:%d", r.config.RQLiteRaftPort),
+        rqliteDataDir,
    }

-    // Wait for RQLite to be ready and establish connection using proper readiness checks
-    // This includes retries for "store is not open" errors during recovery
-    if err := r.waitForReadyAndConnect(ctx); err != nil {
-        // Clean up the process if connection failed
-        if r.cmd != nil && r.cmd.Process != nil {
-            _ = r.cmd.Process.Kill()
+    // Restart RQLite
+    r.cmd = exec.Command("rqlited", args...)
+    r.cmd.Stdout = os.Stdout
+    r.cmd.Stderr = os.Stderr
+    if err := r.cmd.Start(); err != nil {
+        return fmt.Errorf("failed to restart RQLite: %w", err)
    }
-        return fmt.Errorf("failed to wait for RQLite readiness after recovery: %w", err)
+    r.logger.Info("RQLite restarted, waiting for it to become ready")
+    time.Sleep(3 * time.Second)
+
+    // Recreate connection
+    conn, err := gorqlite.Open(fmt.Sprintf("http://localhost:%d", r.config.RQLitePort))
+    if err != nil {
+        return fmt.Errorf("failed to reconnect to RQLite: %w", err)
    }
+    r.connection = conn

    r.logger.Info("Cluster recovery completed, RQLite restarted with new configuration")
    return nil
@ -891,270 +834,6 @@ func (r *RQLiteManager) clearRaftState(rqliteDataDir string) error {
    return nil
}

-// isInSplitBrainState detects if we're in a split-brain scenario where all nodes
-// are followers with no peers (each node thinks it's alone)
-func (r *RQLiteManager) isInSplitBrainState() bool {
-    status, err := r.getRQLiteStatus()
-    if err != nil {
-        return false
-    }
-
-    raft := status.Store.Raft
-
-    // Split-brain indicators:
-    // - State is Follower (not Leader)
-    // - Term is 0 (no leader election has occurred)
-    // - num_peers is 0 (node thinks it's alone)
-    // - voter is false (node not configured as voter)
-    isSplitBrain := raft.State == "Follower" &&
-        raft.Term == 0 &&
-        raft.NumPeers == 0 &&
-        !raft.Voter &&
-        raft.LeaderAddr == ""
-
-    if !isSplitBrain {
-        return false
-    }
-
-    // Verify all discovered peers are also in split-brain state
-    if r.discoveryService == nil {
-        r.logger.Debug("No discovery service to verify split-brain across peers")
-        return false
-    }
-
-    peers := r.discoveryService.GetActivePeers()
-    if len(peers) == 0 {
-        // No peers discovered yet - might be network issue, not split-brain
-        return false
-    }
-
-    // Check if all reachable peers are also in split-brain
-    splitBrainCount := 0
-    reachableCount := 0
-    for _, peer := range peers {
-        if !r.isPeerReachable(peer.HTTPAddress) {
-            continue
-        }
-        reachableCount++
-
-        peerStatus, err := r.getPeerRQLiteStatus(peer.HTTPAddress)
-        if err != nil {
-            continue
-        }
-
-        peerRaft := peerStatus.Store.Raft
-        if peerRaft.State == "Follower" &&
-            peerRaft.Term == 0 &&
-            peerRaft.NumPeers == 0 &&
-            !peerRaft.Voter {
-            splitBrainCount++
-        }
-    }
-
-    // If all reachable peers are in split-brain, we have cluster-wide split-brain
-    if reachableCount > 0 && splitBrainCount == reachableCount {
-        r.logger.Warn("Detected cluster-wide split-brain state",
-            zap.Int("reachable_peers", reachableCount),
-            zap.Int("split_brain_peers", splitBrainCount))
-        return true
-    }
-
-    return false
-}

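A minimal sketch, not from the repository, of the same indicator check applied to an already-decoded status value; it assumes the RQLiteStatus type shown later in this diff (Term, NumPeers and Voter exist on the main side) and can be handy for unit-testing the detection logic in isolation.

// Mirrors the predicate above; s is assumed to be a decoded *RQLiteStatus.
func looksSplitBrained(s *RQLiteStatus) bool {
    raft := s.Store.Raft
    return raft.State == "Follower" &&
        raft.Term == 0 &&
        raft.NumPeers == 0 &&
        !raft.Voter &&
        raft.LeaderAddr == ""
}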
-// isPeerReachable checks if a peer is at least responding to HTTP requests
-func (r *RQLiteManager) isPeerReachable(httpAddr string) bool {
-    url := fmt.Sprintf("http://%s/status", httpAddr)
-    client := &http.Client{Timeout: 3 * time.Second}
-
-    resp, err := client.Get(url)
-    if err != nil {
-        return false
-    }
-    defer resp.Body.Close()
-
-    return resp.StatusCode == http.StatusOK
-}
-
-// getPeerRQLiteStatus queries a peer's status endpoint
-func (r *RQLiteManager) getPeerRQLiteStatus(httpAddr string) (*RQLiteStatus, error) {
-    url := fmt.Sprintf("http://%s/status", httpAddr)
-    client := &http.Client{Timeout: 3 * time.Second}
-
-    resp, err := client.Get(url)
-    if err != nil {
-        return nil, err
-    }
-    defer resp.Body.Close()
-
-    if resp.StatusCode != http.StatusOK {
-        return nil, fmt.Errorf("peer returned status %d", resp.StatusCode)
-    }
-
-    var status RQLiteStatus
-    if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
-        return nil, err
-    }
-
-    return &status, nil
-}
-
-// startHealthMonitoring runs periodic health checks and automatically recovers from split-brain
-func (r *RQLiteManager) startHealthMonitoring(ctx context.Context) {
-    // Wait a bit after startup before starting health checks
-    time.Sleep(30 * time.Second)
-
-    ticker := time.NewTicker(60 * time.Second) // Check every minute
-    defer ticker.Stop()
-
-    for {
-        select {
-        case <-ctx.Done():
-            return
-        case <-ticker.C:
-            // Check for split-brain state
-            if r.isInSplitBrainState() {
-                r.logger.Warn("Split-brain detected during health check, initiating automatic recovery")
-
-                // Attempt automatic recovery
-                if err := r.recoverFromSplitBrain(ctx); err != nil {
-                    r.logger.Error("Automatic split-brain recovery failed",
-                        zap.Error(err),
-                        zap.String("action", "will retry on next health check"))
-                } else {
-                    r.logger.Info("Successfully recovered from split-brain")
-                }
-            }
-        }
-    }
-}
-
-// recoverFromSplitBrain automatically recovers from split-brain state
-func (r *RQLiteManager) recoverFromSplitBrain(ctx context.Context) error {
-    if r.discoveryService == nil {
-        return fmt.Errorf("discovery service not available for recovery")
-    }
-
-    r.logger.Info("Starting automatic split-brain recovery")
-
-    // Step 1: Ensure we have latest peer information
-    r.discoveryService.TriggerPeerExchange(ctx)
-    time.Sleep(2 * time.Second)
-    r.discoveryService.TriggerSync()
-    time.Sleep(2 * time.Second)
-
-    // Step 2: Get data directory
-    rqliteDataDir, err := r.rqliteDataDirPath()
-    if err != nil {
-        return fmt.Errorf("failed to get data directory: %w", err)
-    }
-
-    // Step 3: Check if peers have more recent data
-    allPeers := r.discoveryService.GetAllPeers()
-    maxPeerIndex := uint64(0)
-    for _, peer := range allPeers {
-        if peer.NodeID == r.discoverConfig.RaftAdvAddress {
-            continue // Skip self
-        }
-        if peer.RaftLogIndex > maxPeerIndex {
-            maxPeerIndex = peer.RaftLogIndex
-        }
-    }
-
-    // Step 4: Clear our Raft state if peers have more recent data
-    ourIndex := r.getRaftLogIndex()
-    if maxPeerIndex > ourIndex || (maxPeerIndex == 0 && ourIndex == 0) {
-        r.logger.Info("Clearing Raft state to allow clean cluster join",
-            zap.Uint64("our_index", ourIndex),
-            zap.Uint64("peer_max_index", maxPeerIndex))
-
-        if err := r.clearRaftState(rqliteDataDir); err != nil {
-            return fmt.Errorf("failed to clear Raft state: %w", err)
-        }
-
-        // Step 5: Refresh peer metadata and force write peers.json
-        // We trigger peer exchange again to ensure we have the absolute latest metadata
-        // after clearing state, then force write peers.json regardless of changes
-        r.logger.Info("Refreshing peer metadata after clearing raft state")
-        r.discoveryService.TriggerPeerExchange(ctx)
-        time.Sleep(1 * time.Second) // Brief wait for peer exchange to complete
-
-        r.logger.Info("Force writing peers.json with all discovered peers")
-        // We use ForceWritePeersJSON instead of TriggerSync because TriggerSync
-        // only writes if membership changed, but after clearing state we need
-        // to write regardless of changes
-        if err := r.discoveryService.ForceWritePeersJSON(); err != nil {
-            return fmt.Errorf("failed to force write peers.json: %w", err)
-        }
-
-        // Verify peers.json was created
-        peersPath := filepath.Join(rqliteDataDir, "raft", "peers.json")
-        if _, err := os.Stat(peersPath); err != nil {
-            return fmt.Errorf("peers.json not created after force write: %w", err)
-        }
-
-        r.logger.Info("peers.json verified after force write",
-            zap.String("peers_path", peersPath))
-
-        // Step 6: Restart RQLite to pick up new peers.json
-        r.logger.Info("Restarting RQLite to apply new cluster configuration")
-        if err := r.recoverCluster(ctx, peersPath); err != nil {
-            return fmt.Errorf("failed to restart RQLite: %w", err)
-        }
-
-        // Step 7: Wait for cluster to form (waitForReadyAndConnect already handled readiness)
-        r.logger.Info("Waiting for cluster to stabilize after recovery...")
-        time.Sleep(5 * time.Second)
-
-        // Verify recovery succeeded
-        if r.isInSplitBrainState() {
-            return fmt.Errorf("still in split-brain after recovery attempt")
-        }
-
-        r.logger.Info("Split-brain recovery completed successfully")
-        return nil
-    }
-
-    return fmt.Errorf("cannot recover: we have more recent data than peers")
-}
-
-// isSafeToClearState verifies we can safely clear Raft state
-// Returns true only if peers have higher log indexes (they have more recent data)
-// or if we have no meaningful state (index == 0)
-func (r *RQLiteManager) isSafeToClearState(rqliteDataDir string) bool {
-    if r.discoveryService == nil {
-        r.logger.Debug("No discovery service available, cannot verify safety")
-        return false // No discovery service, can't verify
-    }
-
-    ourIndex := r.getRaftLogIndex()
-    peers := r.discoveryService.GetActivePeers()
-
-    if len(peers) == 0 {
-        r.logger.Debug("No peers discovered, might be network issue")
-        return false // No peers, might be network issue
-    }
-
-    // Find max peer log index
-    maxPeerIndex := uint64(0)
-    for _, peer := range peers {
-        if peer.RaftLogIndex > maxPeerIndex {
-            maxPeerIndex = peer.RaftLogIndex
-        }
-    }
-
-    // Safe to clear if peers have higher log indexes (they have more recent data)
-    // OR if we have no meaningful state (index == 0)
-    safe := maxPeerIndex > ourIndex || ourIndex == 0
-
-    r.logger.Debug("Checking if safe to clear Raft state",
-        zap.Uint64("our_log_index", ourIndex),
-        zap.Uint64("peer_max_log_index", maxPeerIndex),
-        zap.Bool("safe_to_clear", safe))
-
-    return safe
-}

// performPreStartClusterDiscovery waits for peer discovery and builds a complete peers.json
// before starting RQLite. This ensures all nodes use the same cluster membership for recovery.
func (r *RQLiteManager) performPreStartClusterDiscovery(ctx context.Context, rqliteDataDir string) error {
@ -1208,9 +887,9 @@ func (r *RQLiteManager) performPreStartClusterDiscovery(ctx context.Context, rql
    }

    // CRITICAL FIX: Skip recovery if no peers were discovered (other than ourselves)
-    // Only ourselves in the cluster means this is a fresh cluster, not a recovery scenario
+    // Only ourselves in the cluster means this is a fresh bootstrap, not a recovery scenario
    if discoveredPeers <= 1 {
-        r.logger.Info("No peers discovered during pre-start discovery window - skipping recovery (fresh cluster)",
+        r.logger.Info("No peers discovered during pre-start discovery window - skipping recovery (fresh bootstrap)",
            zap.Int("discovered_peers", discoveredPeers))
        return nil
    }
@ -1243,14 +922,6 @@ func (r *RQLiteManager) performPreStartClusterDiscovery(ctx context.Context, rql
    if err := r.clearRaftState(rqliteDataDir); err != nil {
        r.logger.Error("Failed to clear Raft state", zap.Error(err))
        // Continue anyway - rqlite might still be able to recover
-    } else {
-        // Force write peers.json after clearing stale state
-        if r.discoveryService != nil {
-            r.logger.Info("Force writing peers.json after clearing stale Raft state")
-            if err := r.discoveryService.ForceWritePeersJSON(); err != nil {
-                r.logger.Error("Failed to force write peers.json after clearing stale state", zap.Error(err))
-            }
-        }
    }
        }
    }
}
@ -13,9 +13,6 @@ type RQLiteStatus struct {
        State      string `json:"state"`
        LeaderID   string `json:"leader_id"`
        LeaderAddr string `json:"leader_addr"`
-        Term       uint64 `json:"term"`
-        NumPeers   int    `json:"num_peers"`
-        Voter      bool   `json:"voter"`
    } `json:"raft"`
    DBConf struct {
        DSN string `json:"dsn"`
@ -71,3 +68,4 @@ type ClusterMetrics struct {
    CurrentLeader     string
    AveragePeerHealth float64
}

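For reference, a short sketch of decoding a /status response into a struct with these json tags; the sample JSON is illustrative (its store.raft shape matches what the health-check script later in this comparison queries with jq), not captured from a live node.

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed-down mirror of the struct above, reusing the same json tags.
type rqliteStatus struct {
    Store struct {
        Raft struct {
            State      string `json:"state"`
            LeaderAddr string `json:"leader_addr"`
            NumPeers   int    `json:"num_peers"`
        } `json:"raft"`
    } `json:"store"`
}

func main() {
    // Illustrative payload only.
    sample := []byte(`{"store":{"raft":{"state":"Leader","leader_addr":"10.0.0.1:7001","num_peers":2}}}`)
    var st rqliteStatus
    if err := json.Unmarshal(sample, &st); err != nil {
        panic(err)
    }
    fmt.Println(st.Store.Raft.State, st.Store.Raft.NumPeers)
}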
@ -1,122 +0,0 @@
// Package tlsutil provides centralized TLS configuration for trusting specific domains
package tlsutil

import (
    "crypto/tls"
    "crypto/x509"
    "net/http"
    "os"
    "strings"
    "time"
)

var (
    // Global cache of trusted domains loaded from environment
    trustedDomains []string
    // CA certificate pool for trusting self-signed certs
    caCertPool  *x509.CertPool
    initialized bool
)

// Default trusted domains - always trust debros.network for staging/development
var defaultTrustedDomains = []string{
    "*.debros.network",
}

// init loads trusted domains and CA certificate from environment and files
func init() {
    // Start with default trusted domains
    trustedDomains = append(trustedDomains, defaultTrustedDomains...)

    // Add any additional domains from environment
    domains := os.Getenv("DEBROS_TRUSTED_TLS_DOMAINS")
    if domains != "" {
        for _, d := range strings.Split(domains, ",") {
            d = strings.TrimSpace(d)
            if d != "" {
                trustedDomains = append(trustedDomains, d)
            }
        }
    }

    // Try to load CA certificate
    caCertPath := os.Getenv("DEBROS_CA_CERT_PATH")
    if caCertPath == "" {
        caCertPath = "/etc/debros/ca.crt"
    }

    if caCertData, err := os.ReadFile(caCertPath); err == nil {
        caCertPool = x509.NewCertPool()
        if caCertPool.AppendCertsFromPEM(caCertData) {
            // Successfully loaded CA certificate
        }
    }

    initialized = true
}

// GetTrustedDomains returns the list of domains to skip TLS verification for
func GetTrustedDomains() []string {
    return trustedDomains
}

// ShouldSkipTLSVerify checks if TLS verification should be skipped for this domain
func ShouldSkipTLSVerify(domain string) bool {
    for _, trusted := range trustedDomains {
        if strings.HasPrefix(trusted, "*.") {
            // Handle wildcards like *.debros.network
            suffix := strings.TrimPrefix(trusted, "*")
            if strings.HasSuffix(domain, suffix) || domain == strings.TrimPrefix(suffix, ".") {
                return true
            }
        } else if domain == trusted {
            return true
        }
    }
    return false
}

// GetTLSConfig returns a TLS config with appropriate verification settings
func GetTLSConfig() *tls.Config {
    config := &tls.Config{
        MinVersion: tls.VersionTLS12,
    }

    // If we have a CA cert pool, use it
    if caCertPool != nil {
        config.RootCAs = caCertPool
    } else if len(trustedDomains) > 0 {
        // Fallback: skip verification if trusted domains are configured but no CA pool
        config.InsecureSkipVerify = true
    }

    return config
}

// NewHTTPClient creates an HTTP client with TLS verification for trusted domains
func NewHTTPClient(timeout time.Duration) *http.Client {
    return &http.Client{
        Timeout: timeout,
        Transport: &http.Transport{
            TLSClientConfig: GetTLSConfig(),
        },
    }
}

// NewHTTPClientForDomain creates an HTTP client configured for a specific domain
func NewHTTPClientForDomain(timeout time.Duration, hostname string) *http.Client {
    tlsConfig := GetTLSConfig()

    // If this domain is in trusted list and we don't have a CA pool, allow insecure
    if caCertPool == nil && ShouldSkipTLSVerify(hostname) {
        tlsConfig.InsecureSkipVerify = true
    }

    return &http.Client{
        Timeout: timeout,
        Transport: &http.Transport{
            TLSClientConfig: tlsConfig,
        },
    }
}

@ -1,7 +1,7 @@
#!/bin/bash
set -euo pipefail

-echo "Force killing all debros development processes..."
+echo "Force killing all processes on dev ports..."

# Define all dev ports (5 nodes topology: bootstrap, bootstrap2, node2, node3, node4)
PORTS=(
@ -32,13 +32,16 @@ PORTS=(
killed_count=0
killed_pids=()

-# Method 1: Kill all processes using these ports
+# Kill all processes using these ports (LISTEN, ESTABLISHED, or any state)
for port in "${PORTS[@]}"; do
+  # Get all PIDs using this port in ANY TCP state
  pids=$(lsof -nP -iTCP:"$port" -t 2>/dev/null || true)
  if [[ -n "$pids" ]]; then
    echo "Killing processes on port $port: $pids"
    for pid in $pids; do
+      # Kill the process and all its children
      kill -9 "$pid" 2>/dev/null || true
+      # Also kill any children of this process
      pkill -9 -P "$pid" 2>/dev/null || true
      killed_pids+=("$pid")
    done
@ -46,26 +49,18 @@ for port in "${PORTS[@]}"; do
  fi
done

-# Method 2: Kill processes by specific patterns (ONLY debros-related)
-# Be very specific to avoid killing unrelated processes
-SPECIFIC_PATTERNS=(
-  "ipfs daemon"
-  "ipfs-cluster-service daemon"
-  "olric-server"
-  "bin/orama-node"
-  "bin/gateway"
-  "anyone-client"
-)
-
-for pattern in "${SPECIFIC_PATTERNS[@]}"; do
-  # Use exact pattern matching to avoid false positives
-  all_pids=$(pgrep -f "$pattern" 2>/dev/null || true)
+# Also kill processes by command name patterns (in case they're orphaned)
+# This catches processes that might be using debros ports but not showing up in lsof
+COMMANDS=("node" "ipfs" "ipfs-cluster-service" "rqlited" "olric-server" "gateway")
+for cmd in "${COMMANDS[@]}"; do
+  # Find all processes with this command name
+  all_pids=$(pgrep -f "^.*$cmd.*" 2>/dev/null || true)
  if [[ -n "$all_pids" ]]; then
    for pid in $all_pids; do
-      # Double-check the command line to avoid killing wrong processes
-      cmdline=$(ps -p "$pid" -o command= 2>/dev/null || true)
-      if [[ "$cmdline" == *"$pattern"* ]]; then
-        echo " Killing $pattern process (PID: $pid)"
+      # Check if this process is using any of our dev ports
+      port_match=$(lsof -nP -p "$pid" -iTCP 2>/dev/null | grep -E ":(400[1-4]|401[1-1]|410[1-4]|411[1-1]|450[1-4]|451[1-1]|500[1-4]|501[1-1]|600[1-1]|700[1-4]|701[1-1]|750[1-4]|751[1-1]|332[02]|9050|909[4-9]|910[4-9]|911[4-9]|912[4-9]|913[4-9]|909[6-9]|910[6-9]|911[6-9]|912[6-9]|913[6-9])" || true)
+      if [[ -n "$port_match" ]]; then
+        echo "Killing orphaned $cmd process (PID: $pid) using dev ports"
        kill -9 "$pid" 2>/dev/null || true
        pkill -9 -P "$pid" 2>/dev/null || true
        killed_pids+=("$pid")
@ -74,40 +69,26 @@ for pattern in "${SPECIFIC_PATTERNS[@]}"; do
    fi
done

-# Method 3: Kill processes using PID files
-PIDS_DIR="$HOME/.orama/.pids"
+# Clean up PID files
+PIDS_DIR="$HOME/.debros/.pids"
if [[ -d "$PIDS_DIR" ]]; then
-  for pidfile in "$PIDS_DIR"/*.pid; do
-    if [[ -f "$pidfile" ]]; then
-      pid=$(cat "$pidfile" 2>/dev/null || true)
-      if [[ -n "$pid" ]] && ps -p "$pid" > /dev/null 2>&1; then
-        name=$(basename "$pidfile" .pid)
-        echo " Killing $name (PID: $pid from pidfile)"
-        kill -9 "$pid" 2>/dev/null || true
-        pkill -9 -P "$pid" 2>/dev/null || true
-        killed_pids+=("$pid")
-      fi
-    fi
-  done
-  # Clean up all PID files
-  rm -f "$PIDS_DIR"/*.pid 2>/dev/null || true
+  rm -f "$PIDS_DIR"/*.pid || true
fi

# Remove duplicates and report
if [[ ${#killed_pids[@]} -gt 0 ]]; then
  unique_pids=($(printf '%s\n' "${killed_pids[@]}" | sort -u))
-  echo "✓ Killed ${#unique_pids[@]} unique process(es)"
+  echo "✓ Killed ${#unique_pids[@]} unique process(es) on $killed_count port(s)"
else
-  echo "✓ No debros processes found running"
+  echo "✓ No processes found on dev ports"
fi

# Final verification: check if any ports are still in use
still_in_use=0
-busy_ports=()
for port in "${PORTS[@]}"; do
  pids=$(lsof -nP -iTCP:"$port" -t 2>/dev/null || true)
  if [[ -n "$pids" ]]; then
-    busy_ports+=("$port")
+    echo "⚠️  Warning: Port $port still in use by: $pids"
    still_in_use=$((still_in_use + 1))
  fi
done
@ -115,7 +96,6 @@ done
if [[ $still_in_use -eq 0 ]]; then
  echo "✓ All dev ports are now free"
else
-  echo "⚠️  Warning: $still_in_use port(s) still in use: ${busy_ports[*]}"
-  echo " Run 'lsof -nP -iTCP:<port>' to identify the processes"
+  echo "⚠️  $still_in_use port(s) still in use - you may need to manually kill processes"
fi

210 scripts/install-debros-network.sh Executable file
@ -0,0 +1,210 @@
#!/bin/bash

# DeBros Network Installation Script
# Downloads dbn from GitHub releases and runs the new 'dbn prod install' flow
#
# Supported: Ubuntu 20.04+, Debian 11+
#
# Usage:
# curl -fsSL https://install.debros.network | bash
# OR
# bash scripts/install-debros-network.sh
# OR with specific flags:
# bash scripts/install-debros-network.sh --bootstrap
# bash scripts/install-debros-network.sh --vps-ip 1.2.3.4 --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...
# bash scripts/install-debros-network.sh --domain example.com

set -e
trap 'error "An error occurred. Installation aborted."; exit 1' ERR

# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
BLUE='\033[38;2;2;128;175m'
YELLOW='\033[1;33m'
NOCOLOR='\033[0m'

# Configuration
GITHUB_REPO="DeBrosOfficial/network"
GITHUB_API="https://api.github.com/repos/$GITHUB_REPO"
INSTALL_DIR="/usr/local/bin"

log() { echo -e "${CYAN}[$(date '+%Y-%m-%d %H:%M:%S')]${NOCOLOR} $1"; }
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1" >&2; }
success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; }
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1" >&2; }

display_banner() {
  echo -e "${BLUE}========================================================================${NOCOLOR}"
  echo -e "${CYAN}
 ____  ____  _   _       _   _
| _ \\ ___| __ ) _ __ ___  ___ | \\ | | ___| |___ _____ _ __| | __
| | | |/ _ \\ _ \\| __/ _ \\/ __| | \\| |/ _ \\ __\\ \\ /\\ / / _ \\| __| |/ /
| |_| | __/ |_) | | | (_) \\__ \\ | |\\ | __/ |_ \\ V  V / (_) | |  | <
|____/ \\___|____/|_| \\___/|___/ |_| \\_|\\___|\\__| \\_/\\_/ \\___/|_|  |_|\\_\\
${NOCOLOR}"
  echo -e "${BLUE}========================================================================${NOCOLOR}"
  echo -e "${GREEN} Production Installation ${NOCOLOR}"
  echo -e "${BLUE}========================================================================${NOCOLOR}"
}

detect_os() {
  if [ ! -f /etc/os-release ]; then
    error "Cannot detect operating system"
    exit 1
  fi

  . /etc/os-release
  OS=$ID
  VERSION=$VERSION_ID

  # Support Debian and Ubuntu
  case $OS in
    ubuntu|debian)
      log "Detected OS: $OS ${VERSION:-unknown}"
      ;;
    *)
      warning "Unsupported operating system: $OS (may not work)"
      ;;
  esac
}

check_architecture() {
  ARCH=$(uname -m)
  case $ARCH in
    x86_64)
      GITHUB_ARCH="amd64"
      ;;
    aarch64|arm64)
      GITHUB_ARCH="arm64"
      ;;
    *)
      error "Unsupported architecture: $ARCH"
      echo -e "${YELLOW}Supported: x86_64, aarch64/arm64${NOCOLOR}"
      exit 1
      ;;
  esac
  log "Architecture: $ARCH (using $GITHUB_ARCH)"
}

check_root() {
  if [[ $EUID -ne 0 ]]; then
    error "This script must be run as root"
    echo -e "${YELLOW}Please run with sudo:${NOCOLOR}"
    echo -e "${CYAN} sudo bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
    exit 1
  fi
}

get_latest_release() {
  log "Fetching latest release..."

  if command -v jq &>/dev/null; then
    # Get the latest release (including pre-releases/nightly)
    LATEST_RELEASE=$(curl -fsSL -H "Accept: application/vnd.github+json" "$GITHUB_API/releases" | \
      jq -r '.[0] | .tag_name')
  else
    LATEST_RELEASE=$(curl -fsSL "$GITHUB_API/releases" | \
      grep '"tag_name"' | \
      head -1 | \
      cut -d'"' -f4)
  fi

  if [ -z "$LATEST_RELEASE" ]; then
    error "Could not determine latest release version"
    exit 1
  fi

  log "Latest release: $LATEST_RELEASE"
}

download_and_install_cli() {
  BINARY_NAME="debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz"
  DOWNLOAD_URL="$GITHUB_REPO/releases/download/$LATEST_RELEASE/$BINARY_NAME"

  log "Downloading dbn from GitHub releases..."
  log "URL: https://github.com/$DOWNLOAD_URL"

  # Clean up any stale binaries
  rm -f /tmp/network-cli /tmp/dbn.tar.gz "$INSTALL_DIR/dbn"

  if ! curl -fsSL -o /tmp/dbn.tar.gz "https://github.com/$DOWNLOAD_URL"; then
    error "Failed to download dbn"
    exit 1
  fi

  # Verify the download was successful
  if [ ! -f /tmp/dbn.tar.gz ]; then
    error "Download file not found"
    exit 1
  fi

  log "Extracting dbn..."
  # Extract to /tmp
  tar -xzf /tmp/dbn.tar.gz -C /tmp/

  # Check for extracted binary (could be named network-cli or dbn)
  EXTRACTED_BINARY=""
  if [ -f /tmp/network-cli ]; then
    EXTRACTED_BINARY="/tmp/network-cli"
  elif [ -f /tmp/dbn ]; then
    EXTRACTED_BINARY="/tmp/dbn"
  else
    error "Failed to extract binary (neither network-cli nor dbn found)"
    ls -la /tmp/ | grep -E "(network|cli|dbn)"
    exit 1
  fi

  chmod +x "$EXTRACTED_BINARY"

  log "Installing dbn to $INSTALL_DIR..."
  # Always rename to dbn during installation
  mv "$EXTRACTED_BINARY" "$INSTALL_DIR/dbn"

  # Sanity check: verify the installed binary is functional and reports correct version
  if ! "$INSTALL_DIR/dbn" version &>/dev/null; then
    error "Installed dbn failed sanity check (version command failed)"
    rm -f "$INSTALL_DIR/dbn"
    exit 1
  fi

  # Clean up
  rm -f /tmp/dbn.tar.gz

  success "dbn installed successfully"
}

# Main flow
display_banner

# Check prerequisites
check_root
detect_os
check_architecture

# Download and install
get_latest_release
download_and_install_cli

# Show next steps
echo ""
echo -e "${GREEN}Installation complete!${NOCOLOR}"
echo ""
echo -e "${CYAN}Next, run the production setup:${NOCOLOR}"
echo ""
echo "Bootstrap node (first node, main branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap${NOCOLOR}"
echo ""
echo "Bootstrap node (nightly branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --branch nightly${NOCOLOR}"
echo ""
echo "Secondary node (join existing cluster):"
echo -e " ${BLUE}sudo dbn prod install --vps-ip <bootstrap_ip> --peers <multiaddr>${NOCOLOR}"
echo ""
echo "With HTTPS/domain:"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --domain example.com${NOCOLOR}"
echo ""
echo "For more help:"
echo -e " ${BLUE}dbn prod --help${NOCOLOR}"
echo ""
@ -1,53 +0,0 @@
#!/bin/bash

# Setup local domains for DeBros Network development
# Adds entries to /etc/hosts for node-1.local through node-5.local
# Maps them to 127.0.0.1 for local development

set -e

HOSTS_FILE="/etc/hosts"
NODES=("node-1" "node-2" "node-3" "node-4" "node-5")

# Check if we have sudo access
if [ "$EUID" -ne 0 ]; then
  echo "This script requires sudo to modify /etc/hosts"
  echo "Please run: sudo bash scripts/setup-local-domains.sh"
  exit 1
fi

# Function to add or update domain entry
add_domain() {
  local domain=$1
  local ip="127.0.0.1"

  # Check if domain already exists
  if grep -q "^[[:space:]]*$ip[[:space:]]\+$domain" "$HOSTS_FILE"; then
    echo "✓ $domain already configured"
    return 0
  fi

  # Add domain to /etc/hosts
  echo "$ip $domain" >> "$HOSTS_FILE"
  echo "✓ Added $domain -> $ip"
}

echo "Setting up local domains for DeBros Network..."
echo ""

# Add each node domain
for node in "${NODES[@]}"; do
  add_domain "${node}.local"
done

echo ""
echo "✓ Local domains configured successfully!"
echo ""
echo "You can now access nodes via:"
for node in "${NODES[@]}"; do
  echo " - ${node}.local (HTTP Gateway)"
done

echo ""
echo "Example: curl http://node-1.local:8080/rqlite/http/db/status"

@ -1,379 +0,0 @@
#!/bin/bash

# Production Cluster Health Check Script
# Tests RQLite, IPFS, and IPFS Cluster connectivity and replication

# Note: We don't use 'set -e' here because we want to continue testing even if individual checks fail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Node IPs - Update these if needed
BOOTSTRAP="${BOOTSTRAP:-51.83.128.181}"
NODE1="${NODE1:-57.128.223.92}"
NODE2="${NODE2:-185.185.83.89}"

ALL_NODES=($BOOTSTRAP $NODE1 $NODE2)

# Counters
PASSED=0
FAILED=0
WARNINGS=0

# Helper functions
print_header() {
  echo ""
  echo -e "${BLUE}========================================${NC}"
  echo -e "${BLUE}$1${NC}"
  echo -e "${BLUE}========================================${NC}"
}

print_test() {
  echo -e "${YELLOW}▶ $1${NC}"
}

print_pass() {
  echo -e "${GREEN}✓ $1${NC}"
  PASSED=$((PASSED + 1))
}

print_fail() {
  echo -e "${RED}✗ $1${NC}"
  FAILED=$((FAILED + 1))
}

print_warn() {
  echo -e "${YELLOW}⚠ $1${NC}"
  WARNINGS=$((WARNINGS + 1))
}

print_info() {
  echo -e " $1"
}

# Test functions
test_rqlite_status() {
  print_header "1. RQLITE CLUSTER STATUS"

  local leader_found=false
  local follower_count=0
  local commit_indices=()

  for i in "${!ALL_NODES[@]}"; do
    local node="${ALL_NODES[$i]}"
    print_test "Testing RQLite on $node"

    if ! response=$(curl -s --max-time 5 http://$node:5001/status 2>/dev/null); then
      print_fail "Cannot connect to RQLite on $node:5001"
      continue
    fi

    local state=$(echo "$response" | jq -r '.store.raft.state // "unknown"')
    local num_peers=$(echo "$response" | jq -r '.store.raft.num_peers // 0')
    local commit_index=$(echo "$response" | jq -r '.store.raft.commit_index // 0')
    local last_contact=$(echo "$response" | jq -r '.store.raft.last_contact // "N/A"')
    local config=$(echo "$response" | jq -r '.store.raft.latest_configuration // "[]"')
    local node_count=$(echo "$config" | grep -o "Address" | wc -l | tr -d ' ')

    commit_indices+=($commit_index)

    print_info "State: $state | Peers: $num_peers | Commit Index: $commit_index | Cluster Nodes: $node_count"

    # Check state
    if [ "$state" = "Leader" ]; then
      leader_found=true
      print_pass "Node $node is the Leader"
    elif [ "$state" = "Follower" ]; then
      follower_count=$((follower_count + 1))
      # Check last contact
      if [ "$last_contact" != "N/A" ] && [ "$last_contact" != "0" ]; then
        print_pass "Node $node is a Follower (last contact: $last_contact)"
      else
        print_warn "Node $node is Follower but last_contact is $last_contact"
      fi
    else
      print_fail "Node $node has unexpected state: $state"
    fi

    # Check peer count
    if [ "$num_peers" = "2" ]; then
      print_pass "Node $node has correct peer count: 2"
    else
      print_fail "Node $node has incorrect peer count: $num_peers (expected 2)"
    fi

    # Check cluster configuration
    if [ "$node_count" = "3" ]; then
      print_pass "Node $node sees all 3 cluster members"
    else
      print_fail "Node $node only sees $node_count cluster members (expected 3)"
    fi

    echo ""
  done

  # Check for exactly 1 leader
  if [ "$leader_found" = true ] && [ "$follower_count" = "2" ]; then
    print_pass "Cluster has 1 Leader and 2 Followers ✓"
  else
    print_fail "Invalid cluster state (Leader found: $leader_found, Followers: $follower_count)"
  fi

  # Check commit index sync
  if [ ${#commit_indices[@]} -eq 3 ]; then
    local first="${commit_indices[0]}"
    local all_same=true
    for idx in "${commit_indices[@]}"; do
      if [ "$idx" != "$first" ]; then
        all_same=false
        break
      fi
    done

    if [ "$all_same" = true ]; then
      print_pass "All nodes have synced commit index: $first"
    else
      print_warn "Commit indices differ: ${commit_indices[*]} (might be normal if writes are happening)"
    fi
  fi
}

test_rqlite_replication() {
  print_header "2. RQLITE REPLICATION TEST"

  print_test "Creating test table and inserting data on leader ($BOOTSTRAP)"

  # Create table
  if ! response=$(curl -s --max-time 5 -XPOST "http://$BOOTSTRAP:5001/db/execute" \
    -H "Content-Type: application/json" \
    -d '[["CREATE TABLE IF NOT EXISTS test_cluster_health (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp TEXT, node TEXT, value TEXT)"]]' 2>/dev/null); then
    print_fail "Failed to create table"
    return
  fi

  if echo "$response" | jq -e '.results[0].error' >/dev/null 2>&1; then
    local error=$(echo "$response" | jq -r '.results[0].error')
    if [[ "$error" != "table test_cluster_health already exists" ]]; then
      print_fail "Table creation error: $error"
      return
    fi
  fi
  print_pass "Table exists"

  # Insert test data
  local test_value="test_$(date +%s)"
  if ! response=$(curl -s --max-time 5 -XPOST "http://$BOOTSTRAP:5001/db/execute" \
    -H "Content-Type: application/json" \
    -d "[
      [\"INSERT INTO test_cluster_health (timestamp, node, value) VALUES (datetime('now'), 'bootstrap', '$test_value')\"]
    ]" 2>/dev/null); then
    print_fail "Failed to insert data"
    return
  fi

  if echo "$response" | jq -e '.results[0].error' >/dev/null 2>&1; then
    local error=$(echo "$response" | jq -r '.results[0].error')
    print_fail "Insert error: $error"
    return
  fi
  print_pass "Data inserted: $test_value"

  # Wait for replication
  print_info "Waiting 2 seconds for replication..."
  sleep 2

  # Query from all nodes
  for node in "${ALL_NODES[@]}"; do
    print_test "Reading from $node"

    if ! response=$(curl -s --max-time 5 -XPOST "http://$node:5001/db/query?level=weak" \
      -H "Content-Type: application/json" \
      -d "[\"SELECT * FROM test_cluster_health WHERE value = '$test_value' LIMIT 1\"]" 2>/dev/null); then
      print_fail "Failed to query from $node"
      continue
    fi

    if echo "$response" | jq -e '.results[0].error' >/dev/null 2>&1; then
      local error=$(echo "$response" | jq -r '.results[0].error')
      print_fail "Query error on $node: $error"
      continue
    fi

    local row_count=$(echo "$response" | jq -r '.results[0].values | length // 0')
    if [ "$row_count" = "1" ]; then
      local retrieved_value=$(echo "$response" | jq -r '.results[0].values[0][3] // ""')
      if [ "$retrieved_value" = "$test_value" ]; then
        print_pass "Data replicated correctly to $node"
      else
        print_fail "Data mismatch on $node (got: $retrieved_value, expected: $test_value)"
      fi
    else
      print_fail "Expected 1 row from $node, got $row_count"
    fi
  done
}

test_ipfs_status() {
  print_header "3. IPFS DAEMON STATUS"

  for node in "${ALL_NODES[@]}"; do
    print_test "Testing IPFS on $node"

    if ! response=$(curl -s --max-time 5 -X POST http://$node:4501/api/v0/id 2>/dev/null); then
      print_fail "Cannot connect to IPFS on $node:4501"
      continue
    fi

    local peer_id=$(echo "$response" | jq -r '.ID // "unknown"')
    local addr_count=$(echo "$response" | jq -r '.Addresses | length // 0')
    local agent=$(echo "$response" | jq -r '.AgentVersion // "unknown"')

    if [ "$peer_id" != "unknown" ]; then
      print_pass "IPFS running on $node (ID: ${peer_id:0:12}...)"
      print_info "Agent: $agent | Addresses: $addr_count"
    else
      print_fail "IPFS not responding correctly on $node"
    fi
  done
}

test_ipfs_swarm() {
  print_header "4. IPFS SWARM CONNECTIVITY"

  for node in "${ALL_NODES[@]}"; do
    print_test "Checking IPFS swarm peers on $node"

    if ! response=$(curl -s --max-time 5 -X POST http://$node:4501/api/v0/swarm/peers 2>/dev/null); then
      print_fail "Failed to get swarm peers from $node"
      continue
    fi

    local peer_count=$(echo "$response" | jq -r '.Peers | length // 0')

    if [ "$peer_count" = "2" ]; then
      print_pass "Node $node connected to 2 IPFS peers"
    elif [ "$peer_count" -gt "0" ]; then
      print_warn "Node $node connected to $peer_count IPFS peers (expected 2)"
    else
      print_fail "Node $node has no IPFS swarm peers"
    fi
  done
}

test_ipfs_cluster_status() {
  print_header "5. IPFS CLUSTER STATUS"

  for node in "${ALL_NODES[@]}"; do
    print_test "Testing IPFS Cluster on $node"

    if ! response=$(curl -s --max-time 5 http://$node:9094/id 2>/dev/null); then
      print_fail "Cannot connect to IPFS Cluster on $node:9094"
      continue
    fi

    local cluster_id=$(echo "$response" | jq -r '.id // "unknown"')
    local cluster_peers=$(echo "$response" | jq -r '.cluster_peers | length // 0')
    local version=$(echo "$response" | jq -r '.version // "unknown"')

    if [ "$cluster_id" != "unknown" ]; then
      print_pass "IPFS Cluster running on $node (ID: ${cluster_id:0:12}...)"
      print_info "Version: $version | Cluster Peers: $cluster_peers"

      if [ "$cluster_peers" = "3" ]; then
        print_pass "Node $node sees all 3 cluster peers"
      else
        print_warn "Node $node sees $cluster_peers cluster peers (expected 3)"
      fi
    else
      print_fail "IPFS Cluster not responding correctly on $node"
    fi
  done
}

test_ipfs_cluster_pins() {
  print_header "6. IPFS CLUSTER PIN CONSISTENCY"

  local pin_counts=()

  for node in "${ALL_NODES[@]}"; do
    print_test "Checking pins on $node"

    if ! response=$(curl -s --max-time 5 http://$node:9094/pins 2>/dev/null); then
      print_fail "Failed to get pins from $node"
      pin_counts+=(0)
      continue
    fi

    local pin_count=$(echo "$response" | jq -r 'length // 0')
    pin_counts+=($pin_count)
    print_pass "Node $node has $pin_count pins"
  done

  # Check if all nodes have same pin count
  if [ ${#pin_counts[@]} -eq 3 ]; then
    local first="${pin_counts[0]}"
    local all_same=true
    for count in "${pin_counts[@]}"; do
      if [ "$count" != "$first" ]; then
        all_same=false
        break
      fi
    done

    if [ "$all_same" = true ]; then
      print_pass "All nodes have consistent pin count: $first"
    else
      print_warn "Pin counts differ: ${pin_counts[*]} (might be syncing)"
    fi
  fi
}

print_summary() {
  print_header "TEST SUMMARY"

  echo ""
  echo -e "${GREEN}Passed: $PASSED${NC}"
  echo -e "${YELLOW}Warnings: $WARNINGS${NC}"
  echo -e "${RED}Failed: $FAILED${NC}"
  echo ""

  if [ $FAILED -eq 0 ]; then
    echo -e "${GREEN}🎉 All critical tests passed! Cluster is healthy.${NC}"
    exit 0
  elif [ $FAILED -le 2 ]; then
    echo -e "${YELLOW}⚠️ Some tests failed. Review the output above.${NC}"
    exit 1
  else
    echo -e "${RED}❌ Multiple failures detected. Cluster needs attention.${NC}"
    exit 2
  fi
}

# Main execution
main() {
  echo ""
  echo -e "${BLUE}╔════════════════════════════════════════════╗${NC}"
  echo -e "${BLUE}║ DEBROS Production Cluster Health Check ║${NC}"
echo -e "${BLUE}╚════════════════════════════════════════════╝${NC}"
|
|
||||||
echo ""
|
|
||||||
echo "Testing cluster:"
|
|
||||||
echo " Bootstrap: $BOOTSTRAP"
|
|
||||||
echo " Node 1: $NODE1"
|
|
||||||
echo " Node 2: $NODE2"
|
|
||||||
|
|
||||||
test_rqlite_status
|
|
||||||
test_rqlite_replication
|
|
||||||
test_ipfs_status
|
|
||||||
test_ipfs_swarm
|
|
||||||
test_ipfs_cluster_status
|
|
||||||
test_ipfs_cluster_pins
|
|
||||||
print_summary
|
|
||||||
}
|
|
||||||
|
|
||||||
# Run main
|
|
||||||
main
|
|
||||||
|
|
||||||
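For manual debugging, the same weak-consistency read used by the replication check above can be issued directly. A minimal sketch, assuming a node is reachable at 127.0.0.1:5001 (placeholder host, not part of the script) and that curl and jq are installed:

    # Issue the weak-consistency read the health check performs and print the
    # first result set; any SQL error appears under .results[0].error.
    NODE="127.0.0.1"
    curl -s --max-time 5 -XPOST "http://$NODE:5001/db/query?level=weak" \
      -H "Content-Type: application/json" \
      -d '["SELECT * FROM test_cluster_health LIMIT 1"]' | jq '.results[0]'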
@ -1,85 +0,0 @@
#!/bin/bash

# Test local domain routing for DeBros Network
# Validates that all HTTP gateway routes are working

set -e

NODES=("1" "2" "3" "4" "5")
GATEWAY_PORTS=(8080 8081 8082 8083 8084)

# Color codes
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Counters
PASSED=0
FAILED=0

# Test a single endpoint
test_endpoint() {
    local node=$1
    local port=$2
    local path=$3
    local description=$4

    local url="http://node-${node}.local:${port}${path}"

    printf "Testing %-50s ... " "$description"

    if curl -s -f "$url" > /dev/null 2>&1; then
        echo -e "${GREEN}✓ PASS${NC}"
        # Use the assignment form: ((PASSED++)) returns a non-zero status when
        # the counter is 0, which would abort the script under `set -e`.
        PASSED=$((PASSED + 1))
        return 0
    else
        echo -e "${RED}✗ FAIL${NC}"
        FAILED=$((FAILED + 1))
        return 1
    fi
}

echo "=========================================="
echo "DeBros Network Local Domain Tests"
echo "=========================================="
echo ""

# Test each node's HTTP gateway
for i in "${!NODES[@]}"; do
    node=${NODES[$i]}
    port=${GATEWAY_PORTS[$i]}

    echo "Testing node-${node}.local (port ${port}):"

    # Test health endpoint
    test_endpoint "$node" "$port" "/health" "Node-${node} health check"

    # Test RQLite HTTP endpoint
    test_endpoint "$node" "$port" "/rqlite/http/db/execute" "Node-${node} RQLite HTTP"

    # Test IPFS API endpoint (may fail if IPFS not running, but at least connection should work)
    test_endpoint "$node" "$port" "/ipfs/api/v0/version" "Node-${node} IPFS API" || true

    # Test Cluster API endpoint (may fail if Cluster not running, but at least connection should work)
    test_endpoint "$node" "$port" "/cluster/health" "Node-${node} Cluster API" || true

    echo ""
done

# Summary
echo "=========================================="
echo "Test Results"
echo "=========================================="
echo -e "${GREEN}Passed: $PASSED${NC}"
echo -e "${RED}Failed: $FAILED${NC}"
echo ""

if [ $FAILED -eq 0 ]; then
    echo -e "${GREEN}✓ All tests passed!${NC}"
    exit 0
else
    echo -e "${YELLOW}⚠ Some tests failed (this is expected if services aren't running)${NC}"
    exit 1
fi
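The gateway tests above assume that the node-N.local hostnames resolve on the machine running the script. A minimal sketch of checking one route by hand, assuming node-1.local resolves locally (for example via an /etc/hosts entry, which is an assumption and not part of the script) and that its gateway listens on port 8080:

    # Same check test_endpoint performs for the health route:
    # curl -f treats any non-2xx/3xx response as a failure.
    curl -s -f "http://node-1.local:8080/health" > /dev/null && echo "PASS" || echo "FAIL"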