refactor: rename DeBros to Orama and update configuration paths

- Replaced all instances of DeBros with Orama throughout the codebase, including CLI commands and configuration paths.
- Updated documentation to reflect the new naming convention and paths for configuration files.
- Removed the outdated PRODUCTION_INSTALL.md file and added new scripts for local domain setup and testing.
- Introduced a new interactive TUI installer for Orama Network, enhancing the installation experience.
- Improved logging and error handling across various components to provide clearer feedback during operations.
Author: anonpenguin23
Date: 2025-11-26 13:31:02 +02:00
Parent: 775289a1a2
Commit: 660008b0aa
55 changed files with 3388 additions and 2458 deletions

.github/workflows/release-apt.yml

@ -0,0 +1,197 @@
name: Release APT Package

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      version:
        description: "Version to release (e.g., 0.69.20)"
        required: true

permissions:
  contents: write
  packages: write

jobs:
  build-deb:
    name: Build Debian Package
    runs-on: ubuntu-latest
    strategy:
      matrix:
        arch: [amd64, arm64]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}" # Remove 'v' prefix if present
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up QEMU (for arm64)
        if: matrix.arch == 'arm64'
        uses: docker/setup-qemu-action@v3

      - name: Build binary
        env:
          GOARCH: ${{ matrix.arch }}
          CGO_ENABLED: 0
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          COMMIT=$(git rev-parse --short HEAD)
          DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
          LDFLAGS="-X 'main.version=$VERSION' -X 'main.commit=$COMMIT' -X 'main.date=$DATE'"
          mkdir -p build/usr/local/bin
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway cmd/gateway/main.go

      - name: Create Debian package structure
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"
          mkdir -p ${PKG_NAME}/DEBIAN
          mkdir -p ${PKG_NAME}/usr/local/bin

          # Copy binaries
          cp build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
          chmod 755 ${PKG_NAME}/usr/local/bin/*

          # Create control file
          cat > ${PKG_NAME}/DEBIAN/control << EOF
          Package: orama
          Version: ${VERSION}
          Section: net
          Priority: optional
          Architecture: ${ARCH}
          Depends: libc6
          Maintainer: DeBros Team <team@debros.network>
          Description: Orama Network - Distributed P2P Database System
           Orama is a distributed peer-to-peer network that combines
           RQLite for distributed SQL, IPFS for content-addressed storage,
           and LibP2P for peer discovery and communication.
          EOF

          # Create postinst script
          cat > ${PKG_NAME}/DEBIAN/postinst << 'EOF'
          #!/bin/bash
          set -e
          echo ""
          echo "Orama installed successfully!"
          echo ""
          echo "To set up your node, run:"
          echo "  sudo orama install"
          echo ""
          EOF
          chmod 755 ${PKG_NAME}/DEBIAN/postinst

      - name: Build .deb package
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"
          dpkg-deb --build ${PKG_NAME}
          mv ${PKG_NAME}.deb orama_${VERSION}_${ARCH}.deb

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: deb-${{ matrix.arch }}
          path: "*.deb"

  publish-apt:
    name: Publish to APT Repository
    needs: build-deb
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: packages

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}"
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up GPG
        if: env.GPG_PRIVATE_KEY != ''
        env:
          GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
        run: |
          echo "$GPG_PRIVATE_KEY" | gpg --import

      - name: Create APT repository structure
        run: |
          mkdir -p apt-repo/pool/main/o/orama
          mkdir -p apt-repo/dists/stable/main/binary-amd64
          mkdir -p apt-repo/dists/stable/main/binary-arm64

          # Move packages
          mv packages/deb-amd64/*.deb apt-repo/pool/main/o/orama/
          mv packages/deb-arm64/*.deb apt-repo/pool/main/o/orama/

          # Generate Packages files
          cd apt-repo
          dpkg-scanpackages --arch amd64 pool/ > dists/stable/main/binary-amd64/Packages
          dpkg-scanpackages --arch arm64 pool/ > dists/stable/main/binary-arm64/Packages
          gzip -k dists/stable/main/binary-amd64/Packages
          gzip -k dists/stable/main/binary-arm64/Packages

          # Generate Release file
          cat > dists/stable/Release << EOF
          Origin: Orama
          Label: Orama
          Suite: stable
          Codename: stable
          Architectures: amd64 arm64
          Components: main
          Description: Orama Network APT Repository
          EOF
          cd ..

      - name: Upload to release
        if: github.event_name == 'release'
        uses: softprops/action-gh-release@v1
        with:
          files: |
            apt-repo/pool/main/o/orama/*.deb
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Deploy APT repository to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./apt-repo
          destination_dir: apt
          keep_files: true
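Since the repository is deployed to GitHub Pages under an `apt/` path with suite `stable` and component `main`, a client could consume it roughly as sketched below. The Pages URL is an assumption (it depends on the repository owner and Pages settings), and `[trusted=yes]` is only needed while the Release file is left unsigned (i.e., when the optional GPG step does not run):

```bash
# Hypothetical Pages host; substitute the repository's actual gh-pages URL.
echo "deb [trusted=yes] https://debrosofficial.github.io/network/apt stable main" | \
  sudo tee /etc/apt/sources.list.d/orama.list

sudo apt update
sudo apt install orama
```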

CHANGELOG.md

@ -13,12 +13,38 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Deprecated
### Fixed
## [0.69.21] - 2025-11-26
### Added
- Introduced a new interactive TUI wizard for production installation (`sudo orama install`).
- Added support for APT package repository generation and publishing via GitHub Actions.
- Added new simplified production CLI commands (`orama install`, `orama upgrade`, `orama status`, etc.) as aliases for the legacy `orama prod` commands.
- Added support for a unified HTTP reverse proxy gateway within the node process, routing internal services (RQLite, IPFS, Cluster) via a single port.
- Added support for SNI-based TCP routing for secure access to services like RQLite Raft and IPFS Swarm.
### Changed
- Renamed the primary CLI binary from `dbn` to `orama` across the entire codebase, documentation, and build system.
- Migrated the production installation directory structure from `~/.debros` to `~/.orama`.
- Consolidated production service management into unified systemd units (e.g., `debros-node.service` replaces `debros-node-bootstrap.service` and `debros-node-node.service`).
- Updated the default IPFS configuration to bind API and Gateway addresses to `127.0.0.1` for enhanced security, relying on the new unified gateway for external access.
- Updated RQLite service configuration to bind to `127.0.0.1` for HTTP and Raft ports, relying on the new SNI gateway for external cluster communication.
### Deprecated
### Removed
### Fixed
- Corrected configuration path resolution logic to correctly check for config files in the new `~/.orama/` directory structure.
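One rough way to sanity-check the SNI-based TCP routing added in this release is to present different server names on the same port and confirm each lands on a different backend. The hostnames below are illustrative placeholders, not the actual configured routes:

```bash
# Same IP and port; the SNI value selects the backend (RQLite Raft vs. IPFS Swarm).
openssl s_client -connect node-1.orama.network:443 -servername rqlite-raft.node-1.orama.network </dev/null
openssl s_client -connect node-1.orama.network:443 -servername ipfs-swarm.node-1.orama.network </dev/null
```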
## [0.69.20] - 2025-11-22
### Added
- Added verification step to ensure the IPFS Cluster secret is correctly written after configuration updates.
### Changed
- Improved reliability of `anyone-client` installation and verification by switching to `npx` for execution and checks, especially for globally installed scoped packages.
- Updated the `anyone-client` systemd service to use `npx` for execution and explicitly set the PATH environment variable to ensure the client runs correctly.
@ -27,12 +53,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.19] - 2025-11-22
### Added
### Changed
- Updated the installation command for 'anyone-client' to use the correct scoped package name (@anyone-protocol/anyone-client).
### Deprecated
@ -40,14 +71,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.18] - 2025-11-22
### Added
- Integrated `anyone-client` (SOCKS5 proxy) installation and systemd service (`debros-anyone-client.service`).
- Added port availability checking logic to prevent conflicts when starting services (e.g., `anyone-client` on port 9050).
### Changed
- Updated system dependencies installation to include `nodejs` and `npm` required for `anyone-client`.
- Modified Olric configuration generation to bind to the specific VPS IP if provided, otherwise defaults to 0.0.0.0.
- Improved IPFS Cluster initialization by passing `CLUSTER_SECRET` directly as an environment variable.
@ -57,17 +92,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.17] - 2025-11-21
### Added
- Initial implementation of a Push Notification Service for the Gateway, utilizing the Expo API.
- Detailed documentation for RQLite operations, monitoring, and troubleshooting was added to the README.
### Changed
- Improved `make stop` and `dbn dev down` commands to ensure all development services are forcefully killed after a graceful shutdown attempt.
- Refactored RQLite startup logic to simplify cluster establishment and remove complex, error-prone leadership/recovery checks, relying on RQLite's built-in join mechanism.
- RQLite logs are now written to individual log files (e.g., `~/.debros/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- RQLite logs are now written to individual log files (e.g., `~/.orama/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- Improved peer exchange discovery logging to suppress expected 'protocols not supported' warnings from lightweight clients like the Gateway.
### Deprecated
@ -75,17 +114,21 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.17] - 2025-11-21
### Added
- Initial implementation of a Push Notification Service for the Gateway, utilizing the Expo API.
- Detailed documentation for RQLite operations, monitoring, and troubleshooting in the README.
### Changed
- Improved `make stop` and `dbn dev down` commands to ensure all development services are forcefully killed after a graceful shutdown attempt.
- Refactored RQLite startup logic to simplify cluster establishment and remove complex, error-prone leadership/recovery checks, relying on RQLite's built-in join mechanism.
- RQLite logs are now written to individual log files (e.g., `~/.debros/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- RQLite logs are now written to individual log files (e.g., `~/.orama/logs/rqlite-bootstrap.log`) instead of stdout/stderr, improving development environment clarity.
- Improved peer exchange discovery logging to suppress expected 'protocols not supported' warnings from lightweight clients like the Gateway.
### Deprecated
@ -93,12 +136,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.16] - 2025-11-16
### Added
### Changed
- Improved the `make stop` command to ensure a more robust and graceful shutdown of development services.
- Enhanced the `make kill` command and underlying scripts for more reliable force termination of stray development processes.
- Increased the graceful shutdown timeout for development processes from 500ms to 2 seconds before resorting to force kill.
@ -108,12 +156,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
\n
## [0.69.15] - 2025-11-16
### Added
### Changed
- Improved authentication flow to handle wallet addresses case-insensitively during nonce creation and verification.
### Deprecated
@ -121,13 +174,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.14] - 2025-11-14
### Added
- Added support for background reconnection to the Olric cache cluster in the Gateway, improving resilience if the cache is temporarily unavailable.
### Changed
- Improved the RQLite database client connection handling to ensure connections are properly closed and reused safely.
- RQLite Manager now updates its advertised addresses if cluster discovery provides more accurate information (e.g., replacing localhost).
@ -136,13 +193,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Removed internal RQLite process management from the development runner, as RQLite is now expected to be managed externally or via Docker.
## [0.69.13] - 2025-11-14
### Added
### Changed
- The Gateway service now waits for the Olric cache service to start before attempting initialization.
- Improved robustness of Olric cache client initialization with retry logic and exponential backoff.
@ -151,14 +212,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Corrected the default path logic for 'gateway.yaml' to prioritize the production data directory while maintaining fallback to legacy paths.
## [0.69.12] - 2025-11-14
### Added
- The `prod install` command now requires the `--cluster-secret` flag for all non-bootstrap nodes to ensure correct IPFS Cluster configuration.
### Changed
- Updated IPFS configuration to bind API and Gateway addresses to `0.0.0.0` instead of `127.0.0.1` for better network accessibility.
### Deprecated
@ -166,13 +230,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.11] - 2025-11-13
### Added
- Added a new comprehensive shell script (`scripts/test-cluster-health.sh`) for checking the health and replication status of RQLite, IPFS, and IPFS Cluster across production environments.
### Changed
- Improved RQLite cluster discovery logic to ensure `peers.json` is correctly generated and includes the local node, which is crucial for reliable cluster recovery.
- Refactored logging across discovery and RQLite components for cleaner, more concise output, especially for routine operations.
- Updated the installation and upgrade process to correctly configure IPFS Cluster bootstrap peers using the node's public IP, improving cluster formation reliability.
@ -182,16 +250,19 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where RQLite recovery operations (like clearing Raft state) did not correctly force the regeneration of `peers.json`, preventing successful cluster rejoin.
- Corrected the port calculation logic for IPFS Cluster to ensure the correct LibP2P listen port (9098) is used for bootstrap peer addressing.
## [0.69.10] - 2025-11-13
### Added
- Automatic health monitoring and recovery for RQLite cluster split-brain scenarios.
- RQLite now waits indefinitely for the minimum cluster size to be met before starting, preventing single-node cluster formation.
### Changed
- Updated default IPFS swarm port from 4001 to 4101 to avoid conflicts with LibP2P.
### Deprecated
@ -199,16 +270,19 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Resolved an issue where RQLite could start as a single-node cluster if peer discovery was slow, by enforcing minimum cluster size before startup.
- Improved cluster recovery logic to correctly use `bootstrap-expect` for new clusters and ensure proper process restart during recovery.
## [0.69.9] - 2025-11-12
### Added
- Added automatic recovery logic for RQLite (database) nodes stuck in a configuration mismatch, which attempts to clear stale Raft state if peers have more recent data.
- Added logic to discover IPFS Cluster peers directly from the LibP2P host's peerstore, improving peer discovery before the Cluster API is fully operational.
### Changed
- Improved the IPFS Cluster configuration update process to prioritize writing to the `peerstore` file before updating `service.json`, ensuring the source of truth is updated first.
### Deprecated
@ -216,14 +290,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.8] - 2025-11-12
### Added
- Improved `dbn prod start` to automatically unmask and re-enable services if they were previously masked or disabled.
- Added automatic discovery and configuration of all IPFS Cluster peers during runtime to improve cluster connectivity.
### Changed
- Enhanced `dbn prod start` and `dbn prod stop` reliability by adding service state resets, retries, and ensuring services are disabled when stopped.
- Filtered peer exchange addresses in LibP2P discovery to only include the standard LibP2P port (4001), preventing exposure of internal service ports.
@ -232,13 +310,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Improved IPFS Cluster bootstrap configuration repair logic to automatically infer and update bootstrap peer addresses if the bootstrap node is available.
## [0.69.7] - 2025-11-12
### Added
### Changed
- Improved logic for determining Olric server addresses during configuration generation, especially for bootstrap and non-bootstrap nodes.
- Enhanced IPFS cluster configuration to correctly handle IPv6 addresses when updating bootstrap peers.
@ -247,14 +329,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.6] - 2025-11-12
### Added
- Improved production service health checks and port availability validation during install, upgrade, start, and restart commands.
- Added service aliases (node, ipfs, cluster, gateway, olric) to `dbn prod logs` command for easier log viewing.
### Changed
- Updated node configuration logic to correctly advertise public IP addresses in multiaddrs (for P2P discovery) and RQLite addresses, improving connectivity for nodes behind NAT/firewalls.
- Enhanced `dbn prod install` and `dbn prod upgrade` to automatically detect and preserve existing VPS IP, domain, and cluster join information.
- Improved RQLite cluster discovery to automatically replace localhost/loopback addresses with the actual public IP when exchanging metadata between peers.
@ -266,14 +352,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where the RQLite process would wait indefinitely for a join target; now uses a 5-minute timeout.
- Corrected the location of the gateway configuration file reference in the README.
## [0.69.5] - 2025-11-11
### Added
### Changed
- Moved the default location for `gateway.yaml` configuration file from `configs/` to the new `data/` directory for better organization.
- Updated configuration path logic to search for `gateway.yaml` in the new `data/` directory first.
@ -282,13 +372,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.4] - 2025-11-11
### Added
### Changed
- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-*).
- RQLite database management is now integrated directly into the main node process, removing separate RQLite systemd services (debros-rqlite-\*).
- Improved log file provisioning to only create necessary log files based on the node type being installed (bootstrap or node).
### Deprecated
@ -296,26 +391,35 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.3] - 2025-11-11
### Added
- Added `--ignore-resource-checks` flag to the install command to skip disk, RAM, and CPU prerequisite validation.
### Changed
### Deprecated
### Removed
### Fixed
## [0.69.2] - 2025-11-11
### Added
- Added `--no-pull` flag to `dbn prod upgrade` to skip git repository updates and use existing source code.
### Changed
- Removed deprecated environment management commands (`env`, `devnet`, `testnet`, `local`).
- Removed deprecated network commands (`health`, `peers`, `status`, `peer-id`, `connect`, `query`, `pubsub`) from the main CLI interface.
@ -324,14 +428,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.1] - 2025-11-11
### Added
- Added automatic service stopping before binary upgrades during the `prod upgrade` process to ensure a clean update.
- Added logic to preserve existing configuration settings (like `bootstrap_peers`, `domain`, and `rqlite_join_address`) when regenerating configurations during `prod upgrade`.
### Changed
- Improved the `prod upgrade` process to be more robust by preserving critical configuration details and gracefully stopping services.
### Deprecated
@ -339,15 +447,19 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.69.0] - 2025-11-11
### Added
- Added comprehensive documentation for setting up HTTPS using a domain name, including configuration steps for both installation and existing setups.
- Added the `--force` flag to the `install` command for reconfiguring all settings.
- Added new log targets (`ipfs-cluster`, `rqlite`, `olric`) and improved the `dbn prod logs` command documentation.
### Changed
- Improved the IPFS Cluster configuration logic to ensure the cluster secret and IPFS API port are correctly synchronized during updates.
- Refined the directory structure creation process to ensure node-specific data directories are created only when initializing services.
@ -356,13 +468,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.68.1] - 2025-11-11
### Added
- Pre-create log files during setup to ensure correct permissions for systemd logging.
### Changed
- Improved binary installation process to handle copying files individually, preventing potential shell wildcard issues.
- Enhanced ownership fixing logic during installation to ensure all files created by root (especially during service initialization) are correctly owned by the 'debros' user.
@ -371,14 +487,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.68.0] - 2025-11-11
### Added
- Added comprehensive documentation for production deployment, including installation, upgrade, service management, and troubleshooting.
- Added new CLI commands (`dbn prod start`, `dbn prod stop`, `dbn prod restart`) for convenient management of production systemd services.
### Changed
- Updated IPFS configuration during production installation to use port 4501 for the API (to avoid conflicts with RQLite on port 5001) and port 8080 for the Gateway.
### Deprecated
@ -386,15 +506,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Ensured that IPFS configuration automatically disables AutoConf when a private swarm key is present during installation and upgrade, preventing startup errors.
## [0.67.7] - 2025-11-11
### Added
- Added support for specifying the Git branch (main or nightly) during `prod install` and `prod upgrade`.
- The chosen branch is now saved and automatically used for future upgrades unless explicitly overridden.
### Changed
- Updated help messages and examples for production commands to include branch options.
### Deprecated
@ -402,12 +525,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.67.6] - 2025-11-11
### Added
### Changed
- The binary installer now updates the source repository if it already exists, instead of only cloning it if missing.
### Deprecated
@ -415,15 +543,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Resolved an issue where disabling AutoConf in the IPFS repository could leave 'auto' placeholders in the config, causing startup errors.
## [0.67.5] - 2025-11-11
### Added
- Added `--restart` option to `dbn prod upgrade` to automatically restart services after upgrade.
- The gateway now supports an optional `--config` flag to specify the configuration file path.
### Changed
- Improved `dbn prod upgrade` process to better handle existing installations, including detecting node type and ensuring configurations are updated to the latest format.
- Configuration loading logic for `node` and `gateway` commands now correctly handles absolute paths passed via command line or systemd.
@ -432,13 +563,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue during production upgrades where IPFS repositories in private swarms might fail to start due to `AutoConf` not being disabled.
## [0.67.4] - 2025-11-11
### Added
### Changed
- Improved configuration file loading logic to support absolute paths for config files.
- Updated IPFS Cluster initialization during setup to run `ipfs-cluster-service init` and automatically configure the cluster secret.
- IPFS repositories initialized with a private swarm key will now automatically disable AutoConf.
@ -448,13 +583,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.debros/`) and production (`~/.debros/configs/`) directories.
- Fixed configuration path resolution to correctly check for config files in both the legacy (`~/.orama/`) and production (`~/.orama/configs/`) directories.
## [0.67.3] - 2025-11-11
### Added
### Changed
- Improved reliability of IPFS (Kubo) installation by switching from a single install script to the official step-by-step download and extraction process.
- Updated IPFS (Kubo) installation to use version v0.38.2.
- Enhanced binary installation routines (RQLite, IPFS, Go) to ensure the installed binaries are immediately available in the current process's PATH.
@ -464,14 +603,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed potential installation failures for RQLite by adding error checking to the binary copy command.
## [0.67.2] - 2025-11-11
### Added
- Added a new utility function to reliably resolve the full path of required external binaries (like ipfs, rqlited, etc.).
### Changed
- Improved service initialization by validating the availability and path of all required external binaries before creating systemd service units.
- Updated systemd service generation logic to use the resolved, fully-qualified paths for external binaries instead of relying on hardcoded paths.
@ -480,13 +622,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Changed IPFS initialization from a warning to a fatal error if the repo fails to initialize, ensuring setup stops on critical failures.
## [0.67.1] - 2025-11-11
### Added
### Changed
- Improved disk space check logic to correctly check the parent directory if the specified path does not exist.
### Deprecated
@ -494,15 +640,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue in the installation script where the extracted CLI binary might be named 'dbn' instead of 'network-cli', ensuring successful installation regardless of the extracted filename.
## [0.67.0] - 2025-11-11
### Added
- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.
### Changed
- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.
@ -511,15 +660,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
## [0.67.0] - 2025-11-11
### Added
- Added support for joining a cluster as a secondary bootstrap node using the new `--bootstrap-join` flag.
- Added a new flag `--vps-ip` to specify the public IP address for non-bootstrap nodes, which is now required for cluster joining.
### Changed
- Updated the installation script to correctly download and install the CLI binary from the GitHub release archive.
- Improved RQLite service configuration to correctly use the public IP address (`--vps-ip`) for advertising its raft and HTTP addresses.
@ -528,13 +680,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where non-bootstrap nodes could be installed without specifying the required `--vps-ip`.
## [0.66.1] - 2025-11-11
### Added
### Changed
- Allow bootstrap nodes to optionally define a join address to synchronize with another bootstrap cluster.
### Deprecated
@ -542,14 +698,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.66.0] - 2025-11-11
### Added
- Pre-installation checks for minimum system resources (10GB disk space, 2GB RAM, 2 CPU cores) are now performed during setup.
- All systemd services (IPFS, RQLite, Olric, Node, Gateway) now log directly to dedicated files in the logs directory instead of using the system journal.
### Changed
- Improved logging instructions in the setup completion message to reference the new dedicated log files.
### Deprecated
@ -557,14 +717,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.65.0] - 2025-11-11
### Added
- Expanded the local development environment (`dbn dev up`) from 3 nodes to 5 nodes (2 bootstraps and 3 regular nodes) for better testing of cluster resilience and quorum.
- Added a new `bootstrap2` node configuration and service to the development topology.
### Changed
- Updated the `dbn dev up` command to configure and start all 5 nodes and associated services (IPFS, RQLite, IPFS Cluster).
- Modified RQLite and LibP2P health checks in the development environment to require a quorum of 3 out of 5 nodes.
- Refactored development environment configuration logic using a new `Topology` structure for easier management of node ports and addresses.
@ -574,13 +738,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Ensured that secondary bootstrap nodes can correctly join the primary RQLite cluster in the development environment.
## [0.64.1] - 2025-11-10
### Added
### Changed
- Improved the accuracy of the Raft log index reporting by falling back to reading persisted snapshot metadata from disk if the running RQLite instance is not yet reachable or reports a zero index.
### Deprecated
@ -588,16 +756,20 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.64.0] - 2025-11-10
### Added
- Comprehensive End-to-End (E2E) test suite for Gateway API endpoints (Cache, RQLite, Storage, Network, Auth).
- New E2E tests for concurrent operations and TTL expiry in the distributed cache.
- New E2E tests for LibP2P peer connectivity and discovery.
### Changed
- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.debros` configuration files, removing the need for environment variables.
- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.orama` configuration files, removing the need for environment variables.
- The `/v1/network/peers` endpoint now returns a flattened list of multiaddresses for all connected peers.
- Improved robustness of Cache API handlers to correctly identify and return 404 (Not Found) errors when keys are missing, even when wrapped by underlying library errors.
- The RQLite transaction handler now supports the legacy `statements` array format in addition to the `ops` array format for easier use.
@ -608,13 +780,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Corrected IPFS Add operation to return the actual file size (byte count) instead of the DAG size in the response.
## [0.63.3] - 2025-11-10
### Added
### Changed
- Improved RQLite cluster stability by automatically clearing stale Raft state on startup if peers have a higher log index, allowing the node to join cleanly.
### Deprecated
@ -622,12 +798,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.63.2] - 2025-11-10
### Added
### Changed
- Improved process termination logic in development environments to ensure child processes are also killed.
- Enhanced the `dev-kill-all.sh` script to reliably kill all processes using development ports, including orphaned processes and their children.
@ -636,12 +817,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.63.1] - 2025-11-10
### Added
### Changed
- Increased the default minimum cluster size for database environments from 1 to 3.
### Deprecated
@ -649,15 +835,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Prevented unnecessary cluster recovery attempts when a node starts up as the first node (fresh bootstrap).
## [0.63.0] - 2025-11-10
### Added
- Added a new `kill` command to the Makefile for forcefully shutting down all development processes.
- Introduced a new `stop` command in the Makefile for graceful shutdown of development processes.
### Changed
- The `kill` command now performs a graceful shutdown attempt followed by a force kill of any lingering processes and verifies that development ports are free.
### Deprecated
@ -665,13 +854,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.62.0] - 2025-11-10
### Added
- The `prod status` command now correctly checks for both 'bootstrap' and 'node' service variants.
### Changed
- The production installation process now generates secrets (like the cluster secret and peer ID) before initializing services. This ensures all necessary secrets are available when services start.
- The `prod install` command now displays the actual Peer ID upon completion instead of a placeholder.
@ -680,15 +873,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
- Fixed an issue where IPFS Cluster initialization was using a hardcoded configuration file instead of relying on the standard `ipfs-cluster-service init` process.
## [0.61.0] - 2025-11-10
### Added
- Introduced a new simplified authentication flow (`dbn auth login`) that allows users to generate an API key directly from a wallet address without signature verification (for development/testing purposes).
- Added a new `PRODUCTION_INSTALL.md` guide for production deployment using the `dbn prod` command suite.
### Changed
- Renamed the primary CLI binary from `network-cli` to `dbn` across all configurations, documentation, and source code.
- Refactored the IPFS configuration logic in the development environment to directly modify the IPFS config file instead of relying on shell commands, improving stability.
- Improved the IPFS Cluster peer count logic to correctly handle NDJSON streaming responses from the `/peers` endpoint.
@ -699,6 +895,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed
### Fixed
## [0.60.1] - 2025-11-09
@ -1032,7 +1229,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Interactive domain configuration during `dbn setup` command
- Automatic port availability checking for ports 80 and 443 before enabling HTTPS
- DNS resolution verification to ensure domain points to the server IP
- TLS certificate cache directory management (`~/.debros/tls-cache`)
- TLS certificate cache directory management (`~/.orama/tls-cache`)
- Gateway automatically serves HTTP (port 80) for ACME challenges and HTTPS (port 443) for traffic
- New gateway config fields: `enable_https`, `domain_name`, `tls_cache_dir`
- **Domain Validation**: Added domain name validation and DNS verification helpers in setup CLI
@ -1102,8 +1299,8 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Automatic GitHub Release creation with changelog and artifacts
- Semantic versioning support with pre-release handling
- **Environment Configuration**: Multi-environment switching system
- Default environments: local (http://localhost:6001), devnet (https://devnet.debros.network), testnet (https://testnet.debros.network)
- Stored in `~/.debros/environments.json`
- Default environments: local (http://localhost:6001), devnet (https://devnet.orama.network), testnet (https://testnet.orama.network)
- Stored in `~/.orama/environments.json`
- CLI auto-uses active environment for authentication and operations
- **Comprehensive Documentation**
- `.cursor/RELEASES.md`: Overview and quick start
@ -1132,7 +1329,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
- Explicit control over LibP2P listen addresses for better localhost/development support
- Production/development mode detection for NAT services (disabled for localhost, enabled for production)
- Process management with .dev/pids directory for background process tracking
- Centralized logging to ~/.debros/logs/ for all network services
- Centralized logging to ~/.orama/logs/ for all network services
### Changed
@ -1182,7 +1379,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Changed
- Updated readme
- Where we read .yaml files from and where data is saved to ~/.debros
- Where we read .yaml files from and where data is saved to ~/.orama
### Deprecated
@ -1311,7 +1508,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Changed
- replaced git.debros.io with github.com
- replaced git.orama.io with github.com
### Deprecated


@ -27,14 +27,14 @@ make deps
Useful CLI commands:
```bash
./bin/dbn health
./bin/dbn peers
./bin/dbn status
./bin/orama health
./bin/orama peers
./bin/orama status
```
## Versioning
- The CLI reports its version via `dbn version`.
- The CLI reports its version via `orama version`.
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.
## Pull Requests

Makefile

@ -6,12 +6,12 @@ test:
	go test -v $(TEST)

# Gateway-focused E2E tests assume gateway and nodes are already running
# Auto-discovers configuration from ~/.debros and queries database for API key
# Auto-discovers configuration from ~/.orama and queries database for API key
# No environment variables required
.PHONY: test-e2e
test-e2e:
	@echo "Running comprehensive E2E tests..."
	@echo "Auto-discovering configuration from ~/.debros..."
	@echo "Auto-discovering configuration from ~/.orama..."
	go test -v -tags e2e ./e2e

# Network - Distributed P2P Database System
@ -19,7 +19,7 @@ test-e2e:
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill

VERSION := 0.69.20
VERSION := 0.69.21
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@ -30,10 +30,10 @@ build: deps
	@mkdir -p bin
	go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
	go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
	go build -ldflags "$(LDFLAGS)" -o bin/dbn cmd/cli/main.go
	go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
	# Inject gateway build metadata via pkg path variables
	go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
	@echo "Build complete! Run ./bin/dbn version"
	@echo "Build complete! Run ./bin/orama version"

# Install git hooks
install-hooks:
@ -49,46 +49,42 @@ clean:
# Run bootstrap node (auto-selects identity and data dir)
run-node:
	@echo "Starting bootstrap node..."
	@echo "Config: ~/.debros/bootstrap.yaml"
	@echo "Generate it with: dbn config init --type bootstrap"
	@echo "Starting node..."
	@echo "Config: ~/.orama/node.yaml"
	go run ./cmd/node --config node.yaml

# Run second node (regular) - requires join address of bootstrap node
# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
# Run second node - requires join address
run-node2:
	@echo "Starting regular node (node.yaml)..."
	@echo "Config: ~/.debros/node.yaml"
	@echo "Generate it with: dbn config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
	@echo "Starting second node..."
	@echo "Config: ~/.orama/node2.yaml"
	go run ./cmd/node --config node2.yaml

# Run third node (regular) - requires join address of bootstrap node
# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
# Run third node - requires join address
run-node3:
	@echo "Starting regular node (node2.yaml)..."
	@echo "Config: ~/.debros/node2.yaml"
	@echo "Generate it with: dbn config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
	@echo "Starting third node..."
	@echo "Config: ~/.orama/node3.yaml"
	go run ./cmd/node --config node3.yaml

# Run gateway HTTP server
# Usage examples:
#   make run-gateway   # uses ~/.debros/gateway.yaml
# Config generated with: dbn config init --type gateway
run-gateway:
	@echo "Starting gateway HTTP server..."
	@echo "Note: Config must be in ~/.debros/gateway.yaml"
	@echo "Generate it with: dbn config init --type gateway"
	@echo "Note: Config must be in ~/.orama/data/gateway.yaml"
	go run ./cmd/gateway

# Setup local domain names for development
setup-domains:
	@echo "Setting up local domains..."
	@sudo bash scripts/setup-local-domains.sh

# Development environment target
# Uses dbn dev up to start full stack with dependency and port checking
dev: build
	@./bin/dbn dev up
# Uses orama dev up to start full stack with dependency and port checking
dev: build setup-domains
	@./bin/orama dev up

# Graceful shutdown of all dev services
stop:
	@if [ -f ./bin/dbn ]; then \
		./bin/dbn dev down || true; \
	@if [ -f ./bin/orama ]; then \
		./bin/orama dev down || true; \
	fi
	@bash scripts/dev-kill-all.sh
@ -106,20 +102,17 @@ help:
	@echo "Local Development (Recommended):"
	@echo "  make dev  - Start full development stack with one command"
	@echo "            - Checks dependencies and available ports"
	@echo "            - Generates configs (2 bootstraps + 3 nodes + gateway)"
	@echo "            - Starts IPFS, RQLite, Olric, all nodes, and gateway"
	@echo "            - Validates cluster health (IPFS peers, RQLite, LibP2P)"
	@echo "            - Stops all services if health checks fail"
	@echo "            - Includes comprehensive logging"
	@echo "            - Generates configs and starts all services"
	@echo "            - Validates cluster health"
	@echo "  make stop - Gracefully stop all development services"
	@echo "  make kill - Force kill all development services (use if stop fails)"
	@echo ""
	@echo "Development Management (via dbn):"
	@echo "  ./bin/dbn dev status - Show status of all dev services"
	@echo "  ./bin/dbn dev logs <component> [--follow]"
	@echo "Development Management (via orama):"
	@echo "  ./bin/orama dev status - Show status of all dev services"
	@echo "  ./bin/orama dev logs <component> [--follow]"
	@echo ""
	@echo "Individual Node Targets (advanced):"
	@echo "  run-node    - Start bootstrap node directly"
	@echo "  run-node    - Start first node directly"
	@echo "  run-node2   - Start second node directly"
	@echo "  run-node3   - Start third node directly"
	@echo "  run-gateway - Start HTTP gateway directly"

PRODUCTION_INSTALL.md

@ -1,175 +0,0 @@
# Production Installation Guide - DeBros Network
This guide covers production deployment of the DeBros Network using the `dbn prod` command suite.
## System Requirements
- **OS**: Ubuntu 20.04 LTS or later, Debian 11+, or other Linux distributions
- **Architecture**: x86_64 (amd64) or ARM64 (aarch64)
- **RAM**: Minimum 4GB, recommended 8GB+
- **Storage**: Minimum 50GB SSD recommended
- **Ports**:
- 4001 (P2P networking)
- 4501 (IPFS HTTP API - bootstrap), 4502/4503 (node2/node3)
- 5001-5003 (RQLite HTTP - one per node)
- 6001 (Gateway)
- 7001-7003 (RQLite Raft - one per node)
- 9094 (IPFS Cluster API - bootstrap), 9104/9114 (node2/node3)
- 3320/3322 (Olric)
- 80, 443 (for HTTPS with Let's Encrypt)
## Installation
### Prerequisites
1. **Root access required**: All production operations require sudo/root privileges
2. **Supported distros**: Ubuntu, Debian, Fedora (via package manager)
3. **Basic tools**: `curl`, `git`, `make`, `build-essential`, `wget`
### Single-Node Bootstrap Installation
Deploy the first node (bootstrap node) on a VPS:
```bash
sudo dbn prod install --bootstrap
```
This will:
1. Check system prerequisites (OS, arch, root privileges, basic tools)
2. Provision the `debros` system user and filesystem structure at `~/.debros`
3. Download and install all required binaries (Go, RQLite, IPFS, IPFS Cluster, Olric, DeBros)
4. Generate secrets (cluster secret, swarm key, node identity)
5. Initialize repositories (IPFS, IPFS Cluster, RQLite)
6. Generate configurations for bootstrap node
7. Create and start systemd services
All files will be under `/home/debros/.debros`:
```
~/.debros/
├── bin/ # Compiled binaries
├── configs/ # YAML configurations
├── data/
│ ├── ipfs/ # IPFS repository
│ ├── ipfs-cluster/ # IPFS Cluster state
│ └── rqlite/ # RQLite database
├── logs/ # Service logs
└── secrets/ # Keys and certificates
```
### Joining Additional Nodes
Every non-bootstrap node must use the exact same IPFS Cluster secret as the bootstrap host. When you provision a follower node:
1. Copy the secret from the bootstrap machine:
```bash
scp debros@<bootstrap-ip>:/home/debros/.debros/secrets/cluster-secret ./cluster-secret
```
2. Run the installer with the `--cluster-secret` flag:
```bash
sudo dbn prod install --vps-ip <public_ip> \
  --peers /ip4/<bootstrap-ip>/tcp/4001/p2p/<peer-id> \
  --cluster-secret $(cat ./cluster-secret)
```
The installer now enforces `--cluster-secret` for all non-bootstrap nodes, which prevents mismatched cluster PSKs during deployment.
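To confirm the secret actually matches before services start, compare checksums on the bootstrap host and the new node (same path on both, as above); the two digests must be identical:

```bash
# Run on each machine and compare the output.
sha256sum /home/debros/.debros/secrets/cluster-secret
```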
## Service Management
### Check Service Status
```bash
sudo systemctl status debros-node-bootstrap
sudo systemctl status debros-gateway
sudo systemctl status debros-rqlite-bootstrap
```
### View Service Logs
```bash
# Bootstrap node logs
sudo journalctl -u debros-node-bootstrap -f
# Gateway logs
sudo journalctl -u debros-gateway -f
# All services
sudo journalctl -u "debros-*" -f
```
## Health Checks
After installation, verify services are running:
```bash
# Check IPFS
curl http://localhost:4501/api/v0/id
# Check RQLite cluster
curl http://localhost:5001/status
# Check Gateway
curl http://localhost:6001/health
# Check Olric
curl http://localhost:3320/ping
```
## Port Reference
### Development Environment (via `make dev`)
- IPFS API: 4501 (bootstrap), 4502 (node2), 4503 (node3)
- RQLite HTTP: 5001, 5002, 5003
- RQLite Raft: 7001, 7002, 7003
- IPFS Cluster: 9094, 9104, 9114
- P2P: 4001, 4002, 4003
- Gateway: 6001
- Olric: 3320, 3322
### Production Environment (via `sudo dbn prod install`)
- Same port assignments as development for consistency
## Configuration Files
Key configuration files are located in `~/.debros/configs/`:
- **bootstrap.yaml**: Bootstrap node configuration
- **node.yaml**: Regular node configuration
- **gateway.yaml**: HTTP gateway configuration
- **olric.yaml**: In-memory cache configuration
Edit these files directly for advanced configuration, then restart services:
```bash
sudo systemctl restart debros-node-bootstrap
```
## Troubleshooting
### Port already in use
Check which process is using the port:
```bash
sudo lsof -i :4501
sudo lsof -i :5001
sudo lsof -i :7001
```
Kill conflicting processes or change ports in config.
### RQLite cluster not forming
Ensure:
1. Bootstrap node is running: `systemctl status debros-rqlite-bootstrap`
2. Network connectivity between nodes on ports 5001+ (HTTP) and 7001+ (Raft)
3. Check logs: `journalctl -u debros-rqlite-bootstrap -f`
---
**Last Updated**: November 2024
**Compatible with**: Network v1.0.0+

README.md
File diff suppressed because it is too large.

cmd/cli/main.go

@ -34,7 +34,7 @@ func main() {
	switch command {
	case "version":
		fmt.Printf("dbn %s", version)
		fmt.Printf("orama %s", version)
		if commit != "" {
			fmt.Printf(" (commit %s)", commit)
		}
@ -48,10 +48,30 @@ func main() {
	case "dev":
		cli.HandleDevCommand(args)

	// Production environment commands
	// Production environment commands (legacy with 'prod' prefix)
	case "prod":
		cli.HandleProdCommand(args)

	// Direct production commands (new simplified interface)
	case "install":
		cli.HandleProdCommand(append([]string{"install"}, args...))
	case "upgrade":
		cli.HandleProdCommand(append([]string{"upgrade"}, args...))
	case "migrate":
		cli.HandleProdCommand(append([]string{"migrate"}, args...))
	case "status":
		cli.HandleProdCommand(append([]string{"status"}, args...))
	case "start":
		cli.HandleProdCommand(append([]string{"start"}, args...))
	case "stop":
		cli.HandleProdCommand(append([]string{"stop"}, args...))
	case "restart":
		cli.HandleProdCommand(append([]string{"restart"}, args...))
	case "logs":
		cli.HandleProdCommand(append([]string{"logs"}, args...))
	case "uninstall":
		cli.HandleProdCommand(append([]string{"uninstall"}, args...))

	// Authentication commands
	case "auth":
		cli.HandleAuthCommand(args)
@ -85,8 +105,8 @@ func parseGlobalFlags(args []string) {
}

func showHelp() {
	fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Printf("Usage: dbn <command> [args...]\n\n")
	fmt.Printf("Orama CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Printf("Usage: orama <command> [args...]\n\n")

	fmt.Printf("💻 Local Development:\n")
	fmt.Printf("  dev up - Start full local dev environment\n")
@ -96,15 +116,14 @@ func showHelp() {
	fmt.Printf("  dev help - Show dev command help\n\n")

	fmt.Printf("🚀 Production Deployment:\n")
	fmt.Printf("  prod install [--bootstrap] - Full production bootstrap (requires root/sudo)\n")
	fmt.Printf("  prod upgrade - Upgrade existing installation\n")
	fmt.Printf("  prod status - Show production service status\n")
	fmt.Printf("  prod start - Start all production services (requires root/sudo)\n")
	fmt.Printf("  prod stop - Stop all production services (requires root/sudo)\n")
	fmt.Printf("  prod restart - Restart all production services (requires root/sudo)\n")
	fmt.Printf("  prod logs <service> - View production service logs\n")
	fmt.Printf("  prod uninstall - Remove production services (requires root/sudo)\n")
	fmt.Printf("  prod help - Show prod command help\n\n")
	fmt.Printf("  install - Install production node (requires root/sudo)\n")
	fmt.Printf("  upgrade - Upgrade existing installation\n")
	fmt.Printf("  status - Show production service status\n")
	fmt.Printf("  start - Start all production services (requires root/sudo)\n")
	fmt.Printf("  stop - Stop all production services (requires root/sudo)\n")
	fmt.Printf("  restart - Restart all production services (requires root/sudo)\n")
	fmt.Printf("  logs <service> - View production service logs\n")
	fmt.Printf("  uninstall - Remove production services (requires root/sudo)\n\n")

	fmt.Printf("🔐 Authentication:\n")
	fmt.Printf("  auth login - Authenticate with wallet\n")
@ -119,16 +138,14 @@ func showHelp() {
	fmt.Printf("  --help, -h - Show this help message\n\n")

	fmt.Printf("Examples:\n")
	fmt.Printf("  # Authenticate\n")
	fmt.Printf("  dbn auth login\n\n")
	fmt.Printf("  # First node (creates new cluster)\n")
	fmt.Printf("  sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
	fmt.Printf("  # Start local dev environment\n")
	fmt.Printf("  dbn dev up\n")
	fmt.Printf("  dbn dev status\n\n")
	fmt.Printf("  # Join existing cluster\n")
	fmt.Printf("  sudo orama install --vps-ip 203.0.113.2 --domain node-2.orama.network \\\n")
	fmt.Printf("      --peers /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW... --cluster-secret <hex>\n\n")
	fmt.Printf("  # Production deployment (requires root/sudo)\n")
	fmt.Printf("  sudo dbn prod install --bootstrap\n")
	fmt.Printf("  sudo dbn prod upgrade\n")
	fmt.Printf("  dbn prod status\n")
	fmt.Printf("  dbn prod logs node --follow\n")
	fmt.Printf("  # Service management\n")
	fmt.Printf("  orama status\n")
	fmt.Printf("  orama logs node --follow\n")
}
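Because each direct command simply prepends its own name and re-enters `HandleProdCommand`, the new and legacy spellings are interchangeable:

```bash
# These pairs dispatch to the same handler.
orama status          # new simplified form
orama prod status     # legacy form, still supported

sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network
sudo orama prod install --vps-ip 203.0.113.1 --domain node-1.orama.network
```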

cmd/gateway/main.go

@ -40,11 +40,11 @@ func getEnvBoolDefault(key string, def bool) bool {
	}
}

// parseGatewayConfig loads gateway.yaml from ~/.debros exclusively.
// parseGatewayConfig loads gateway.yaml from ~/.orama exclusively.
// It accepts an optional --config flag for absolute paths (used by systemd services).
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
	// Parse --config flag (optional, for systemd services that pass absolute paths)
	configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.debros)")
	configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
	flag.Parse()

	// Determine config path
@ -63,7 +63,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
			}
		}
	} else {
		// Default behavior: look for gateway.yaml in ~/.debros/data/, ~/.debros/configs/, or ~/.debros/
		// Default behavior: look for gateway.yaml in ~/.orama/data/, ~/.orama/configs/, or ~/.orama/
		configPath, err = config.DefaultPath("gateway.yaml")
		if err != nil {
			logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
@ -157,7 +157,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
		// Default TLS cache directory if HTTPS is enabled but not specified
		homeDir, err := os.UserHomeDir()
		if err == nil {
			cfg.TLSCacheDir = filepath.Join(homeDir, ".debros", "tls-cache")
			cfg.TLSCacheDir = filepath.Join(homeDir, ".orama", "tls-cache")
		}
	}
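Given the lookup order in the comment above (`~/.orama/data/`, then `~/.orama/configs/`, then `~/.orama/`), dropping the file in the preferred location is enough, and an absolute `--config` path bypasses the search entirely. The home directory below is an assumption:

```bash
# Preferred location for the gateway config.
mkdir -p ~/.orama/data
cp gateway.yaml ~/.orama/data/

# Or point the binary (e.g., from a systemd unit) at an absolute path.
./bin/gateway --config /home/debros/.orama/data/gateway.yaml
```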

cmd/node/main.go

@ -33,7 +33,7 @@ func setup_logger(component logging.Component) (logger *logging.ColoredLogger) {

// parse_flags parses command-line flags and returns them.
func parse_flags() (configName *string, help *bool) {
	configName = flag.String("config", "node.yaml", "Config filename in ~/.debros (default: node.yaml)")
	configName = flag.String("config", "node.yaml", "Config filename in ~/.orama (default: node.yaml)")
	help = flag.Bool("help", false, "Show help")
	flag.Parse()
@ -63,7 +63,7 @@ func check_if_should_open_help(help *bool) {
	}
}

// select_data_dir validates that we can load the config from ~/.debros
// select_data_dir validates that we can load the config from ~/.orama
func select_data_dir_check(configName *string) {
	logger := setup_logger(logging.ComponentNode)
@ -272,7 +272,7 @@ func main() {
		// Absolute path passed directly (e.g., from systemd service)
		configPath = *configName
	} else {
		// Relative path - use DefaultPath which checks both ~/.debros/configs/ and ~/.debros/
		// Relative path - use DefaultPath which checks both ~/.orama/configs/ and ~/.orama/
		configPath, err = config.DefaultPath(*configName)
		if err != nil {
			logger.Error("Failed to determine config path", zap.Error(err))

debian/control

@ -0,0 +1,19 @@
Package: orama
Version: 0.69.20
Section: net
Priority: optional
Architecture: amd64
Depends: libc6
Maintainer: DeBros Team <dev@debros.io>
Description: Orama Network - Distributed P2P Database System
 Orama is a distributed peer-to-peer network that combines
 RQLite for distributed SQL, IPFS for content-addressed storage,
 and LibP2P for peer discovery and communication.
 .
 Features:
  - Distributed SQLite database with Raft consensus
  - IPFS-based file storage with encryption
  - LibP2P peer-to-peer networking
  - Olric distributed cache
  - Unified HTTP/HTTPS gateway

debian/postinst

@ -0,0 +1,18 @@
#!/bin/bash
set -e
# Post-installation script for orama package
echo "Orama installed successfully!"
echo ""
echo "To set up your node, run:"
echo " sudo orama install"
echo ""
echo "This will launch the interactive installer."
echo ""
echo "For command-line installation:"
echo " sudo orama install --vps-ip <your-ip> --domain <your-domain>"
echo ""
echo "For help:"
echo " orama --help"


@ -35,7 +35,7 @@ var (
	cacheMutex sync.RWMutex
)

// loadGatewayConfig loads gateway configuration from ~/.debros/gateway.yaml
// loadGatewayConfig loads gateway configuration from ~/.orama/gateway.yaml
func loadGatewayConfig() (map[string]interface{}, error) {
	configPath, err := config.DefaultPath("gateway.yaml")
	if err != nil {
@ -55,7 +55,7 @@ func loadGatewayConfig() (map[string]interface{}, error) {
	return cfg, nil
}

// loadNodeConfig loads node configuration from ~/.debros/node.yaml or bootstrap.yaml
// loadNodeConfig loads node configuration from ~/.orama/node.yaml or bootstrap.yaml
func loadNodeConfig(filename string) (map[string]interface{}, error) {
	configPath, err := config.DefaultPath(filename)
	if err != nil {
@ -143,11 +143,11 @@ func queryAPIKeyFromRQLite() (string, error) {
	// Try bootstrap first, then all nodes
	dbPaths := []string{
		filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "bootstrap2", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".debros", "node4", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".orama", "bootstrap", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".orama", "bootstrap2", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".orama", "node2", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".orama", "node3", "rqlite", "db.sqlite"),
		filepath.Join(homeDir, ".orama", "node4", "rqlite", "db.sqlite"),
	}

	for _, dbPath := range dbPaths {
@ -562,7 +562,7 @@ func CleanupDatabaseTable(t *testing.T, tableName string) {
		return
	}

	dbPath := filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite")
	dbPath := filepath.Join(homeDir, ".orama", "bootstrap", "rqlite", "db.sqlite")
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		t.Logf("warning: failed to open database for cleanup: %v", err)

20
go.mod
View File

@ -5,29 +5,39 @@ go 1.23.8
toolchain go1.24.1
require (
github.com/charmbracelet/bubbles v0.20.0
github.com/charmbracelet/bubbletea v1.2.4
github.com/charmbracelet/lipgloss v1.0.0
github.com/ethereum/go-ethereum v1.13.14
github.com/go-chi/chi/v5 v5.2.3
github.com/gorilla/websocket v1.5.3
github.com/libp2p/go-libp2p v0.41.1
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/mackerelio/go-osstat v0.2.6
github.com/mattn/go-sqlite3 v1.14.32
github.com/multiformats/go-multiaddr v0.15.0
github.com/olric-data/olric v0.7.0
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.40.0
golang.org/x/net v0.42.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buraksezer/consistent v0.10.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.4.5 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
@ -35,6 +45,7 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
@ -70,15 +81,20 @@ require (
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-sqlite3 v1.14.32 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
@ -121,6 +137,7 @@ require (
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/redis/go-redis/v9 v9.8.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
@ -141,6 +158,5 @@ require (
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
)
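
The new charmbracelet dependencies (bubbles, bubbletea, lipgloss) power the interactive TUI installer invoked via installer.Run() below. The installer package itself is not shown in this diff; a minimal, hypothetical bubbletea program of the kind these dependencies enable — not the real installer — looks like:

package main

import (
	"fmt"
	"os"

	tea "github.com/charmbracelet/bubbletea"
)

// model holds a single text field; the real installer collects VPS IP,
// domain, branch, peers, etc. This is only a shape sketch.
type model struct {
	input string
}

func (m model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if key, ok := msg.(tea.KeyMsg); ok {
		switch key.Type {
		case tea.KeyEnter, tea.KeyCtrlC, tea.KeyEsc:
			return m, tea.Quit
		case tea.KeyBackspace:
			if len(m.input) > 0 {
				m.input = m.input[:len(m.input)-1]
			}
		case tea.KeyRunes:
			m.input += string(key.Runes)
		}
	}
	return m, nil
}

func (m model) View() string {
	return fmt.Sprintf("VPS public IP: %s\n(enter to confirm)\n", m.input)
}

func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}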

34
go.sum
View File

@ -19,6 +19,10 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@ -44,6 +48,16 @@ github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0ma
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE=
github.com/charmbracelet/bubbletea v1.2.4/go.mod h1:Qr6fVQw+wX7JkWWkVyXYk/ZUQ92a6XNekLXa3rR18MM=
github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSexTdRM=
github.com/charmbracelet/x/ansi v0.4.5/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
@ -75,6 +89,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -85,6 +101,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -238,6 +256,8 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
@ -246,6 +266,10 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -271,6 +295,12 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
@ -399,6 +429,9 @@ github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtB
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
@ -585,6 +618,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@ -34,15 +34,15 @@ func GetCredentialsPath() (string, error) {
return "", fmt.Errorf("failed to get home directory: %w", err)
}
debrosDir := filepath.Join(homeDir, ".debros")
if err := os.MkdirAll(debrosDir, 0700); err != nil {
return "", fmt.Errorf("failed to create .debros directory: %w", err)
oramaDir := filepath.Join(homeDir, ".orama")
if err := os.MkdirAll(oramaDir, 0700); err != nil {
return "", fmt.Errorf("failed to create .orama directory: %w", err)
}
return filepath.Join(debrosDir, "credentials.json"), nil
return filepath.Join(oramaDir, "credentials.json"), nil
}
// LoadCredentials loads credentials from ~/.debros/credentials.json
// LoadCredentials loads credentials from ~/.orama/credentials.json
func LoadCredentials() (*CredentialStore, error) {
credPath, err := GetCredentialsPath()
if err != nil {
@ -80,7 +80,7 @@ func LoadCredentials() (*CredentialStore, error) {
return &store, nil
}
// SaveCredentials saves credentials to ~/.debros/credentials.json
// SaveCredentials saves credentials to ~/.orama/credentials.json
func (store *CredentialStore) SaveCredentials() error {
credPath, err := GetCredentialsPath()
if err != nil {
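
A quick usage sketch (same package), exercising only the functions shown above:

// Round-trip the credential store through the renamed path.
func rotateCredentials() error {
	store, err := LoadCredentials() // reads ~/.orama/credentials.json
	if err != nil {
		return err
	}
	// ...mutate fields on store here...
	return store.SaveCredentials() // writes back to the same path
}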

View File

@ -199,7 +199,7 @@ func (as *AuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
%s
</div>
<p>Your credentials have been saved securely to <code>~/.debros/credentials.json</code></p>
<p>Your credentials have been saved securely to <code>~/.orama/credentials.json</code></p>
<p><strong>You can now close this browser window and return to your terminal.</strong></p>
</div>
</body>

View File

@ -50,7 +50,7 @@ func showAuthHelp() {
fmt.Printf(" 1. Run 'dbn auth login'\n")
fmt.Printf(" 2. Enter your wallet address when prompted\n")
fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n")
fmt.Printf(" 4. An API key will be generated and saved to ~/.debros/credentials.json\n\n")
fmt.Printf(" 4. An API key will be generated and saved to ~/.orama/credentials.json\n\n")
fmt.Printf("Note: Authentication uses the currently active environment.\n")
fmt.Printf(" Use 'dbn env current' to see your active environment.\n")
}

View File

@ -57,13 +57,13 @@ func showDevHelp() {
func handleDevUp(args []string) {
ctx := context.Background()
// Get home directory and .debros path
// Get home directory and .orama path
homeDir, err := os.UserHomeDir()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
// Step 1: Check dependencies
fmt.Printf("📋 Checking dependencies...\n\n")
@ -90,7 +90,7 @@ func handleDevUp(args []string) {
// Step 3: Ensure configs
fmt.Printf("⚙️ Preparing configuration files...\n\n")
ensurer := development.NewConfigEnsurer(debrosDir)
ensurer := development.NewConfigEnsurer(oramaDir)
if err := ensurer.EnsureAll(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
os.Exit(1)
@ -98,7 +98,7 @@ func handleDevUp(args []string) {
fmt.Printf("\n")
// Step 4: Start services
pm := development.NewProcessManager(debrosDir, os.Stdout)
pm := development.NewProcessManager(oramaDir, os.Stdout)
if err := pm.StartAll(ctx); err != nil {
fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
os.Exit(1)
@ -120,7 +120,7 @@ func handleDevUp(args []string) {
fmt.Printf(" dbn dev logs bootstrap - Bootstrap logs\n")
fmt.Printf(" dbn dev logs bootstrap2 - Bootstrap2 logs\n")
fmt.Printf(" dbn dev down - Stop all services\n\n")
fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
fmt.Printf("Logs directory: %s/logs\n\n", oramaDir)
}
func handleDevDown(args []string) {
@ -129,9 +129,9 @@ func handleDevDown(args []string) {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
pm := development.NewProcessManager(debrosDir, os.Stdout)
pm := development.NewProcessManager(oramaDir, os.Stdout)
ctx := context.Background()
if err := pm.StopAll(ctx); err != nil {
@ -148,9 +148,9 @@ func handleDevStatus(args []string) {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
pm := development.NewProcessManager(debrosDir, os.Stdout)
pm := development.NewProcessManager(oramaDir, os.Stdout)
ctx := context.Background()
pm.Status(ctx)
@ -171,9 +171,9 @@ func handleDevLogs(args []string) {
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
os.Exit(1)
}
debrosDir := filepath.Join(homeDir, ".debros")
oramaDir := filepath.Join(homeDir, ".orama")
logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))
logPath := filepath.Join(oramaDir, "logs", fmt.Sprintf("%s.log", component))
if _, err := os.Stat(logPath); os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
os.Exit(1)

View File

@ -43,8 +43,8 @@ func showEnvHelp() {
fmt.Printf(" enable - Alias for 'switch' (e.g., 'devnet enable')\n\n")
fmt.Printf("Available Environments:\n")
fmt.Printf(" local - Local development (http://localhost:6001)\n")
fmt.Printf(" devnet - Development network (https://devnet.debros.network)\n")
fmt.Printf(" testnet - Test network (https://testnet.debros.network)\n\n")
fmt.Printf(" devnet - Development network (https://devnet.orama.network)\n")
fmt.Printf(" testnet - Test network (https://testnet.orama.network)\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" dbn env list\n")
fmt.Printf(" dbn env current\n")

View File

@ -33,13 +33,13 @@ var DefaultEnvironments = []Environment{
},
{
Name: "devnet",
GatewayURL: "https://devnet.debros.network",
GatewayURL: "https://devnet.orama.network",
Description: "Development network (testnet)",
IsActive: false,
},
{
Name: "testnet",
GatewayURL: "https://testnet.debros.network",
GatewayURL: "https://testnet.orama.network",
Description: "Test network (staging)",
IsActive: false,
},

View File

@ -15,9 +15,40 @@ import (
"time"
"github.com/DeBrosOfficial/network/pkg/environments/production"
"github.com/DeBrosOfficial/network/pkg/installer"
"github.com/multiformats/go-multiaddr"
)
// runInteractiveInstaller launches the TUI installer
func runInteractiveInstaller() {
config, err := installer.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ %v\n", err)
os.Exit(1)
}
// Convert TUI config to install args and run installation
var args []string
args = append(args, "--vps-ip", config.VpsIP)
args = append(args, "--domain", config.Domain)
args = append(args, "--branch", config.Branch)
if !config.IsFirstNode {
if config.JoinAddress != "" {
args = append(args, "--join", config.JoinAddress)
}
if config.ClusterSecret != "" {
args = append(args, "--cluster-secret", config.ClusterSecret)
}
if len(config.Peers) > 0 {
args = append(args, "--peers", strings.Join(config.Peers, ","))
}
}
// Re-run with collected args
handleProdInstall(args)
}
// normalizeBootstrapPeers normalizes and validates bootstrap peer multiaddrs
func normalizeBootstrapPeers(peersStr string) ([]string, error) {
if peersStr == "" {
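
The rest of normalizeBootstrapPeers is cut off by this hunk; with go-multiaddr newly imported above, per-entry validation plausibly reads along these lines (a sketch, not the committed body):

// Split, trim, and validate each multiaddr entry; tolerate blank entries.
var out []string
for _, raw := range strings.Split(peersStr, ",") {
	p := strings.TrimSpace(raw)
	if p == "" {
		continue // trailing commas / extra whitespace
	}
	if _, err := multiaddr.NewMultiaddr(p); err != nil {
		return nil, fmt.Errorf("invalid multiaddr %q: %w", p, err)
	}
	out = append(out, p)
}
return out, nil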
@ -65,6 +96,8 @@ func HandleProdCommand(args []string) {
handleProdInstall(subargs)
case "upgrade":
handleProdUpgrade(subargs)
case "migrate":
handleProdMigrate(subargs)
case "status":
handleProdStatus()
case "start":
@ -88,24 +121,27 @@ func HandleProdCommand(args []string) {
func showProdHelp() {
fmt.Printf("Production Environment Commands\n\n")
fmt.Printf("Usage: dbn prod <subcommand> [options]\n\n")
fmt.Printf("Usage: orama <subcommand> [options]\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" install - Full production bootstrap (requires root/sudo)\n")
fmt.Printf(" install - Install production node (requires root/sudo)\n")
fmt.Printf(" Options:\n")
fmt.Printf(" --interactive - Launch interactive TUI wizard\n")
fmt.Printf(" --force - Reconfigure all settings\n")
fmt.Printf(" --bootstrap - Install as bootstrap node\n")
fmt.Printf(" --vps-ip IP - VPS public IP address (required for non-bootstrap)\n")
fmt.Printf(" --peers ADDRS - Comma-separated bootstrap peer multiaddrs (required for non-bootstrap)\n")
fmt.Printf(" --cluster-secret HEX - 64-hex cluster secret (required for non-bootstrap)\n")
fmt.Printf(" --bootstrap-join ADDR - Bootstrap raft join address (for secondary bootstrap)\n")
fmt.Printf(" --domain DOMAIN - Domain for HTTPS (optional)\n")
fmt.Printf(" --vps-ip IP - VPS public IP address (required)\n")
fmt.Printf(" --domain DOMAIN - Domain for this node (e.g., node-1.orama.network)\n")
fmt.Printf(" --peers ADDRS - Comma-separated peer multiaddrs (for joining cluster)\n")
fmt.Printf(" --join ADDR - RQLite join address IP:port (for joining cluster)\n")
fmt.Printf(" --cluster-secret HEX - 64-hex cluster secret (required when joining)\n")
fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly, default: main)\n")
fmt.Printf(" --ignore-resource-checks - Skip disk/RAM/CPU prerequisite validation\n")
fmt.Printf(" upgrade - Upgrade existing installation (requires root/sudo)\n")
fmt.Printf(" Options:\n")
fmt.Printf(" --restart - Automatically restart services after upgrade\n")
fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly, uses saved preference if not specified)\n")
fmt.Printf(" --no-pull - Skip git clone/pull, use existing /home/debros/src\n")
fmt.Printf(" --branch BRANCH - Git branch to use (main or nightly)\n")
fmt.Printf(" --no-pull - Skip git clone/pull, use existing source\n")
fmt.Printf(" migrate - Migrate from old bootstrap/node setup (requires root/sudo)\n")
fmt.Printf(" Options:\n")
fmt.Printf(" --dry-run - Show what would be migrated without making changes\n")
fmt.Printf(" status - Show status of production services\n")
fmt.Printf(" start - Start all production services (requires root/sudo)\n")
fmt.Printf(" stop - Stop all production services (requires root/sudo)\n")
@ -116,27 +152,20 @@ func showProdHelp() {
fmt.Printf(" --follow - Follow logs in real-time\n")
fmt.Printf(" uninstall - Remove production services (requires root/sudo)\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" # Bootstrap node (main branch)\n")
fmt.Printf(" sudo dbn prod install --bootstrap\n\n")
fmt.Printf(" # Bootstrap node (nightly branch)\n")
fmt.Printf(" sudo dbn prod install --bootstrap --branch nightly\n\n")
fmt.Printf(" # First node (creates new cluster)\n")
fmt.Printf(" sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
fmt.Printf(" # Join existing cluster\n")
fmt.Printf(" sudo dbn prod install --vps-ip 10.0.0.2 --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...\n\n")
fmt.Printf(" # Secondary bootstrap joining existing cluster\n")
fmt.Printf(" sudo dbn prod install --bootstrap --vps-ip 10.0.0.2 --bootstrap-join 10.0.0.1:7001 --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...\n\n")
fmt.Printf(" # Upgrade using saved branch preference\n")
fmt.Printf(" sudo dbn prod upgrade --restart\n\n")
fmt.Printf(" # Upgrade and switch to nightly branch\n")
fmt.Printf(" sudo dbn prod upgrade --restart --branch nightly\n\n")
fmt.Printf(" # Upgrade without pulling latest code (use existing /home/debros/src)\n")
fmt.Printf(" sudo dbn prod upgrade --restart --no-pull\n\n")
fmt.Printf(" sudo orama install --vps-ip 203.0.113.2 --domain node-2.orama.network \\\n")
fmt.Printf(" --peers /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW... \\\n")
fmt.Printf(" --cluster-secret <64-hex-secret>\n\n")
fmt.Printf(" # Upgrade\n")
fmt.Printf(" sudo orama upgrade --restart\n\n")
fmt.Printf(" # Service management\n")
fmt.Printf(" sudo dbn prod start\n")
fmt.Printf(" sudo dbn prod stop\n")
fmt.Printf(" sudo dbn prod restart\n\n")
fmt.Printf(" dbn prod status\n")
fmt.Printf(" dbn prod logs node --follow\n")
fmt.Printf(" dbn prod logs gateway --follow\n")
fmt.Printf(" sudo orama start\n")
fmt.Printf(" sudo orama stop\n")
fmt.Printf(" sudo orama restart\n\n")
fmt.Printf(" orama status\n")
fmt.Printf(" orama logs node --follow\n")
}
func handleProdInstall(args []string) {
@ -145,14 +174,14 @@ func handleProdInstall(args []string) {
fs.SetOutput(os.Stderr)
force := fs.Bool("force", false, "Reconfigure all settings")
isBootstrap := fs.Bool("bootstrap", false, "Install as bootstrap node")
skipResourceChecks := fs.Bool("ignore-resource-checks", false, "Skip disk/RAM/CPU prerequisite validation")
vpsIP := fs.String("vps-ip", "", "VPS public IP address (required for non-bootstrap)")
domain := fs.String("domain", "", "Domain for HTTPS (optional)")
peersStr := fs.String("peers", "", "Comma-separated bootstrap peer multiaddrs (required for non-bootstrap)")
bootstrapJoin := fs.String("bootstrap-join", "", "Bootstrap raft join address (for secondary bootstrap)")
vpsIP := fs.String("vps-ip", "", "VPS public IP address")
domain := fs.String("domain", "", "Domain for this node (e.g., node-123.orama.network)")
peersStr := fs.String("peers", "", "Comma-separated peer multiaddrs to connect to")
joinAddress := fs.String("join", "", "RQLite join address (IP:port) to join existing cluster")
branch := fs.String("branch", "main", "Git branch to use (main or nightly)")
clusterSecret := fs.String("cluster-secret", "", "Hex-encoded 32-byte cluster secret (required for non-bootstrap nodes)")
clusterSecret := fs.String("cluster-secret", "", "Hex-encoded 32-byte cluster secret (for joining existing cluster)")
interactive := fs.Bool("interactive", false, "Run interactive TUI installer")
if err := fs.Parse(args); err != nil {
if err == flag.ErrHelp {
@ -162,16 +191,22 @@ func handleProdInstall(args []string) {
os.Exit(1)
}
// Launch TUI installer if --interactive flag or no required args provided
if *interactive || (*vpsIP == "" && len(args) == 0) {
runInteractiveInstaller()
return
}
// Validate branch
if *branch != "main" && *branch != "nightly" {
fmt.Fprintf(os.Stderr, "❌ Invalid branch: %s (must be 'main' or 'nightly')\n", *branch)
os.Exit(1)
}
// Normalize and validate bootstrap peers
bootstrapPeers, err := normalizeBootstrapPeers(*peersStr)
// Normalize and validate peers
peers, err := normalizeBootstrapPeers(*peersStr)
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Invalid bootstrap peers: %v\n", err)
fmt.Fprintf(os.Stderr, "❌ Invalid peers: %v\n", err)
fmt.Fprintf(os.Stderr, " Example: --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...,/ip4/10.0.0.2/tcp/4001/p2p/Qm...\n")
os.Exit(1)
}
@ -182,62 +217,60 @@ func handleProdInstall(args []string) {
os.Exit(1)
}
// Validate bootstrap node requirements
if *isBootstrap {
if *vpsIP == "" {
fmt.Fprintf(os.Stderr, "❌ --vps-ip is required for bootstrap nodes\n")
fmt.Fprintf(os.Stderr, " Bootstrap nodes must advertise a public IP address for other nodes to connect\n")
fmt.Fprintf(os.Stderr, " Usage: sudo dbn prod install --bootstrap --vps-ip <public_ip>\n")
fmt.Fprintf(os.Stderr, " Example: sudo dbn prod install --bootstrap --vps-ip 203.0.113.1\n")
os.Exit(1)
}
// Validate secondary bootstrap requirements
if *bootstrapJoin == "" {
fmt.Fprintf(os.Stderr, "⚠️ Warning: Primary bootstrap node detected (--bootstrap without --bootstrap-join)\n")
fmt.Fprintf(os.Stderr, " This node will form a new cluster. To join existing cluster as secondary bootstrap:\n")
fmt.Fprintf(os.Stderr, " sudo dbn prod install --bootstrap --vps-ip %s --bootstrap-join <bootstrap_ip>:7001 --peers <multiaddr>\n", *vpsIP)
}
// Validate VPS IP is provided
if *vpsIP == "" {
fmt.Fprintf(os.Stderr, "❌ --vps-ip is required\n")
fmt.Fprintf(os.Stderr, " Usage: sudo orama install --vps-ip <public_ip>\n")
fmt.Fprintf(os.Stderr, " Or run: sudo orama install --interactive\n")
os.Exit(1)
}
// Validate non-bootstrap node requirements
if !*isBootstrap {
if *vpsIP == "" {
fmt.Fprintf(os.Stderr, "❌ --vps-ip is required for non-bootstrap nodes\n")
fmt.Fprintf(os.Stderr, " Usage: sudo dbn prod install --vps-ip <public_ip> --peers <multiaddr>\n")
os.Exit(1)
}
if len(bootstrapPeers) == 0 {
fmt.Fprintf(os.Stderr, "❌ --peers is required for non-bootstrap nodes\n")
fmt.Fprintf(os.Stderr, " Usage: sudo dbn prod install --vps-ip <public_ip> --peers <multiaddr>\n")
fmt.Fprintf(os.Stderr, " Example: --peers /ip4/10.0.0.1/tcp/4001/p2p/Qm...\n")
os.Exit(1)
}
// Determine if this is the first node (creates new cluster) or joining existing cluster
isFirstNode := len(peers) == 0 && *joinAddress == ""
if isFirstNode {
fmt.Printf(" First node detected - will create new cluster\n")
} else {
fmt.Printf(" Joining existing cluster\n")
// Cluster secret is required when joining
if *clusterSecret == "" {
fmt.Fprintf(os.Stderr, "❌ --cluster-secret is required for non-bootstrap nodes\n")
fmt.Fprintf(os.Stderr, " Provide the 64-hex secret from the bootstrap node (cat ~/.debros/secrets/cluster-secret)\n")
fmt.Fprintf(os.Stderr, "❌ --cluster-secret is required when joining an existing cluster\n")
fmt.Fprintf(os.Stderr, " Provide the 64-hex secret from an existing node (cat ~/.orama/secrets/cluster-secret)\n")
os.Exit(1)
}
}
if *clusterSecret != "" {
if err := production.ValidateClusterSecret(*clusterSecret); err != nil {
fmt.Fprintf(os.Stderr, "❌ Invalid --cluster-secret: %v\n", err)
os.Exit(1)
}
}
debrosHome := "/home/debros"
debrosDir := debrosHome + "/.debros"
setup := production.NewProductionSetup(debrosHome, os.Stdout, *force, *branch, false, *skipResourceChecks, *clusterSecret)
oramaHome := "/home/debros"
oramaDir := oramaHome + "/.orama"
// If cluster secret was provided, save it to secrets directory before setup
if *clusterSecret != "" {
secretsDir := filepath.Join(oramaDir, "secrets")
if err := os.MkdirAll(secretsDir, 0755); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to create secrets directory: %v\n", err)
os.Exit(1)
}
secretPath := filepath.Join(secretsDir, "cluster-secret")
if err := os.WriteFile(secretPath, []byte(*clusterSecret), 0600); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to save cluster secret: %v\n", err)
os.Exit(1)
}
fmt.Printf(" ✓ Cluster secret saved\n")
}
setup := production.NewProductionSetup(oramaHome, os.Stdout, *force, *branch, false, *skipResourceChecks)
// Check port availability before proceeding
if err := ensurePortsAvailable("prod install", defaultPorts()); err != nil {
if err := ensurePortsAvailable("install", defaultPorts()); err != nil {
fmt.Fprintf(os.Stderr, "❌ %v\n", err)
os.Exit(1)
}
// Save branch preference for future upgrades
if err := production.SaveBranchPreference(debrosDir, *branch); err != nil {
if err := production.SaveBranchPreference(oramaDir, *branch); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Warning: Failed to save branch preference: %v\n", err)
}
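
production.ValidateClusterSecret is called above but its body is not in this diff; a plausible minimal implementation, consistent with the "64-hex / 32-byte" requirement stated in the help text, would be:

package production

import (
	"encoding/hex"
	"fmt"
)

// ValidateClusterSecret sketch: 64 hex characters decoding to exactly 32 bytes.
func ValidateClusterSecret(s string) error {
	b, err := hex.DecodeString(s)
	if err != nil {
		return fmt.Errorf("cluster secret is not valid hex: %w", err)
	}
	if len(b) != 32 {
		return fmt.Errorf("cluster secret must be 32 bytes (64 hex chars), got %d bytes", len(b))
	}
	return nil
}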
@ -262,23 +295,17 @@ func handleProdInstall(args []string) {
os.Exit(1)
}
// Determine node type early
nodeType := "node"
if *isBootstrap {
nodeType = "bootstrap"
}
// Phase 3: Generate secrets FIRST (before service initialization)
// This ensures cluster secret and swarm key exist before repos are seeded
fmt.Printf("\n🔐 Phase 3: Generating secrets...\n")
if err := setup.Phase3GenerateSecrets(*isBootstrap); err != nil {
if err := setup.Phase3GenerateSecrets(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Secret generation failed: %v\n", err)
os.Exit(1)
}
// Phase 2c: Initialize services (after secrets are in place)
fmt.Printf("\nPhase 2c: Initializing services...\n")
if err := setup.Phase2cInitializeServices(nodeType, bootstrapPeers, *vpsIP); err != nil {
if err := setup.Phase2cInitializeServices(peers, *vpsIP); err != nil {
fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
os.Exit(1)
}
@ -286,14 +313,14 @@ func handleProdInstall(args []string) {
// Phase 4: Generate configs
fmt.Printf("\n⚙ Phase 4: Generating configurations...\n")
enableHTTPS := *domain != ""
if err := setup.Phase4GenerateConfigs(*isBootstrap, bootstrapPeers, *vpsIP, enableHTTPS, *domain, *bootstrapJoin); err != nil {
if err := setup.Phase4GenerateConfigs(peers, *vpsIP, enableHTTPS, *domain, *joinAddress); err != nil {
fmt.Fprintf(os.Stderr, "❌ Configuration generation failed: %v\n", err)
os.Exit(1)
}
// Phase 5: Create systemd services
fmt.Printf("\n🔧 Phase 5: Creating systemd services...\n")
if err := setup.Phase5CreateSystemdServices(nodeType, *vpsIP); err != nil {
if err := setup.Phase5CreateSystemdServices(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Service creation failed: %v\n", err)
os.Exit(1)
}
@ -357,30 +384,30 @@ func handleProdUpgrade(args []string) {
os.Exit(1)
}
debrosHome := "/home/debros"
debrosDir := debrosHome + "/.debros"
oramaHome := "/home/debros"
oramaDir := oramaHome + "/.orama"
fmt.Printf("🔄 Upgrading production installation...\n")
fmt.Printf(" This will preserve existing configurations and data\n")
fmt.Printf(" Configurations will be updated to latest format\n\n")
setup := production.NewProductionSetup(debrosHome, os.Stdout, *force, *branch, *noPull, false, "")
setup := production.NewProductionSetup(oramaHome, os.Stdout, *force, *branch, *noPull, false)
// Log if --no-pull is enabled
if *noPull {
fmt.Printf(" ⚠️ --no-pull flag enabled: Skipping git clone/pull\n")
fmt.Printf(" Using existing repository at %s/src\n", debrosHome)
fmt.Printf(" Using existing repository at %s/src\n", oramaHome)
}
// If branch was explicitly provided, save it for future upgrades
if *branch != "" {
if err := production.SaveBranchPreference(debrosDir, *branch); err != nil {
if err := production.SaveBranchPreference(oramaDir, *branch); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Warning: Failed to save branch preference: %v\n", err)
} else {
fmt.Printf(" Using branch: %s (saved for future upgrades)\n", *branch)
}
} else {
// Show which branch is being used (read from saved preference)
currentBranch := production.ReadBranchPreference(debrosDir)
currentBranch := production.ReadBranchPreference(oramaDir)
fmt.Printf(" Using branch: %s (from saved preference)\n", currentBranch)
}
@ -404,12 +431,9 @@ func handleProdUpgrade(args []string) {
serviceController := production.NewSystemdController()
services := []string{
"debros-gateway.service",
"debros-node-bootstrap.service",
"debros-node-node.service",
"debros-ipfs-cluster-bootstrap.service",
"debros-ipfs-cluster-node.service",
"debros-ipfs-bootstrap.service",
"debros-ipfs-node.service",
"debros-node.service",
"debros-ipfs-cluster.service",
"debros-ipfs.service",
// Note: RQLite is managed by node process, not as separate service
"debros-olric.service",
}
@ -440,30 +464,17 @@ func handleProdUpgrade(args []string) {
os.Exit(1)
}
// Detect node type from existing installation
nodeType := "node"
// Detect existing installation
if setup.IsUpdate() {
// Check if bootstrap config exists
bootstrapConfig := filepath.Join("/home/debros/.debros", "configs", "bootstrap.yaml")
if _, err := os.Stat(bootstrapConfig); err == nil {
nodeType = "bootstrap"
} else {
// Check data directory structure
bootstrapDataPath := filepath.Join("/home/debros/.debros", "data", "bootstrap")
if _, err := os.Stat(bootstrapDataPath); err == nil {
nodeType = "bootstrap"
}
}
fmt.Printf(" Detected node type: %s\n", nodeType)
fmt.Printf(" Detected existing installation\n")
} else {
fmt.Printf(" ⚠️ No existing installation detected, treating as fresh install\n")
fmt.Printf(" Use 'dbn prod install --bootstrap' for fresh bootstrap installation\n")
nodeType = "bootstrap" // Default for upgrade if nothing exists
fmt.Printf(" Use 'orama install' for fresh installation\n")
}
// Phase 3: Ensure secrets exist (preserves existing secrets)
fmt.Printf("\n🔐 Phase 3: Ensuring secrets...\n")
if err := setup.Phase3GenerateSecrets(nodeType == "bootstrap"); err != nil {
if err := setup.Phase3GenerateSecrets(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Secret generation failed: %v\n", err)
os.Exit(1)
}
@ -472,7 +483,6 @@ func handleProdUpgrade(args []string) {
// Preserve existing config settings (bootstrap_peers, domain, join_address, etc.)
enableHTTPS := false
domain := ""
bootstrapJoin := ""
// Helper function to extract multiaddr list from config
extractBootstrapPeers := func(configPath string) []string {
@ -507,18 +517,16 @@ func handleProdUpgrade(args []string) {
return peers
}
// Read existing node config to preserve bootstrap_peers and join_address
nodeConfigFile := "bootstrap.yaml"
if nodeType == "node" {
nodeConfigFile = "node.yaml"
}
nodeConfigPath := filepath.Join(debrosDir, "configs", nodeConfigFile)
// Read existing node config to preserve settings
// Unified config file name (no bootstrap/node distinction)
nodeConfigPath := filepath.Join(oramaDir, "configs", "node.yaml")
// Extract bootstrap peers from existing node config
bootstrapPeers := extractBootstrapPeers(nodeConfigPath)
// Extract peers from existing node config
peers := extractBootstrapPeers(nodeConfigPath)
// Extract VPS IP from advertise addresses and bootstrap join address
// Extract VPS IP and join address from advertise addresses
vpsIP := ""
joinAddress := ""
if data, err := os.ReadFile(nodeConfigPath); err == nil {
configStr := string(data)
for _, line := range strings.Split(configStr, "\n") {
@ -534,19 +542,19 @@ func handleProdUpgrade(args []string) {
// Extract IP from address (format: "IP:PORT" or "[IPv6]:PORT")
if host, _, err := net.SplitHostPort(addr); err == nil && host != "" && host != "localhost" {
vpsIP = host
// Continue loop to also check for bootstrap join address
// Continue loop to also check for join address
}
}
}
}
// Extract bootstrap join address if it's a bootstrap node
if nodeType == "bootstrap" && strings.HasPrefix(trimmed, "rqlite_join_address:") {
// Extract join address
if strings.HasPrefix(trimmed, "rqlite_join_address:") {
parts := strings.SplitN(trimmed, ":", 2)
if len(parts) > 1 {
bootstrapJoin = strings.TrimSpace(parts[1])
bootstrapJoin = strings.Trim(bootstrapJoin, "\"'")
if bootstrapJoin == "null" || bootstrapJoin == "" {
bootstrapJoin = ""
joinAddress = strings.TrimSpace(parts[1])
joinAddress = strings.Trim(joinAddress, "\"'")
if joinAddress == "null" || joinAddress == "" {
joinAddress = ""
}
}
}
@ -554,7 +562,7 @@ func handleProdUpgrade(args []string) {
}
// Read existing gateway config to preserve domain and HTTPS settings
gatewayConfigPath := filepath.Join(debrosDir, "configs", "gateway.yaml")
gatewayConfigPath := filepath.Join(oramaDir, "configs", "gateway.yaml")
if data, err := os.ReadFile(gatewayConfigPath); err == nil {
configStr := string(data)
if strings.Contains(configStr, "domain:") {
@ -578,8 +586,8 @@ func handleProdUpgrade(args []string) {
}
fmt.Printf(" Preserving existing configuration:\n")
if len(bootstrapPeers) > 0 {
fmt.Printf(" - Bootstrap peers: %d peer(s) preserved\n", len(bootstrapPeers))
if len(peers) > 0 {
fmt.Printf(" - Peers: %d peer(s) preserved\n", len(peers))
}
if vpsIP != "" {
fmt.Printf(" - VPS IP: %s\n", vpsIP)
@ -587,26 +595,26 @@ func handleProdUpgrade(args []string) {
if domain != "" {
fmt.Printf(" - Domain: %s\n", domain)
}
if bootstrapJoin != "" {
fmt.Printf(" - Bootstrap join address: %s\n", bootstrapJoin)
if joinAddress != "" {
fmt.Printf(" - Join address: %s\n", joinAddress)
}
// Phase 2c: Ensure services are properly initialized (fixes existing repos)
// Now that we have bootstrap peers and VPS IP, we can properly configure IPFS Cluster
// Now that we have peers and VPS IP, we can properly configure IPFS Cluster
fmt.Printf("\nPhase 2c: Ensuring services are properly initialized...\n")
if err := setup.Phase2cInitializeServices(nodeType, bootstrapPeers, vpsIP); err != nil {
if err := setup.Phase2cInitializeServices(peers, vpsIP); err != nil {
fmt.Fprintf(os.Stderr, "❌ Service initialization failed: %v\n", err)
os.Exit(1)
}
if err := setup.Phase4GenerateConfigs(nodeType == "bootstrap", bootstrapPeers, vpsIP, enableHTTPS, domain, bootstrapJoin); err != nil {
if err := setup.Phase4GenerateConfigs(peers, vpsIP, enableHTTPS, domain, joinAddress); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Config generation warning: %v\n", err)
fmt.Fprintf(os.Stderr, " Existing configs preserved\n")
}
// Phase 5: Update systemd services
fmt.Printf("\n🔧 Phase 5: Updating systemd services...\n")
if err := setup.Phase5CreateSystemdServices(nodeType, ""); err != nil {
if err := setup.Phase5CreateSystemdServices(); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Service update warning: %v\n", err)
}
@ -652,29 +660,23 @@ func handleProdUpgrade(args []string) {
func handleProdStatus() {
fmt.Printf("Production Environment Status\n\n")
// Check for all possible service names (bootstrap and node variants)
// Unified service names (no bootstrap/node distinction)
serviceNames := []string{
"debros-ipfs-bootstrap",
"debros-ipfs-node",
"debros-ipfs-cluster-bootstrap",
"debros-ipfs-cluster-node",
"debros-ipfs",
"debros-ipfs-cluster",
// Note: RQLite is managed by node process, not as separate service
"debros-olric",
"debros-node-bootstrap",
"debros-node-node",
"debros-node",
"debros-gateway",
}
// Friendly descriptions
descriptions := map[string]string{
"debros-ipfs-bootstrap": "IPFS Daemon (Bootstrap)",
"debros-ipfs-node": "IPFS Daemon (Node)",
"debros-ipfs-cluster-bootstrap": "IPFS Cluster (Bootstrap)",
"debros-ipfs-cluster-node": "IPFS Cluster (Node)",
"debros-olric": "Olric Cache Server",
"debros-node-bootstrap": "DeBros Node (Bootstrap) - includes RQLite",
"debros-node-node": "DeBros Node (Node) - includes RQLite",
"debros-gateway": "DeBros Gateway",
"debros-ipfs": "IPFS Daemon",
"debros-ipfs-cluster": "IPFS Cluster",
"debros-olric": "Olric Cache Server",
"debros-node": "DeBros Node (includes RQLite)",
"debros-gateway": "DeBros Gateway",
}
fmt.Printf("Services:\n")
@ -695,11 +697,11 @@ func handleProdStatus() {
}
fmt.Printf("\nDirectories:\n")
debrosDir := "/home/debros/.debros"
if _, err := os.Stat(debrosDir); err == nil {
fmt.Printf(" ✅ %s exists\n", debrosDir)
oramaDir := "/home/debros/.orama"
if _, err := os.Stat(oramaDir); err == nil {
fmt.Printf(" ✅ %s exists\n", oramaDir)
} else {
fmt.Printf(" ❌ %s not found\n", debrosDir)
fmt.Printf(" ❌ %s not found\n", oramaDir)
}
fmt.Printf("\nView logs with: dbn prod logs <service>\n")
@ -707,15 +709,15 @@ func handleProdStatus() {
// resolveServiceName resolves service aliases to actual systemd service names
func resolveServiceName(alias string) ([]string, error) {
// Service alias mapping
// Service alias mapping (unified - no bootstrap/node distinction)
aliases := map[string][]string{
"node": {"debros-node-bootstrap", "debros-node-node"},
"ipfs": {"debros-ipfs-bootstrap", "debros-ipfs-node"},
"cluster": {"debros-ipfs-cluster-bootstrap", "debros-ipfs-cluster-node"},
"ipfs-cluster": {"debros-ipfs-cluster-bootstrap", "debros-ipfs-cluster-node"},
"node": {"debros-node"},
"ipfs": {"debros-ipfs"},
"cluster": {"debros-ipfs-cluster"},
"ipfs-cluster": {"debros-ipfs-cluster"},
"gateway": {"debros-gateway"},
"olric": {"debros-olric"},
"rqlite": {"debros-node-bootstrap", "debros-node-node"}, // RQLite logs are in node logs
"rqlite": {"debros-node"}, // RQLite logs are in node logs
}
// Check if it's an alias
@ -757,7 +759,7 @@ func handleProdLogs(args []string) {
fmt.Fprintf(os.Stderr, "\nService aliases:\n")
fmt.Fprintf(os.Stderr, " node, ipfs, cluster, gateway, olric\n")
fmt.Fprintf(os.Stderr, "\nOr use full service name:\n")
fmt.Fprintf(os.Stderr, " debros-node-bootstrap, debros-gateway, etc.\n")
fmt.Fprintf(os.Stderr, " debros-node, debros-gateway, etc.\n")
os.Exit(1)
}
@ -772,7 +774,7 @@ func handleProdLogs(args []string) {
if err != nil {
fmt.Fprintf(os.Stderr, "❌ %v\n", err)
fmt.Fprintf(os.Stderr, "\nAvailable service aliases: node, ipfs, cluster, gateway, olric\n")
fmt.Fprintf(os.Stderr, "Or use full service name like: debros-node-bootstrap\n")
fmt.Fprintf(os.Stderr, "Or use full service name like: debros-node\n")
os.Exit(1)
}
@ -836,14 +838,11 @@ type portSpec struct {
}
var servicePorts = map[string][]portSpec{
"debros-gateway": {{"Gateway API", 6001}},
"debros-olric": {{"Olric HTTP", 3320}, {"Olric Memberlist", 3322}},
"debros-node-bootstrap": {{"RQLite HTTP", 5001}, {"RQLite Raft", 7001}, {"IPFS Cluster API", 9094}},
"debros-node-node": {{"RQLite HTTP", 5001}, {"RQLite Raft", 7001}, {"IPFS Cluster API", 9094}},
"debros-ipfs-bootstrap": {{"IPFS API", 4501}, {"IPFS Gateway", 8080}, {"IPFS Swarm", 4001}},
"debros-ipfs-node": {{"IPFS API", 4501}, {"IPFS Gateway", 8080}, {"IPFS Swarm", 4001}},
"debros-ipfs-cluster-bootstrap": {{"IPFS Cluster API", 9094}},
"debros-ipfs-cluster-node": {{"IPFS Cluster API", 9094}},
"debros-gateway": {{"Gateway API", 6001}},
"debros-olric": {{"Olric HTTP", 3320}, {"Olric Memberlist", 3322}},
"debros-node": {{"RQLite HTTP", 5001}, {"RQLite Raft", 7001}},
"debros-ipfs": {{"IPFS API", 4501}, {"IPFS Gateway", 8080}, {"IPFS Swarm", 4101}},
"debros-ipfs-cluster": {{"IPFS Cluster API", 9094}},
}
// defaultPorts is used for fresh installs/upgrades before unit files exist.
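
Neither defaultPorts nor ensurePortsAvailable appears in this diff; assuming the check works by briefly binding each TCP port and treating failure as "in use", the per-port probe might look like (names hypothetical):

// probePort: a port counts as free if it can briefly be bound.
func probePort(name string, port int) error {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		return fmt.Errorf("%s port %d appears to be in use: %w", name, port, err)
	}
	return ln.Close()
}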
@ -972,17 +971,17 @@ func verifyProductionRuntime(action string) error {
client := &http.Client{Timeout: 3 * time.Second}
if err := checkHTTP(client, "GET", "http://127.0.0.1:5001/status", "RQLite status"); err == nil {
} else if serviceExists("debros-node-bootstrap") || serviceExists("debros-node-node") {
} else if serviceExists("debros-node") {
issues = append(issues, err.Error())
}
if err := checkHTTP(client, "POST", "http://127.0.0.1:4501/api/v0/version", "IPFS API"); err == nil {
} else if serviceExists("debros-ipfs-bootstrap") || serviceExists("debros-ipfs-node") {
} else if serviceExists("debros-ipfs") {
issues = append(issues, err.Error())
}
if err := checkHTTP(client, "GET", "http://127.0.0.1:9094/health", "IPFS Cluster"); err == nil {
} else if serviceExists("debros-ipfs-cluster-bootstrap") || serviceExists("debros-ipfs-cluster-node") {
} else if serviceExists("debros-ipfs-cluster") {
issues = append(issues, err.Error())
}
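
checkHTTP is referenced throughout verifyProductionRuntime but not defined in this diff; a plausible shape, inferred only from the call sites above:

// checkHTTP sketch: issue the request and fold failures into a labeled error.
func checkHTTP(c *http.Client, method, url, label string) error {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return err
	}
	resp, err := c.Do(req)
	if err != nil {
		return fmt.Errorf("%s: unreachable at %s: %v", label, url, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("%s: HTTP %d from %s", label, resp.StatusCode, url)
	}
	return nil
}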
@ -1004,17 +1003,14 @@ func verifyProductionRuntime(action string) error {
// getProductionServices returns a list of all DeBros production service names that exist
func getProductionServices() []string {
// All possible service names (both bootstrap and node variants)
// Unified service names (no bootstrap/node distinction)
allServices := []string{
"debros-gateway",
"debros-node-node",
"debros-node-bootstrap",
"debros-node",
"debros-olric",
// Note: RQLite is managed by node process, not as separate service
"debros-ipfs-cluster-bootstrap",
"debros-ipfs-cluster-node",
"debros-ipfs-bootstrap",
"debros-ipfs-node",
"debros-ipfs-cluster",
"debros-ipfs",
}
// Filter to only existing services by checking if unit file exists
@ -1339,7 +1335,7 @@ func handleProdUninstall() {
}
fmt.Printf("⚠️ This will stop and remove all DeBros production services\n")
fmt.Printf("⚠️ Configuration and data will be preserved in /home/debros/.debros\n\n")
fmt.Printf("⚠️ Configuration and data will be preserved in /home/debros/.orama\n\n")
fmt.Printf("Continue? (yes/no): ")
reader := bufio.NewReader(os.Stdin)
@ -1353,14 +1349,11 @@ func handleProdUninstall() {
services := []string{
"debros-gateway",
"debros-node-node",
"debros-node-bootstrap",
"debros-node",
"debros-olric",
// Note: RQLite is managed by node process, not as separate service
"debros-ipfs-cluster-bootstrap",
"debros-ipfs-cluster-node",
"debros-ipfs-bootstrap",
"debros-ipfs-node",
"debros-ipfs-cluster",
"debros-ipfs",
}
fmt.Printf("Stopping services...\n")
@ -1373,6 +1366,162 @@ func handleProdUninstall() {
exec.Command("systemctl", "daemon-reload").Run()
fmt.Printf("✅ Services uninstalled\n")
fmt.Printf(" Configuration and data preserved in /home/debros/.debros\n")
fmt.Printf(" To remove all data: rm -rf /home/debros/.debros\n\n")
fmt.Printf(" Configuration and data preserved in /home/debros/.orama\n")
fmt.Printf(" To remove all data: rm -rf /home/debros/.orama\n\n")
}
// handleProdMigrate migrates from old bootstrap/node setup to unified node setup
func handleProdMigrate(args []string) {
// Parse flags
fs := flag.NewFlagSet("migrate", flag.ContinueOnError)
fs.SetOutput(os.Stderr)
dryRun := fs.Bool("dry-run", false, "Show what would be migrated without making changes")
if err := fs.Parse(args); err != nil {
if err == flag.ErrHelp {
return
}
fmt.Fprintf(os.Stderr, "❌ Failed to parse flags: %v\n", err)
os.Exit(1)
}
if os.Geteuid() != 0 && !*dryRun {
fmt.Fprintf(os.Stderr, "❌ Migration must be run as root (use sudo)\n")
os.Exit(1)
}
oramaDir := "/home/debros/.orama"
fmt.Printf("🔄 Checking for installations to migrate...\n\n")
// Check for old-style installations
oldDataDirs := []string{
filepath.Join(oramaDir, "data", "bootstrap"),
filepath.Join(oramaDir, "data", "node"),
}
oldServices := []string{
"debros-ipfs-bootstrap",
"debros-ipfs-node",
"debros-ipfs-cluster-bootstrap",
"debros-ipfs-cluster-node",
"debros-node-bootstrap",
"debros-node-node",
}
oldConfigs := []string{
filepath.Join(oramaDir, "configs", "bootstrap.yaml"),
}
// Check what needs to be migrated
var needsMigration bool
fmt.Printf("Checking data directories:\n")
for _, dir := range oldDataDirs {
if _, err := os.Stat(dir); err == nil {
fmt.Printf(" ⚠️ Found old directory: %s\n", dir)
needsMigration = true
}
}
fmt.Printf("\nChecking services:\n")
for _, svc := range oldServices {
unitPath := filepath.Join("/etc/systemd/system", svc+".service")
if _, err := os.Stat(unitPath); err == nil {
fmt.Printf(" ⚠️ Found old service: %s\n", svc)
needsMigration = true
}
}
fmt.Printf("\nChecking configs:\n")
for _, cfg := range oldConfigs {
if _, err := os.Stat(cfg); err == nil {
fmt.Printf(" ⚠️ Found old config: %s\n", cfg)
needsMigration = true
}
}
if !needsMigration {
fmt.Printf("\n✅ No migration needed - installation already uses unified structure\n")
return
}
if *dryRun {
fmt.Printf("\n📋 Dry run - no changes made\n")
fmt.Printf(" Run without --dry-run to perform migration\n")
return
}
fmt.Printf("\n🔄 Starting migration...\n")
// Stop old services first
fmt.Printf("\n Stopping old services...\n")
for _, svc := range oldServices {
if err := exec.Command("systemctl", "stop", svc).Run(); err == nil {
fmt.Printf(" ✓ Stopped %s\n", svc)
}
}
// Migrate data directories
newDataDir := filepath.Join(oramaDir, "data")
fmt.Printf("\n Migrating data directories...\n")
// Prefer bootstrap data if it exists, otherwise use node data
sourceDir := ""
if _, err := os.Stat(filepath.Join(oramaDir, "data", "bootstrap")); err == nil {
sourceDir = filepath.Join(oramaDir, "data", "bootstrap")
} else if _, err := os.Stat(filepath.Join(oramaDir, "data", "node")); err == nil {
sourceDir = filepath.Join(oramaDir, "data", "node")
}
if sourceDir != "" {
// Move contents to unified data directory
entries, _ := os.ReadDir(sourceDir)
for _, entry := range entries {
src := filepath.Join(sourceDir, entry.Name())
dst := filepath.Join(newDataDir, entry.Name())
if _, err := os.Stat(dst); os.IsNotExist(err) {
if err := os.Rename(src, dst); err == nil {
fmt.Printf(" ✓ Moved %s → %s\n", src, dst)
}
}
}
}
// Remove old data directories
for _, dir := range oldDataDirs {
if err := os.RemoveAll(dir); err == nil {
fmt.Printf(" ✓ Removed %s\n", dir)
}
}
// Migrate config files
fmt.Printf("\n Migrating config files...\n")
oldBootstrapConfig := filepath.Join(oramaDir, "configs", "bootstrap.yaml")
newNodeConfig := filepath.Join(oramaDir, "configs", "node.yaml")
if _, err := os.Stat(oldBootstrapConfig); err == nil {
if _, err := os.Stat(newNodeConfig); os.IsNotExist(err) {
if err := os.Rename(oldBootstrapConfig, newNodeConfig); err == nil {
fmt.Printf(" ✓ Renamed bootstrap.yaml → node.yaml\n")
}
} else {
os.Remove(oldBootstrapConfig)
fmt.Printf(" ✓ Removed old bootstrap.yaml (node.yaml already exists)\n")
}
}
// Remove old services
fmt.Printf("\n Removing old service files...\n")
for _, svc := range oldServices {
unitPath := filepath.Join("/etc/systemd/system", svc+".service")
if err := os.Remove(unitPath); err == nil {
fmt.Printf(" ✓ Removed %s\n", unitPath)
}
}
// Reload systemd
exec.Command("systemctl", "daemon-reload").Run()
fmt.Printf("\n✅ Migration complete!\n")
fmt.Printf(" Run 'sudo orama upgrade --restart' to regenerate services with new names\n\n")
}

View File

@ -8,11 +8,12 @@ import (
// Config represents the main configuration for a network node
type Config struct {
Node NodeConfig `yaml:"node"`
Database DatabaseConfig `yaml:"database"`
Discovery DiscoveryConfig `yaml:"discovery"`
Security SecurityConfig `yaml:"security"`
Logging LoggingConfig `yaml:"logging"`
Node NodeConfig `yaml:"node"`
Database DatabaseConfig `yaml:"database"`
Discovery DiscoveryConfig `yaml:"discovery"`
Security SecurityConfig `yaml:"security"`
Logging LoggingConfig `yaml:"logging"`
HTTPGateway HTTPGatewayConfig `yaml:"http_gateway"`
}
// NodeConfig contains node-specific configuration
@ -97,6 +98,46 @@ type LoggingConfig struct {
OutputFile string `yaml:"output_file"` // Empty for stdout
}
// HTTPGatewayConfig contains HTTP reverse proxy gateway configuration
type HTTPGatewayConfig struct {
Enabled bool `yaml:"enabled"` // Enable HTTP gateway
ListenAddr string `yaml:"listen_addr"` // Address to listen on (e.g., ":8080")
NodeName string `yaml:"node_name"` // Node name for routing
Routes map[string]RouteConfig `yaml:"routes"` // Service routes
HTTPS HTTPSConfig `yaml:"https"` // HTTPS/TLS configuration
SNI SNIConfig `yaml:"sni"` // SNI-based TCP routing configuration
}
// HTTPSConfig contains HTTPS/TLS configuration for the gateway
type HTTPSConfig struct {
Enabled bool `yaml:"enabled"` // Enable HTTPS (port 443)
Domain string `yaml:"domain"` // Primary domain (e.g., node-123.orama.network)
AutoCert bool `yaml:"auto_cert"` // Use Let's Encrypt for automatic certificate
CertFile string `yaml:"cert_file"` // Path to certificate file (if not using auto_cert)
KeyFile string `yaml:"key_file"` // Path to key file (if not using auto_cert)
CacheDir string `yaml:"cache_dir"` // Directory for Let's Encrypt certificate cache
HTTPPort int `yaml:"http_port"` // HTTP port for ACME challenge (default: 80)
HTTPSPort int `yaml:"https_port"` // HTTPS port (default: 443)
Email string `yaml:"email"` // Email for Let's Encrypt account
}
// SNIConfig contains SNI-based TCP routing configuration for port 7001
type SNIConfig struct {
Enabled bool `yaml:"enabled"` // Enable SNI-based TCP routing
ListenAddr string `yaml:"listen_addr"` // Address to listen on (e.g., ":7001")
Routes map[string]string `yaml:"routes"` // SNI hostname -> backend address mapping
CertFile string `yaml:"cert_file"` // Path to certificate file
KeyFile string `yaml:"key_file"` // Path to key file
}
// RouteConfig defines a single reverse proxy route
type RouteConfig struct {
PathPrefix string `yaml:"path_prefix"` // URL path prefix (e.g., "/rqlite/http")
BackendURL string `yaml:"backend_url"` // Backend service URL
Timeout time.Duration `yaml:"timeout"` // Request timeout
WebSocket bool `yaml:"websocket"` // Support WebSocket upgrades
}
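For orientation, a minimal Go sketch (values illustrative, not taken from the repo; needs the time package) showing how these types compose into a gateway route table:

    cfg := HTTPGatewayConfig{
        Enabled:    true,
        ListenAddr: ":8080",
        NodeName:   "node-1",
        Routes: map[string]RouteConfig{
            "rqlite-http": {
                PathPrefix: "/rqlite/http",
                BackendURL: "http://127.0.0.1:5001",
                Timeout:    30 * time.Second,
            },
        },
        HTTPS: HTTPSConfig{Enabled: false},
        SNI:   SNIConfig{Enabled: false},
    }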
// ClientConfig represents configuration for network clients
type ClientConfig struct {
AppName string `yaml:"app_name"`
@ -175,5 +216,11 @@ func DefaultConfig() *Config {
Level: "info",
Format: "console",
},
HTTPGateway: HTTPGatewayConfig{
Enabled: true,
ListenAddr: ":8080",
NodeName: "default",
Routes: make(map[string]RouteConfig),
},
}
}
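DefaultConfig gives a usable baseline; a caller would typically overlay a YAML file on top of it — a sketch assuming gopkg.in/yaml.v3 (the repo's actual loader may differ):

    cfg := DefaultConfig()
    if data, err := os.ReadFile("node.yaml"); err == nil {
        if err := yaml.Unmarshal(data, cfg); err != nil {
            log.Fatalf("parse node.yaml: %v", err)
        }
    }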

View File

@ -6,13 +6,13 @@ import (
"path/filepath"
)
// ConfigDir returns the path to the DeBros config directory (~/.debros).
// ConfigDir returns the path to the DeBros config directory (~/.orama).
func ConfigDir() (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", fmt.Errorf("failed to determine home directory: %w", err)
}
return filepath.Join(home, ".debros"), nil
return filepath.Join(home, ".orama"), nil
}
// EnsureConfigDir creates the config directory if it does not exist.
@ -29,7 +29,7 @@ func EnsureConfigDir() (string, error) {
// DefaultPath returns the path to the config file for the given component name.
// component should be e.g., "node.yaml", "bootstrap.yaml", "gateway.yaml"
// It checks ~/.debros/data/, ~/.debros/configs/, and ~/.debros/ for backward compatibility.
// It checks ~/.orama/data/, ~/.orama/configs/, and ~/.orama/ for backward compatibility.
// If component is already an absolute path, it returns it as-is.
func DefaultPath(component string) (string, error) {
// If component is already an absolute path, return it directly
@ -53,13 +53,13 @@ func DefaultPath(component string) (string, error) {
gatewayDefault = dataPath
}
// First check in ~/.debros/configs/ (production installer location)
// First check in ~/.orama/configs/ (production installer location)
configsPath := filepath.Join(dir, "configs", component)
if _, err := os.Stat(configsPath); err == nil {
return configsPath, nil
}
// Fallback to ~/.debros/ (legacy/development location)
// Fallback to ~/.orama/ (legacy/development location)
legacyPath := filepath.Join(dir, component)
if _, err := os.Stat(legacyPath); err == nil {
return legacyPath, nil
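A short usage sketch for the lookup order above:

    // Resolves ~/.orama/configs/node.yaml if present, else legacy ~/.orama/node.yaml.
    path, err := DefaultPath("node.yaml")
    if err != nil {
        log.Fatalf("resolve config: %v", err)
    }
    fmt.Println("using config:", path)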

View File

@ -14,24 +14,24 @@ import (
// ConfigEnsurer handles all config file creation and validation
type ConfigEnsurer struct {
debrosDir string
oramaDir string
}
// NewConfigEnsurer creates a new config ensurer
func NewConfigEnsurer(debrosDir string) *ConfigEnsurer {
func NewConfigEnsurer(oramaDir string) *ConfigEnsurer {
return &ConfigEnsurer{
debrosDir: debrosDir,
oramaDir: oramaDir,
}
}
// EnsureAll ensures all necessary config files and secrets exist
func (ce *ConfigEnsurer) EnsureAll() error {
// Create directories
if err := os.MkdirAll(ce.debrosDir, 0755); err != nil {
return fmt.Errorf("failed to create .debros directory: %w", err)
if err := os.MkdirAll(ce.oramaDir, 0755); err != nil {
return fmt.Errorf("failed to create .orama directory: %w", err)
}
if err := os.MkdirAll(filepath.Join(ce.debrosDir, "logs"), 0755); err != nil {
if err := os.MkdirAll(filepath.Join(ce.oramaDir, "logs"), 0755); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
@ -75,7 +75,7 @@ func (ce *ConfigEnsurer) EnsureAll() error {
// ensureSharedSecrets creates cluster secret and swarm key if they don't exist
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
secretPath := filepath.Join(ce.debrosDir, "cluster-secret")
secretPath := filepath.Join(ce.oramaDir, "cluster-secret")
if _, err := os.Stat(secretPath); os.IsNotExist(err) {
secret := generateRandomHex(64) // 64 hex chars = 32 bytes
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
@ -84,7 +84,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
fmt.Printf("✓ Generated cluster secret\n")
}
swarmKeyPath := filepath.Join(ce.debrosDir, "swarm.key")
swarmKeyPath := filepath.Join(ce.oramaDir, "swarm.key")
if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
keyHex := strings.ToUpper(generateRandomHex(64))
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
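generateRandomHex is not shown in this hunk; a plausible implementation, assuming crypto-grade randomness is intended for a PSK:

    import (
        "crypto/rand"
        "encoding/hex"
    )

    // Assumed sketch: n hex characters from n/2 random bytes.
    func generateRandomHex(n int) string {
        buf := make([]byte, n/2)
        if _, err := rand.Read(buf); err != nil {
            panic(err) // no entropy available; unrecoverable for an installer
        }
        return hex.EncodeToString(buf)
    }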
@ -99,7 +99,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
identityPath := filepath.Join(nodeDir, "identity.key")
// Create identity if missing
@ -134,69 +134,44 @@ func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
// ensureNodeConfig creates or updates a node configuration
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, bootstrapAddrs []string) error {
nodeDir := filepath.Join(ce.debrosDir, nodeSpec.DataDir)
configPath := filepath.Join(ce.debrosDir, nodeSpec.ConfigFilename)
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
configPath := filepath.Join(ce.oramaDir, nodeSpec.ConfigFilename)
if err := os.MkdirAll(nodeDir, 0755); err != nil {
return fmt.Errorf("failed to create node directory: %w", err)
}
if nodeSpec.Role == "bootstrap" {
// Generate bootstrap config
data := templates.BootstrapConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
BootstrapPeers: bootstrapAddrs,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
}
config, err := templates.RenderBootstrapConfig(data)
if err != nil {
return fmt.Errorf("failed to render bootstrap config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write bootstrap config: %w", err)
}
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
} else {
// Generate regular node config
data := templates.NodeConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
BootstrapPeers: bootstrapAddrs,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
}
config, err := templates.RenderNodeConfig(data)
if err != nil {
return fmt.Errorf("failed to render node config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write node config: %w", err)
}
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
// Generate node config (unified - no bootstrap/node distinction)
data := templates.NodeConfigData{
NodeID: nodeSpec.Name,
P2PPort: nodeSpec.P2PPort,
DataDir: nodeDir,
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
BootstrapPeers: bootstrapAddrs,
ClusterAPIPort: nodeSpec.ClusterAPIPort,
IPFSAPIPort: nodeSpec.IPFSAPIPort,
UnifiedGatewayPort: nodeSpec.UnifiedGatewayPort,
}
config, err := templates.RenderNodeConfig(data)
if err != nil {
return fmt.Errorf("failed to render node config: %w", err)
}
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
return fmt.Errorf("failed to write node config: %w", err)
}
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
return nil
}
// ensureGateway creates gateway config
func (ce *ConfigEnsurer) ensureGateway(bootstrapAddrs []string) error {
configPath := filepath.Join(ce.debrosDir, "gateway.yaml")
configPath := filepath.Join(ce.oramaDir, "gateway.yaml")
// Get first bootstrap's cluster API port for default
topology := DefaultTopology()
@ -225,7 +200,7 @@ func (ce *ConfigEnsurer) ensureGateway(bootstrapAddrs []string) error {
// ensureOlric creates Olric config
func (ce *ConfigEnsurer) ensureOlric() error {
configPath := filepath.Join(ce.debrosDir, "olric-config.yaml")
configPath := filepath.Join(ce.oramaDir, "olric-config.yaml")
topology := DefaultTopology()
data := templates.OlricConfigData{

View File

@ -164,43 +164,42 @@ func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResu
// HealthCheckWithRetry performs a health check with retry logic
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
fmt.Fprintf(pm.logWriter, "\n⚕️ Validating cluster health...\n")
fmt.Fprintf(pm.logWriter, "⚕️ Validating cluster health...")
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
spinnerFrames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
spinnerIndex := 0
for attempt := 1; attempt <= retries; attempt++ {
// Perform all checks
ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)
// Log results
if attempt == 1 || attempt == retries || (attempt%3 == 0) {
fmt.Fprintf(pm.logWriter, " Attempt %d/%d:\n", attempt, retries)
pm.logHealthCheckResult(pm.logWriter, " ", ipfsResult)
pm.logHealthCheckResult(pm.logWriter, " ", rqliteResult)
pm.logHealthCheckResult(pm.logWriter, " ", libp2pResult)
}
// All checks must pass
if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
fmt.Fprintf(pm.logWriter, "\n✓ All health checks passed!\n")
fmt.Fprintf(pm.logWriter, "\r✓ Cluster health validated\n")
return true
}
// Show spinner progress
fmt.Fprintf(pm.logWriter, "\r%s Validating cluster health... (%d/%d)", spinnerFrames[spinnerIndex%len(spinnerFrames)], attempt, retries)
spinnerIndex++
if attempt < retries {
select {
case <-time.After(retryInterval):
continue
case <-deadlineCtx.Done():
fmt.Fprintf(pm.logWriter, "\n❌ Health check timeout reached\n")
fmt.Fprintf(pm.logWriter, "\r❌ Health check timeout reached\n")
return false
}
}
}
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed after %d attempts\n", retries)
fmt.Fprintf(pm.logWriter, "\r❌ Health checks failed - services not ready\n")
return false
}

View File

@ -19,7 +19,7 @@ import (
// ProcessManager manages all dev environment processes
type ProcessManager struct {
debrosDir string
oramaDir string
pidsDir string
processes map[string]*ManagedProcess
mutex sync.Mutex
@ -35,12 +35,12 @@ type ManagedProcess struct {
}
// NewProcessManager creates a new process manager
func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
pidsDir := filepath.Join(debrosDir, ".pids")
func NewProcessManager(oramaDir string, logWriter io.Writer) *ProcessManager {
pidsDir := filepath.Join(oramaDir, ".pids")
os.MkdirAll(pidsDir, 0755)
return &ProcessManager{
debrosDir: debrosDir,
oramaDir: oramaDir,
pidsDir: pidsDir,
processes: make(map[string]*ManagedProcess),
logWriter: logWriter,
@ -49,7 +49,8 @@ func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
// StartAll starts all development services
func (pm *ProcessManager) StartAll(ctx context.Context) error {
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n\n")
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n")
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
topology := DefaultTopology()
@ -76,6 +77,8 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
}
}
fmt.Fprintf(pm.logWriter, "\n")
// Run health checks with retries before declaring success
const (
healthCheckRetries = 20
@ -84,15 +87,45 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
)
if !pm.HealthCheckWithRetry(ctx, ipfsNodes, healthCheckRetries, healthCheckInterval, healthCheckTimeout) {
fmt.Fprintf(pm.logWriter, "\n❌ Development environment failed health checks - stopping all services\n")
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed - stopping all services\n")
pm.StopAll(ctx)
return fmt.Errorf("cluster health checks failed - services stopped")
}
fmt.Fprintf(pm.logWriter, "\n✅ Development environment started!\n\n")
// Print success and key endpoints
pm.printStartupSummary(topology)
return nil
}
// printStartupSummary prints the final startup summary with key endpoints
func (pm *ProcessManager) printStartupSummary(topology *Topology) {
fmt.Fprintf(pm.logWriter, "\n✅ Development environment ready!\n")
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
fmt.Fprintf(pm.logWriter, "📡 Access your nodes via unified gateway ports:\n\n")
for _, node := range topology.Nodes {
fmt.Fprintf(pm.logWriter, " %s:\n", node.Name)
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/health\n", node.UnifiedGatewayPort)
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/rqlite/http/db/execute\n", node.UnifiedGatewayPort)
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/cluster/health\n\n", node.UnifiedGatewayPort)
}
fmt.Fprintf(pm.logWriter, "🌐 Main Gateway:\n")
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/v1/status\n\n", topology.GatewayPort)
fmt.Fprintf(pm.logWriter, "📊 Other Services:\n")
fmt.Fprintf(pm.logWriter, " Olric: http://localhost:%d\n", topology.OlricHTTPPort)
fmt.Fprintf(pm.logWriter, " Anon SOCKS: 127.0.0.1:%d\n\n", topology.AnonSOCKSPort)
fmt.Fprintf(pm.logWriter, "📝 Useful Commands:\n")
fmt.Fprintf(pm.logWriter, " ./bin/orama dev status - Check service status\n")
fmt.Fprintf(pm.logWriter, " ./bin/orama dev logs node-1 - View logs\n")
fmt.Fprintf(pm.logWriter, " ./bin/orama dev down - Stop all services\n\n")
fmt.Fprintf(pm.logWriter, "📂 Logs: %s/logs\n", pm.oramaDir)
fmt.Fprintf(pm.logWriter, "⚙️ Config: %s\n\n", pm.oramaDir)
}
// StopAll stops all running processes
func (pm *ProcessManager) StopAll(ctx context.Context) error {
fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n\n")
@ -204,10 +237,10 @@ func (pm *ProcessManager) Status(ctx context.Context) {
fmt.Fprintf(pm.logWriter, " %-25s %s (%s)\n", svc.name, status, portStr)
}
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.debrosDir)
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.oramaDir)
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node2.yaml", "node3.yaml", "node4.yaml", "gateway.yaml", "olric-config.yaml"}
for _, f := range configFiles {
path := filepath.Join(pm.debrosDir, f)
path := filepath.Join(pm.oramaDir, f)
if _, err := os.Stat(path); err == nil {
fmt.Fprintf(pm.logWriter, " ✓ %s\n", f)
} else {
@ -215,7 +248,7 @@ func (pm *ProcessManager) Status(ctx context.Context) {
}
}
fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.debrosDir)
fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.oramaDir)
}
// Helper functions for starting individual services
@ -226,7 +259,7 @@ func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
for _, nodeSpec := range topology.Nodes {
nodes = append(nodes, ipfsNodeInfo{
name: nodeSpec.Name,
ipfsPath: filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs/repo"),
ipfsPath: filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs/repo"),
apiPort: nodeSpec.IPFSAPIPort,
swarmPort: nodeSpec.IPFSSwarmPort,
gatewayPort: nodeSpec.IPFSGatewayPort,
@ -240,7 +273,7 @@ func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
func (pm *ProcessManager) startNodes(ctx context.Context) error {
topology := DefaultTopology()
for _, nodeSpec := range topology.Nodes {
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
if err := pm.startNode(nodeSpec.Name, nodeSpec.ConfigFilename, logPath); err != nil {
return fmt.Errorf("failed to start %s: %w", nodeSpec.Name, err)
}
@ -485,7 +518,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
}
// Copy swarm key
swarmKeyPath := filepath.Join(pm.debrosDir, "swarm.key")
swarmKeyPath := filepath.Join(pm.oramaDir, "swarm.key")
if data, err := os.ReadFile(swarmKeyPath); err == nil {
os.WriteFile(filepath.Join(nodes[i].ipfsPath, "swarm.key"), data, 0600)
}
@ -505,7 +538,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
// Phase 2: Start all IPFS daemons
for i := range nodes {
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-%s.pid", nodes[i].name))
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
cmd := exec.CommandContext(ctx, "ipfs", "daemon", "--enable-pubsub-experiment", "--repo-dir="+nodes[i].ipfsPath)
logFile, _ := os.Create(logPath)
@ -556,7 +589,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
ipfsPort int
}{
nodeSpec.Name,
filepath.Join(pm.debrosDir, nodeSpec.DataDir, "ipfs-cluster"),
filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs-cluster"),
nodeSpec.ClusterAPIPort,
nodeSpec.ClusterPort,
nodeSpec.IPFSAPIPort,
@ -573,7 +606,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
}
// Read cluster secret to ensure all nodes use the same PSK
secretPath := filepath.Join(pm.debrosDir, "cluster-secret")
secretPath := filepath.Join(pm.oramaDir, "cluster-secret")
clusterSecret, err := os.ReadFile(secretPath)
if err != nil {
return fmt.Errorf("failed to read cluster secret: %w", err)
@ -622,7 +655,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
// Start bootstrap cluster service
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
cmd = exec.CommandContext(ctx, "ipfs-cluster-service", "daemon")
cmd.Env = append(os.Environ(), fmt.Sprintf("IPFS_CLUSTER_PATH=%s", node.clusterPath))
@ -696,7 +729,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
// Start follower cluster service with bootstrap flag
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
args := []string{"daemon"}
if bootstrapMultiaddr != "" {
@ -943,8 +976,8 @@ func (pm *ProcessManager) ensureIPFSClusterPorts(clusterPath string, restAPIPort
func (pm *ProcessManager) startOlric(ctx context.Context) error {
pidPath := filepath.Join(pm.pidsDir, "olric.pid")
logPath := filepath.Join(pm.debrosDir, "logs", "olric.log")
configPath := filepath.Join(pm.debrosDir, "olric-config.yaml")
logPath := filepath.Join(pm.oramaDir, "logs", "olric.log")
configPath := filepath.Join(pm.oramaDir, "olric-config.yaml")
cmd := exec.CommandContext(ctx, "olric-server")
cmd.Env = append(os.Environ(), fmt.Sprintf("OLRIC_SERVER_CONFIG=%s", configPath))
@ -969,7 +1002,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
}
pidPath := filepath.Join(pm.pidsDir, "anon.pid")
logPath := filepath.Join(pm.debrosDir, "logs", "anon.log")
logPath := filepath.Join(pm.oramaDir, "logs", "anon.log")
cmd := exec.CommandContext(ctx, "npx", "anyone-client")
logFile, _ := os.Create(logPath)
@ -1007,7 +1040,7 @@ func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
func (pm *ProcessManager) startGateway(ctx context.Context) error {
pidPath := filepath.Join(pm.pidsDir, "gateway.pid")
logPath := filepath.Join(pm.debrosDir, "logs", "gateway.log")
logPath := filepath.Join(pm.oramaDir, "logs", "gateway.log")
cmd := exec.Command("./bin/gateway", "--config", "gateway.yaml")
logFile, _ := os.Create(logPath)

View File

@ -7,7 +7,7 @@ type NodeSpec struct {
Name string // bootstrap, bootstrap2, node2, node3, node4
Role string // "bootstrap" or "node"
ConfigFilename string // bootstrap.yaml, bootstrap2.yaml, node2.yaml, etc.
DataDir string // relative path from .debros root
DataDir string // relative path from .orama root
P2PPort int // LibP2P listen port
IPFSAPIPort int // IPFS API port
IPFSSwarmPort int // IPFS Swarm port
@ -16,6 +16,7 @@ type NodeSpec struct {
RQLiteRaftPort int // RQLite Raft consensus port
ClusterAPIPort int // IPFS Cluster REST API port
ClusterPort int // IPFS Cluster P2P port
UnifiedGatewayPort int // Unified gateway port (proxies all services)
RQLiteJoinTarget string // which bootstrap RQLite port to join (leave empty for bootstraps that lead)
ClusterJoinTarget string // which bootstrap cluster to join (leave empty for bootstrap that leads)
}
@ -33,86 +34,91 @@ type Topology struct {
func DefaultTopology() *Topology {
return &Topology{
Nodes: []NodeSpec{
{
Name: "bootstrap",
Role: "bootstrap",
ConfigFilename: "bootstrap.yaml",
DataDir: "bootstrap",
P2PPort: 4001,
IPFSAPIPort: 4501,
IPFSSwarmPort: 4101,
IPFSGatewayPort: 7501,
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
ClusterPort: 9096,
RQLiteJoinTarget: "",
ClusterJoinTarget: "",
},
{
Name: "bootstrap2",
Role: "bootstrap",
ConfigFilename: "bootstrap2.yaml",
DataDir: "bootstrap2",
P2PPort: 4011,
IPFSAPIPort: 4511,
IPFSSwarmPort: 4111,
IPFSGatewayPort: 7511,
RQLiteHTTPPort: 5011,
RQLiteRaftPort: 7011,
ClusterAPIPort: 9104,
ClusterPort: 9106,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node2",
Role: "node",
ConfigFilename: "node2.yaml",
DataDir: "node2",
P2PPort: 4002,
IPFSAPIPort: 4502,
IPFSSwarmPort: 4102,
IPFSGatewayPort: 7502,
RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002,
ClusterAPIPort: 9114,
ClusterPort: 9116,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node3",
Role: "node",
ConfigFilename: "node3.yaml",
DataDir: "node3",
P2PPort: 4003,
IPFSAPIPort: 4503,
IPFSSwarmPort: 4103,
IPFSGatewayPort: 7503,
RQLiteHTTPPort: 5003,
RQLiteRaftPort: 7003,
ClusterAPIPort: 9124,
ClusterPort: 9126,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node4",
Role: "node",
ConfigFilename: "node4.yaml",
DataDir: "node4",
P2PPort: 4004,
IPFSAPIPort: 4504,
IPFSSwarmPort: 4104,
IPFSGatewayPort: 7504,
RQLiteHTTPPort: 5004,
RQLiteRaftPort: 7004,
ClusterAPIPort: 9134,
ClusterPort: 9136,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "bootstrap",
Role: "bootstrap",
ConfigFilename: "bootstrap.yaml",
DataDir: "bootstrap",
P2PPort: 4001,
IPFSAPIPort: 4501,
IPFSSwarmPort: 4101,
IPFSGatewayPort: 7501,
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
ClusterPort: 9096,
UnifiedGatewayPort: 6001,
RQLiteJoinTarget: "",
ClusterJoinTarget: "",
},
{
Name: "bootstrap2",
Role: "bootstrap",
ConfigFilename: "bootstrap2.yaml",
DataDir: "bootstrap2",
P2PPort: 4011,
IPFSAPIPort: 4511,
IPFSSwarmPort: 4111,
IPFSGatewayPort: 7511,
RQLiteHTTPPort: 5011,
RQLiteRaftPort: 7011,
ClusterAPIPort: 9104,
ClusterPort: 9106,
UnifiedGatewayPort: 6002,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node2",
Role: "node",
ConfigFilename: "node2.yaml",
DataDir: "node2",
P2PPort: 4002,
IPFSAPIPort: 4502,
IPFSSwarmPort: 4102,
IPFSGatewayPort: 7502,
RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002,
ClusterAPIPort: 9114,
ClusterPort: 9116,
UnifiedGatewayPort: 6003,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node3",
Role: "node",
ConfigFilename: "node3.yaml",
DataDir: "node3",
P2PPort: 4003,
IPFSAPIPort: 4503,
IPFSSwarmPort: 4103,
IPFSGatewayPort: 7503,
RQLiteHTTPPort: 5003,
RQLiteRaftPort: 7003,
ClusterAPIPort: 9124,
ClusterPort: 9126,
UnifiedGatewayPort: 6004,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
{
Name: "node4",
Role: "node",
ConfigFilename: "node4.yaml",
DataDir: "node4",
P2PPort: 4004,
IPFSAPIPort: 4504,
IPFSSwarmPort: 4104,
IPFSGatewayPort: 7504,
RQLiteHTTPPort: 5004,
RQLiteRaftPort: 7004,
ClusterAPIPort: 9134,
ClusterPort: 9136,
UnifiedGatewayPort: 6005,
RQLiteJoinTarget: "localhost:7001",
ClusterJoinTarget: "localhost:9096",
},
},
GatewayPort: 6001,
OlricHTTPPort: 3320,
@ -136,6 +142,7 @@ func (t *Topology) AllPorts() []int {
node.RQLiteRaftPort,
node.ClusterAPIPort,
node.ClusterPort,
node.UnifiedGatewayPort,
)
}
@ -163,6 +170,7 @@ func (t *Topology) PortMap() map[int]string {
portMap[node.RQLiteRaftPort] = fmt.Sprintf("%s RQLite Raft", node.Name)
portMap[node.ClusterAPIPort] = fmt.Sprintf("%s IPFS Cluster API", node.Name)
portMap[node.ClusterPort] = fmt.Sprintf("%s IPFS Cluster P2P", node.Name)
portMap[node.UnifiedGatewayPort] = fmt.Sprintf("%s Unified Gateway", node.Name)
}
portMap[t.GatewayPort] = "Gateway"
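Together, AllPorts and PortMap make a preflight check cheap; a hypothetical example (net, fmt, and log usage assumed):

    topo := DefaultTopology()
    names := topo.PortMap()
    for _, port := range topo.AllPorts() {
        ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
        if err != nil {
            log.Fatalf("port %d (%s) already in use: %v", port, names[port], err)
        }
        ln.Close()
    }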

View File

@ -20,13 +20,13 @@ import (
// ConfigGenerator manages generation of node, gateway, and service configs
type ConfigGenerator struct {
debrosDir string
oramaDir string
}
// NewConfigGenerator creates a new config generator
func NewConfigGenerator(debrosDir string) *ConfigGenerator {
func NewConfigGenerator(oramaDir string) *ConfigGenerator {
return &ConfigGenerator{
debrosDir: debrosDir,
oramaDir: oramaDir,
}
}
@ -93,102 +93,62 @@ func inferBootstrapIP(bootstrapPeers []string, vpsIP string) string {
return ""
}
// GenerateNodeConfig generates node.yaml configuration
func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers []string, vpsIP string, bootstrapJoin string) (string, error) {
var nodeID string
if isBootstrap {
nodeID = "bootstrap"
} else {
nodeID = "node"
// GenerateNodeConfig generates node.yaml configuration (unified - no bootstrap/node distinction)
func (cg *ConfigGenerator) GenerateNodeConfig(bootstrapPeers []string, vpsIP string, joinAddress string, domain string) (string, error) {
// Generate node ID from domain or use default
nodeID := "node"
if domain != "" {
// Extract node identifier from domain (e.g., "node-123" from "node-123.orama.network")
parts := strings.Split(domain, ".")
if len(parts) > 0 {
nodeID = parts[0]
}
}
// Determine advertise addresses
// For bootstrap: use vpsIP if provided, otherwise localhost
// For regular nodes: infer from bootstrap peers or use vpsIP
// Determine advertise addresses - use vpsIP if provided
var httpAdvAddr, raftAdvAddr string
if isBootstrap {
if vpsIP != "" {
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
} else {
httpAdvAddr = "localhost:5001"
raftAdvAddr = "localhost:7001"
}
if vpsIP != "" {
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
} else {
// Regular node: infer from bootstrap peers or use vpsIP
bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
if bootstrapIP != "" {
// Use the bootstrap IP for advertise addresses (this node should be reachable at same network)
// If vpsIP is provided, use it; otherwise use bootstrap IP
if vpsIP != "" {
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
} else {
httpAdvAddr = net.JoinHostPort(bootstrapIP, "5001")
raftAdvAddr = net.JoinHostPort(bootstrapIP, "7001")
}
} else {
// Fallback to localhost if nothing can be inferred
httpAdvAddr = "localhost:5001"
raftAdvAddr = "localhost:7001"
}
// Fallback to localhost if no vpsIP
httpAdvAddr = "localhost:5001"
raftAdvAddr = "localhost:7001"
}
if isBootstrap {
// Bootstrap node - populate peer list and optional join address
data := templates.BootstrapConfigData{
NodeID: nodeID,
P2PPort: 4001,
DataDir: filepath.Join(cg.debrosDir, "data", "bootstrap"),
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
BootstrapPeers: bootstrapPeers,
RQLiteJoinAddress: bootstrapJoin,
HTTPAdvAddress: httpAdvAddr,
RaftAdvAddress: raftAdvAddr,
}
return templates.RenderBootstrapConfig(data)
}
// Regular node - infer join address from bootstrap peers
// MUST extract from bootstrap_peers - no fallback to vpsIP (would cause self-join)
// Determine RQLite join address
var rqliteJoinAddr string
bootstrapIP := inferBootstrapIP(bootstrapPeers, "")
if bootstrapIP == "" {
// Try to extract from first bootstrap peer directly as fallback
if len(bootstrapPeers) > 0 {
if extractedIP := extractIPFromMultiaddr(bootstrapPeers[0]); extractedIP != "" {
bootstrapIP = extractedIP
if joinAddress != "" {
// Use explicitly provided join address
rqliteJoinAddr = joinAddress
} else if len(bootstrapPeers) > 0 {
// Infer join address from bootstrap peers
bootstrapIP := inferBootstrapIP(bootstrapPeers, "")
if bootstrapIP != "" {
rqliteJoinAddr = net.JoinHostPort(bootstrapIP, "7001")
// Validate that join address doesn't match this node's own raft address (would cause self-join)
if rqliteJoinAddr == raftAdvAddr {
rqliteJoinAddr = "" // Clear it - this is the first node
}
}
// If still no IP, fail - we cannot join without a valid bootstrap address
if bootstrapIP == "" {
return "", fmt.Errorf("cannot determine RQLite join address: failed to extract IP from bootstrap peers %v (required for non-bootstrap nodes)", bootstrapPeers)
}
}
rqliteJoinAddr = net.JoinHostPort(bootstrapIP, "7001")
// Validate that join address doesn't match this node's own raft address (would cause self-join)
if rqliteJoinAddr == raftAdvAddr {
return "", fmt.Errorf("invalid configuration: rqlite_join_address (%s) cannot match raft_adv_address (%s) - node cannot join itself", rqliteJoinAddr, raftAdvAddr)
}
// If no join address and no peers, this is the first node - it will create the cluster
// Unified data directory (no bootstrap/node distinction)
data := templates.NodeConfigData{
NodeID: nodeID,
P2PPort: 4001,
DataDir: filepath.Join(cg.debrosDir, "data", "node"),
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
RQLiteJoinAddress: rqliteJoinAddr,
BootstrapPeers: bootstrapPeers,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
HTTPAdvAddress: httpAdvAddr,
RaftAdvAddress: raftAdvAddr,
NodeID: nodeID,
P2PPort: 4001,
DataDir: filepath.Join(cg.oramaDir, "data"),
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
RQLiteJoinAddress: rqliteJoinAddr,
BootstrapPeers: bootstrapPeers,
ClusterAPIPort: 9094,
IPFSAPIPort: 4501,
HTTPAdvAddress: httpAdvAddr,
RaftAdvAddress: raftAdvAddr,
UnifiedGatewayPort: 6001,
Domain: domain,
}
return templates.RenderNodeConfig(data)
}
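The join-address inference above leans on extractIPFromMultiaddr, which is referenced but not shown; a minimal sketch of that kind of parsing, offered as an assumption rather than the repo's implementation:

    // Assumed sketch: pull the host out of a multiaddr such as
    // /ip4/203.0.113.7/tcp/4001/p2p/<peer-id>.
    func extractIPFromMultiaddr(addr string) string {
        parts := strings.Split(addr, "/")
        for i := 0; i+1 < len(parts); i++ {
            switch parts[i] {
            case "ip4", "ip6", "dns4", "dns6":
                return parts[i+1]
            }
        }
        return ""
    }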
@ -197,7 +157,7 @@ func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers [
func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
tlsCacheDir := ""
if enableHTTPS {
tlsCacheDir = filepath.Join(cg.debrosDir, "tls-cache")
tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
}
data := templates.GatewayConfigData{
@ -226,15 +186,13 @@ func (cg *ConfigGenerator) GenerateOlricConfig(bindAddr string, httpPort, member
// SecretGenerator manages generation of shared secrets and keys
type SecretGenerator struct {
debrosDir string
clusterSecretOverride string
oramaDir string
}
// NewSecretGenerator creates a new secret generator
func NewSecretGenerator(debrosDir string, clusterSecretOverride string) *SecretGenerator {
func NewSecretGenerator(oramaDir string) *SecretGenerator {
return &SecretGenerator{
debrosDir: debrosDir,
clusterSecretOverride: clusterSecretOverride,
oramaDir: oramaDir,
}
}
@ -255,7 +213,7 @@ func ValidateClusterSecret(secret string) error {
// EnsureClusterSecret gets or generates the IPFS Cluster secret
func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
secretPath := filepath.Join(sg.debrosDir, "secrets", "cluster-secret")
secretPath := filepath.Join(sg.oramaDir, "secrets", "cluster-secret")
secretDir := filepath.Dir(secretPath)
// Ensure secrets directory exists
@ -263,31 +221,6 @@ func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
return "", fmt.Errorf("failed to create secrets directory: %w", err)
}
// Use override if provided
if sg.clusterSecretOverride != "" {
secret := strings.TrimSpace(sg.clusterSecretOverride)
if err := ValidateClusterSecret(secret); err != nil {
return "", err
}
needsWrite := true
if data, err := os.ReadFile(secretPath); err == nil {
if strings.TrimSpace(string(data)) == secret {
needsWrite = false
}
}
if needsWrite {
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
return "", fmt.Errorf("failed to save cluster secret override: %w", err)
}
}
if err := ensureSecretFilePermissions(secretPath); err != nil {
return "", err
}
return secret, nil
}
// Try to read existing secret
if data, err := os.ReadFile(secretPath); err == nil {
secret := strings.TrimSpace(string(data))
@ -341,7 +274,7 @@ func ensureSecretFilePermissions(secretPath string) error {
// EnsureSwarmKey gets or generates the IPFS private swarm key
func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
swarmKeyPath := filepath.Join(sg.debrosDir, "secrets", "swarm.key")
swarmKeyPath := filepath.Join(sg.oramaDir, "secrets", "swarm.key")
secretDir := filepath.Dir(swarmKeyPath)
// Ensure secrets directory exists
@ -373,9 +306,10 @@ func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
return []byte(content), nil
}
// EnsureNodeIdentity gets or generates the node's LibP2P identity
func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error) {
keyDir := filepath.Join(sg.debrosDir, "data", nodeType)
// EnsureNodeIdentity gets or generates the node's LibP2P identity (unified - no bootstrap/node distinction)
func (sg *SecretGenerator) EnsureNodeIdentity() (peer.ID, error) {
// Unified data directory (no bootstrap/node distinction)
keyDir := filepath.Join(sg.oramaDir, "data")
keyPath := filepath.Join(keyDir, "identity.key")
// Ensure data directory exists
@ -419,9 +353,9 @@ func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
var configDir string
// gateway.yaml goes to data/ directory, other configs go to configs/
if filename == "gateway.yaml" {
configDir = filepath.Join(sg.debrosDir, "data")
configDir = filepath.Join(sg.oramaDir, "data")
} else {
configDir = filepath.Join(sg.debrosDir, "configs")
configDir = filepath.Join(sg.oramaDir, "configs")
}
if err := os.MkdirAll(configDir, 0755); err != nil {

View File

@ -275,11 +275,11 @@ func (bi *BinaryInstaller) ResolveBinaryPath(binary string, extraPaths ...string
}
// InstallDeBrosBinaries clones and builds DeBros binaries
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome string, skipRepoUpdate bool) error {
func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, oramaHome string, skipRepoUpdate bool) error {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building DeBros binaries...\n")
srcDir := filepath.Join(debrosHome, "src")
binDir := filepath.Join(debrosHome, "bin")
srcDir := filepath.Join(oramaHome, "src")
binDir := filepath.Join(oramaHome, "bin")
// Ensure directories exist
os.MkdirAll(srcDir, 0755)
@ -331,7 +331,7 @@ func (bi *BinaryInstaller) InstallDeBrosBinaries(branch string, debrosHome strin
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Building binaries...\n")
cmd := exec.Command("make", "build")
cmd.Dir = srcDir
cmd.Env = append(os.Environ(), "HOME="+debrosHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
cmd.Env = append(os.Environ(), "HOME="+oramaHome, "PATH="+os.Getenv("PATH")+":/usr/local/go/bin")
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to build: %v\n%s", err, string(output))
}
@ -401,15 +401,15 @@ func (bi *BinaryInstaller) InstallSystemDependencies() error {
return nil
}
// InitializeIPFSRepo initializes an IPFS repository for a node
func (bi *BinaryInstaller) InitializeIPFSRepo(nodeType, ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int) error {
// InitializeIPFSRepo initializes an IPFS repository for a node (unified - no bootstrap/node distinction)
func (bi *BinaryInstaller) InitializeIPFSRepo(ipfsRepoPath string, swarmKeyPath string, apiPort, gatewayPort, swarmPort int) error {
configPath := filepath.Join(ipfsRepoPath, "config")
repoExists := false
if _, err := os.Stat(configPath); err == nil {
repoExists = true
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS repo for %s already exists, ensuring configuration...\n", nodeType)
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS repo already exists, ensuring configuration...\n")
} else {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS repo for %s...\n", nodeType)
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing IPFS repo...\n")
}
if err := os.MkdirAll(ipfsRepoPath, 0755); err != nil {
@ -507,16 +507,17 @@ func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort,
}
// Set Addresses
// Bind API, Gateway, and Swarm to localhost only for security
// (peer connections are routed via the SNI gateway in production)
config["Addresses"] = map[string]interface{}{
"API": []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", apiPort),
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort),
},
"Gateway": []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", gatewayPort),
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gatewayPort),
},
"Swarm": []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
fmt.Sprintf("/ip6/::/tcp/%d", swarmPort),
fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", swarmPort),
},
}
@ -533,18 +534,18 @@ func (bi *BinaryInstaller) configureIPFSAddresses(ipfsRepoPath string, apiPort,
return nil
}
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration
// InitializeIPFSClusterConfig initializes IPFS Cluster configuration (unified - no bootstrap/node distinction)
// This runs `ipfs-cluster-service init` to create the service.json configuration file.
// For existing installations, it ensures the cluster secret is up to date.
// bootstrapClusterPeers should be in format: ["/ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>"]
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret string, ipfsAPIPort int, bootstrapClusterPeers []string) error {
// clusterPeers should be in format: ["/ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>"]
func (bi *BinaryInstaller) InitializeIPFSClusterConfig(clusterPath, clusterSecret string, ipfsAPIPort int, clusterPeers []string) error {
serviceJSONPath := filepath.Join(clusterPath, "service.json")
configExists := false
if _, err := os.Stat(serviceJSONPath); err == nil {
configExists = true
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS Cluster config for %s already exists, ensuring it's up to date...\n", nodeType)
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " IPFS Cluster config already exists, ensuring it's up to date...\n")
} else {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Preparing IPFS Cluster path for %s...\n", nodeType)
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Preparing IPFS Cluster path...\n")
}
if err := os.MkdirAll(clusterPath, 0755); err != nil {
@ -581,7 +582,7 @@ func (bi *BinaryInstaller) InitializeIPFSClusterConfig(nodeType, clusterPath, cl
// We do this AFTER init to ensure our secret takes precedence
if clusterSecret != "" {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Updating cluster secret, IPFS port, and peer addresses...\n")
if err := bi.updateClusterConfig(clusterPath, clusterSecret, ipfsAPIPort, bootstrapClusterPeers); err != nil {
if err := bi.updateClusterConfig(clusterPath, clusterSecret, ipfsAPIPort, clusterPeers); err != nil {
return fmt.Errorf("failed to update cluster config: %w", err)
}
@ -717,8 +718,8 @@ func (bi *BinaryInstaller) GetClusterPeerMultiaddr(clusterPath string, nodeIP st
}
// InitializeRQLiteDataDir initializes RQLite data directory
func (bi *BinaryInstaller) InitializeRQLiteDataDir(nodeType, dataDir string) error {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing RQLite data dir for %s...\n", nodeType)
func (bi *BinaryInstaller) InitializeRQLiteDataDir(dataDir string) error {
fmt.Fprintf(bi.logWriter.(interface{ Write([]byte) (int, error) }), " Initializing RQLite data dir...\n")
if err := os.MkdirAll(dataDir, 0755); err != nil {
return fmt.Errorf("failed to create RQLite data directory: %w", err)

View File

@ -12,36 +12,35 @@ import (
// ProductionSetup orchestrates the entire production deployment
type ProductionSetup struct {
osInfo *OSInfo
arch string
debrosHome string
debrosDir string
logWriter io.Writer
forceReconfigure bool
skipOptionalDeps bool
skipResourceChecks bool
clusterSecretOverride string
privChecker *PrivilegeChecker
osDetector *OSDetector
archDetector *ArchitectureDetector
resourceChecker *ResourceChecker
portChecker *PortChecker
fsProvisioner *FilesystemProvisioner
userProvisioner *UserProvisioner
stateDetector *StateDetector
configGenerator *ConfigGenerator
secretGenerator *SecretGenerator
serviceGenerator *SystemdServiceGenerator
serviceController *SystemdController
binaryInstaller *BinaryInstaller
branch string
skipRepoUpdate bool
NodePeerID string // Captured during Phase3 for later display
osInfo *OSInfo
arch string
oramaHome string
oramaDir string
logWriter io.Writer
forceReconfigure bool
skipOptionalDeps bool
skipResourceChecks bool
privChecker *PrivilegeChecker
osDetector *OSDetector
archDetector *ArchitectureDetector
resourceChecker *ResourceChecker
portChecker *PortChecker
fsProvisioner *FilesystemProvisioner
userProvisioner *UserProvisioner
stateDetector *StateDetector
configGenerator *ConfigGenerator
secretGenerator *SecretGenerator
serviceGenerator *SystemdServiceGenerator
serviceController *SystemdController
binaryInstaller *BinaryInstaller
branch string
skipRepoUpdate bool
NodePeerID string // Captured during Phase3 for later display
}
// ReadBranchPreference reads the stored branch preference from disk
func ReadBranchPreference(debrosDir string) string {
branchFile := filepath.Join(debrosDir, ".branch")
func ReadBranchPreference(oramaDir string) string {
branchFile := filepath.Join(oramaDir, ".branch")
data, err := os.ReadFile(branchFile)
if err != nil {
return "main" // Default to main if file doesn't exist
@ -54,9 +53,9 @@ func ReadBranchPreference(debrosDir string) string {
}
// SaveBranchPreference saves the branch preference to disk
func SaveBranchPreference(debrosDir, branch string) error {
branchFile := filepath.Join(debrosDir, ".branch")
if err := os.MkdirAll(debrosDir, 0755); err != nil {
func SaveBranchPreference(oramaDir, branch string) error {
branchFile := filepath.Join(oramaDir, ".branch")
if err := os.MkdirAll(oramaDir, 0755); err != nil {
return fmt.Errorf("failed to create debros directory: %w", err)
}
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
@ -67,39 +66,37 @@ func SaveBranchPreference(debrosDir, branch string) error {
}
// NewProductionSetup creates a new production setup orchestrator
func NewProductionSetup(debrosHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool, clusterSecretOverride string) *ProductionSetup {
debrosDir := debrosHome + "/.debros"
func NewProductionSetup(oramaHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool) *ProductionSetup {
oramaDir := oramaHome + "/.orama"
arch, _ := (&ArchitectureDetector{}).Detect()
normalizedSecret := strings.TrimSpace(strings.ToLower(clusterSecretOverride))
// If branch is empty, try to read from stored preference, otherwise default to main
if branch == "" {
branch = ReadBranchPreference(debrosDir)
branch = ReadBranchPreference(oramaDir)
}
return &ProductionSetup{
debrosHome: debrosHome,
debrosDir: debrosDir,
logWriter: logWriter,
forceReconfigure: forceReconfigure,
arch: arch,
branch: branch,
skipRepoUpdate: skipRepoUpdate,
skipResourceChecks: skipResourceChecks,
clusterSecretOverride: normalizedSecret,
privChecker: &PrivilegeChecker{},
osDetector: &OSDetector{},
archDetector: &ArchitectureDetector{},
resourceChecker: NewResourceChecker(),
portChecker: NewPortChecker(),
fsProvisioner: NewFilesystemProvisioner(debrosHome),
userProvisioner: NewUserProvisioner("debros", debrosHome, "/bin/bash"),
stateDetector: NewStateDetector(debrosDir),
configGenerator: NewConfigGenerator(debrosDir),
secretGenerator: NewSecretGenerator(debrosDir, normalizedSecret),
serviceGenerator: NewSystemdServiceGenerator(debrosHome, debrosDir),
serviceController: NewSystemdController(),
binaryInstaller: NewBinaryInstaller(arch, logWriter),
oramaHome: oramaHome,
oramaDir: oramaDir,
logWriter: logWriter,
forceReconfigure: forceReconfigure,
arch: arch,
branch: branch,
skipRepoUpdate: skipRepoUpdate,
skipResourceChecks: skipResourceChecks,
privChecker: &PrivilegeChecker{},
osDetector: &OSDetector{},
archDetector: &ArchitectureDetector{},
resourceChecker: NewResourceChecker(),
portChecker: NewPortChecker(),
fsProvisioner: NewFilesystemProvisioner(oramaHome),
userProvisioner: NewUserProvisioner("debros", oramaHome, "/bin/bash"),
stateDetector: NewStateDetector(oramaDir),
configGenerator: NewConfigGenerator(oramaDir),
secretGenerator: NewSecretGenerator(oramaDir),
serviceGenerator: NewSystemdServiceGenerator(oramaHome, oramaDir),
serviceController: NewSystemdController(),
binaryInstaller: NewBinaryInstaller(arch, logWriter),
}
}
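Taken together, the phases form a linear pipeline; a hypothetical driver under the new unified signatures (bootstrapPeers and vpsIP are placeholders, error handling simplified):

    ps := NewProductionSetup("/home/debros", os.Stdout, false, "main", false, false)
    steps := []func() error{
        ps.Phase1CheckPrerequisites,
        ps.Phase2ProvisionEnvironment,
        ps.Phase2bInstallBinaries,
        func() error { return ps.Phase2cInitializeServices(bootstrapPeers, vpsIP) },
        ps.Phase3GenerateSecrets,
        func() error { return ps.Phase4GenerateConfigs(bootstrapPeers, vpsIP, false, "", "") },
        ps.Phase5CreateSystemdServices,
    }
    for _, step := range steps {
        if err := step(); err != nil {
            log.Fatal(err)
        }
    }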
@ -168,7 +165,7 @@ func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
if ps.skipResourceChecks {
ps.logf(" ⚠️ Skipping system resource checks (disk, RAM, CPU) due to --ignore-resource-checks flag")
} else {
if err := ps.resourceChecker.CheckDiskSpace(ps.debrosHome); err != nil {
if err := ps.resourceChecker.CheckDiskSpace(ps.oramaHome); err != nil {
ps.logf(" ❌ %v", err)
return err
}
@ -214,8 +211,8 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
}
}
// Create directory structure (base directories only - node-specific dirs created in Phase2c)
if err := ps.fsProvisioner.EnsureDirectoryStructure(""); err != nil {
// Create directory structure (unified structure)
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
return fmt.Errorf("failed to create directory structure: %w", err)
}
ps.logf(" ✓ Directory structure created")
@ -266,7 +263,7 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
}
// Install DeBros binaries
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.debrosHome, ps.skipRepoUpdate); err != nil {
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.oramaHome, ps.skipRepoUpdate); err != nil {
return fmt.Errorf("failed to install DeBros binaries: %w", err)
}
@ -275,21 +272,21 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
}
// Phase2cInitializeServices initializes service repositories and configurations
func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapPeers []string, vpsIP string) error {
func (ps *ProductionSetup) Phase2cInitializeServices(bootstrapPeers []string, vpsIP string) error {
ps.logf("Phase 2c: Initializing services...")
// Ensure node-specific directories exist
if err := ps.fsProvisioner.EnsureDirectoryStructure(nodeType); err != nil {
return fmt.Errorf("failed to create node-specific directories: %w", err)
// Ensure directories exist (unified structure)
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
return fmt.Errorf("failed to create directories: %w", err)
}
// Build paths with nodeType awareness to match systemd unit definitions
dataDir := filepath.Join(ps.debrosDir, "data", nodeType)
// Build paths - unified data directory (no bootstrap/node distinction)
dataDir := filepath.Join(ps.oramaDir, "data")
// Initialize IPFS repo with correct path structure
// Use port 4501 for API (to avoid conflict with RQLite on 5001), 8080 for gateway (standard), 4101 for swarm (to avoid conflict with LibP2P on 4001)
ipfsRepoPath := filepath.Join(dataDir, "ipfs", "repo")
if err := ps.binaryInstaller.InitializeIPFSRepo(nodeType, ipfsRepoPath, filepath.Join(ps.debrosDir, "secrets", "swarm.key"), 4501, 8080, 4101); err != nil {
if err := ps.binaryInstaller.InitializeIPFSRepo(ipfsRepoPath, filepath.Join(ps.oramaDir, "secrets", "swarm.key"), 4501, 8080, 4101); err != nil {
return fmt.Errorf("failed to initialize IPFS repo: %w", err)
}
@ -300,39 +297,23 @@ func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapP
return fmt.Errorf("failed to get cluster secret: %w", err)
}
// Get bootstrap cluster peer addresses for non-bootstrap nodes
var bootstrapClusterPeers []string
if nodeType != "bootstrap" && len(bootstrapPeers) > 0 {
// Try to read bootstrap cluster peer ID and construct multiaddress
bootstrapClusterPath := filepath.Join(ps.debrosDir, "data", "bootstrap", "ipfs-cluster")
// Infer bootstrap IP from bootstrap peers
// Get cluster peer addresses from bootstrap peers if available
var clusterPeers []string
if len(bootstrapPeers) > 0 {
// Infer IP from bootstrap peers
bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
if bootstrapIP != "" {
// Check if bootstrap cluster identity exists
if _, err := os.Stat(filepath.Join(bootstrapClusterPath, "identity.json")); err == nil {
// Bootstrap cluster is initialized, get its multiaddress
if clusterMultiaddr, err := ps.binaryInstaller.GetClusterPeerMultiaddr(bootstrapClusterPath, bootstrapIP); err == nil {
bootstrapClusterPeers = []string{clusterMultiaddr}
ps.logf(" Configured IPFS Cluster to connect to bootstrap: %s", clusterMultiaddr)
} else {
ps.logf(" ⚠️ Could not read bootstrap cluster peer ID: %v", err)
ps.logf(" ⚠️ IPFS Cluster will rely on mDNS discovery (may not work across internet)")
}
} else {
ps.logf(" Bootstrap cluster not yet initialized, peer_addresses will be empty")
ps.logf(" IPFS Cluster will rely on mDNS discovery (may not work across internet)")
}
ps.logf(" Will attempt to connect to cluster peers at %s", bootstrapIP)
}
}
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(nodeType, clusterPath, clusterSecret, 4501, bootstrapClusterPeers); err != nil {
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(clusterPath, clusterSecret, 4501, clusterPeers); err != nil {
return fmt.Errorf("failed to initialize IPFS Cluster: %w", err)
}
// Initialize RQLite data directory
rqliteDataDir := filepath.Join(dataDir, "rqlite")
if err := ps.binaryInstaller.InitializeRQLiteDataDir(nodeType, rqliteDataDir); err != nil {
if err := ps.binaryInstaller.InitializeRQLiteDataDir(rqliteDataDir); err != nil {
ps.logf(" ⚠️ RQLite initialization warning: %v", err)
}
@ -347,7 +328,7 @@ func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string, bootstrapP
}
// Phase3GenerateSecrets generates shared secrets and keys
func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
func (ps *ProductionSetup) Phase3GenerateSecrets() error {
ps.logf("Phase 3: Generating secrets...")
// Cluster secret
@ -362,13 +343,8 @@ func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
}
ps.logf(" ✓ IPFS swarm key ensured")
// Node identity
nodeType := "node"
if isBootstrap {
nodeType = "bootstrap"
}
peerID, err := ps.secretGenerator.EnsureNodeIdentity(nodeType)
// Node identity (unified - no bootstrap/node distinction)
peerID, err := ps.secretGenerator.EnsureNodeIdentity()
if err != nil {
return fmt.Errorf("failed to ensure node identity: %w", err)
}
@ -380,7 +356,7 @@ func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
}
// Phase4GenerateConfigs generates node, gateway, and service configs
func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string, bootstrapJoin string) error {
func (ps *ProductionSetup) Phase4GenerateConfigs(bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string, joinAddress string) error {
if ps.IsUpdate() {
ps.logf("Phase 4: Updating configurations...")
ps.logf(" (Existing configs will be updated to latest format)")
@ -388,51 +364,34 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeer
ps.logf("Phase 4: Generating configurations...")
}
// Node config
nodeConfig, err := ps.configGenerator.GenerateNodeConfig(isBootstrap, bootstrapPeers, vpsIP, bootstrapJoin)
// Node config (unified - no bootstrap/node distinction)
nodeConfig, err := ps.configGenerator.GenerateNodeConfig(bootstrapPeers, vpsIP, joinAddress, domain)
if err != nil {
return fmt.Errorf("failed to generate node config: %w", err)
}
var configFile string
if isBootstrap {
configFile = "bootstrap.yaml"
} else {
configFile = "node.yaml"
}
configFile := "node.yaml"
if err := ps.secretGenerator.SaveConfig(configFile, nodeConfig); err != nil {
return fmt.Errorf("failed to save node config: %w", err)
}
ps.logf(" ✓ Node config generated: %s", configFile)
// Determine Olric servers for gateway config
// Olric will bind to 0.0.0.0 (all interfaces) but gateway needs specific addresses
// Olric binds to localhost, gateway connects locally
var olricServers []string
if isBootstrap {
// Bootstrap node: gateway should connect to vpsIP if provided, otherwise localhost
if vpsIP != "" {
olricServers = []string{net.JoinHostPort(vpsIP, "3320")}
} else {
olricServers = []string{"127.0.0.1:3320"}
}
// Start with local Olric server
if vpsIP != "" {
olricServers = []string{net.JoinHostPort(vpsIP, "3320")}
} else {
// Non-bootstrap node: include bootstrap server and local server
olricServers = []string{"127.0.0.1:3320"} // Default to localhost for single-node
if len(bootstrapPeers) > 0 {
// Try to infer Olric servers from bootstrap peers
bootstrapIP := inferBootstrapIP(bootstrapPeers, vpsIP)
if bootstrapIP != "" {
// Add bootstrap Olric server (use net.JoinHostPort for IPv6 support)
olricServers = []string{net.JoinHostPort(bootstrapIP, "3320")}
// Add local Olric server too
if vpsIP != "" {
olricServers = append(olricServers, net.JoinHostPort(vpsIP, "3320"))
} else {
olricServers = append(olricServers, "127.0.0.1:3320")
}
}
olricServers = []string{"127.0.0.1:3320"}
}
// If joining existing cluster, also include peer Olric servers
if len(bootstrapPeers) > 0 {
peerIP := inferBootstrapIP(bootstrapPeers, "")
if peerIP != "" && peerIP != vpsIP {
olricServers = append(olricServers, net.JoinHostPort(peerIP, "3320"))
}
}
@ -446,19 +405,16 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeer
}
ps.logf(" ✓ Gateway config generated")
// Olric config - bind to vpsIP if provided, otherwise all interfaces
// Gateway will connect using the specific address from olricServers list above
olricBindAddr := vpsIP
if olricBindAddr == "" {
olricBindAddr = "0.0.0.0"
}
// Olric config - bind to localhost for security
// External access goes through the HTTP gateway
olricBindAddr := "127.0.0.1"
olricConfig, err := ps.configGenerator.GenerateOlricConfig(olricBindAddr, 3320, 3322)
if err != nil {
return fmt.Errorf("failed to generate olric config: %w", err)
}
// Create olric config directory
olricConfigDir := ps.debrosDir + "/configs/olric"
olricConfigDir := ps.oramaDir + "/configs/olric"
if err := os.MkdirAll(olricConfigDir, 0755); err != nil {
return fmt.Errorf("failed to create olric config directory: %w", err)
}
@ -474,7 +430,7 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeer
}
// Phase5CreateSystemdServices creates and enables systemd units
func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP string) error {
func (ps *ProductionSetup) Phase5CreateSystemdServices() error {
ps.logf("Phase 5: Creating systemd services...")
// Validate all required binaries are available before creating services
@ -492,21 +448,19 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
return fmt.Errorf("olric-server binary not available: %w", err)
}
// IPFS service
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(nodeType, ipfsBinary)
unitName := fmt.Sprintf("debros-ipfs-%s.service", nodeType)
if err := ps.serviceController.WriteServiceUnit(unitName, ipfsUnit); err != nil {
// IPFS service (unified - no bootstrap/node distinction)
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(ipfsBinary)
if err := ps.serviceController.WriteServiceUnit("debros-ipfs.service", ipfsUnit); err != nil {
return fmt.Errorf("failed to write IPFS service: %w", err)
}
ps.logf(" ✓ IPFS service created: %s", unitName)
ps.logf(" ✓ IPFS service created: debros-ipfs.service")
// IPFS Cluster service
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(nodeType, clusterBinary)
clusterUnitName := fmt.Sprintf("debros-ipfs-cluster-%s.service", nodeType)
if err := ps.serviceController.WriteServiceUnit(clusterUnitName, clusterUnit); err != nil {
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(clusterBinary)
if err := ps.serviceController.WriteServiceUnit("debros-ipfs-cluster.service", clusterUnit); err != nil {
return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
}
ps.logf(" ✓ IPFS Cluster service created: %s", clusterUnitName)
ps.logf(" ✓ IPFS Cluster service created: debros-ipfs-cluster.service")
// Note: RQLite is managed internally by the node process, not as a separate systemd service
ps.logf(" RQLite will be managed by the node process")
@ -518,16 +472,15 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
}
ps.logf(" ✓ Olric service created")
// Node service
nodeUnit := ps.serviceGenerator.GenerateNodeService(nodeType)
nodeUnitName := fmt.Sprintf("debros-node-%s.service", nodeType)
if err := ps.serviceController.WriteServiceUnit(nodeUnitName, nodeUnit); err != nil {
// Node service (unified)
nodeUnit := ps.serviceGenerator.GenerateNodeService()
if err := ps.serviceController.WriteServiceUnit("debros-node.service", nodeUnit); err != nil {
return fmt.Errorf("failed to write Node service: %w", err)
}
ps.logf(" ✓ Node service created: %s", nodeUnitName)
ps.logf(" ✓ Node service created: debros-node.service")
// Gateway service (optional, only on specific nodes)
gatewayUnit := ps.serviceGenerator.GenerateGatewayService(nodeType)
// Gateway service
gatewayUnit := ps.serviceGenerator.GenerateGatewayService()
if err := ps.serviceController.WriteServiceUnit("debros-gateway.service", gatewayUnit); err != nil {
return fmt.Errorf("failed to write Gateway service: %w", err)
}
@ -546,8 +499,8 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
}
ps.logf(" ✓ Systemd daemon reloaded")
// Enable services (RQLite is managed by node, not as separate service)
services := []string{unitName, clusterUnitName, "debros-olric.service", nodeUnitName, "debros-gateway.service", "debros-anyone-client.service"}
// Enable services (unified names - no bootstrap/node distinction)
services := []string{"debros-ipfs.service", "debros-ipfs-cluster.service", "debros-olric.service", "debros-node.service", "debros-gateway.service", "debros-anyone-client.service"}
for _, svc := range services {
if err := ps.serviceController.EnableService(svc); err != nil {
ps.logf(" ⚠️ Failed to enable %s: %v", svc, err)
@ -560,7 +513,7 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
ps.logf(" Starting services...")
// Start infrastructure first (IPFS, Olric, Anyone Client) - RQLite is managed by node
infraServices := []string{unitName, "debros-olric.service"}
infraServices := []string{"debros-ipfs.service", "debros-olric.service"}
// Check if port 9050 is already in use (e.g., another anyone-client or similar service)
if ps.portChecker.IsPortInUse(9050) {
@ -582,14 +535,14 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string, vpsIP st
exec.Command("sleep", "2").Run()
// Start IPFS Cluster
if err := ps.serviceController.StartService(clusterUnitName); err != nil {
ps.logf(" ⚠️ Failed to start %s: %v", clusterUnitName, err)
if err := ps.serviceController.StartService("debros-ipfs-cluster.service"); err != nil {
ps.logf(" ⚠️ Failed to start debros-ipfs-cluster.service: %v", err)
} else {
ps.logf(" - %s started", clusterUnitName)
ps.logf(" - debros-ipfs-cluster.service started")
}
// Start application services
appServices := []string{nodeUnitName, "debros-gateway.service"}
appServices := []string{"debros-node.service", "debros-gateway.service"}
for _, svc := range appServices {
if err := ps.serviceController.StartService(svc); err != nil {
ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
@ -609,19 +562,19 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
ps.logf(strings.Repeat("=", 70))
ps.logf("\nNode Peer ID: %s", peerID)
ps.logf("\nService Management:")
ps.logf(" systemctl status debros-ipfs-bootstrap")
ps.logf(" journalctl -u debros-node-bootstrap -f")
ps.logf(" tail -f %s/logs/node-bootstrap.log", ps.debrosDir)
ps.logf(" systemctl status debros-ipfs")
ps.logf(" journalctl -u debros-node -f")
ps.logf(" tail -f %s/logs/node.log", ps.oramaDir)
ps.logf("\nLog Files:")
ps.logf(" %s/logs/ipfs-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/ipfs-cluster-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/rqlite-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/olric.log", ps.debrosDir)
ps.logf(" %s/logs/node-bootstrap.log", ps.debrosDir)
ps.logf(" %s/logs/gateway.log", ps.debrosDir)
ps.logf(" %s/logs/anyone-client.log", ps.debrosDir)
ps.logf(" %s/logs/ipfs.log", ps.oramaDir)
ps.logf(" %s/logs/ipfs-cluster.log", ps.oramaDir)
ps.logf(" %s/logs/rqlite.log", ps.oramaDir)
ps.logf(" %s/logs/olric.log", ps.oramaDir)
ps.logf(" %s/logs/node.log", ps.oramaDir)
ps.logf(" %s/logs/gateway.log", ps.oramaDir)
ps.logf(" %s/logs/anyone-client.log", ps.oramaDir)
ps.logf("\nStart All Services:")
ps.logf(" systemctl start debros-ipfs-bootstrap debros-ipfs-cluster-bootstrap debros-olric debros-anyone-client debros-node-bootstrap debros-gateway")
ps.logf(" systemctl start debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-client debros-node debros-gateway")
ps.logf("\nVerify Installation:")
ps.logf(" curl http://localhost:6001/health")
ps.logf(" curl http://localhost:5001/status")
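The verification commands above translate directly into a small Go probe; a sketch, assuming the gateway health endpoint on port 6001 and the RQLite status endpoint on port 5001 exactly as printed:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Probe the endpoints the installer prints in its summary.
	for _, u := range []string{"http://localhost:6001/health", "http://localhost:5001/status"} {
		resp, err := http.Get(u)
		if err != nil {
			fmt.Println(u, "unreachable:", err)
			continue
		}
		fmt.Println(u, "->", resp.Status)
		resp.Body.Close()
	}
}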


@ -10,42 +10,35 @@ import (
// FilesystemProvisioner manages directory creation and permissions
type FilesystemProvisioner struct {
debrosHome string
debrosDir string
oramaHome string
oramaDir string
logWriter interface{} // Can be io.Writer for logging
}
// NewFilesystemProvisioner creates a new provisioner
func NewFilesystemProvisioner(debrosHome string) *FilesystemProvisioner {
func NewFilesystemProvisioner(oramaHome string) *FilesystemProvisioner {
return &FilesystemProvisioner{
debrosHome: debrosHome,
debrosDir: filepath.Join(debrosHome, ".debros"),
oramaHome: oramaHome,
oramaDir: filepath.Join(oramaHome, ".orama"),
}
}
// EnsureDirectoryStructure creates all required directories
// nodeType can be "bootstrap", "node", or "" (empty string means create base directories only)
func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error {
// Base directories that are always needed
// EnsureDirectoryStructure creates all required directories (unified structure)
func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
// All directories needed for unified node structure
dirs := []string{
fp.debrosDir,
filepath.Join(fp.debrosDir, "configs"),
filepath.Join(fp.debrosDir, "secrets"),
filepath.Join(fp.debrosDir, "data"),
filepath.Join(fp.debrosDir, "logs"),
filepath.Join(fp.debrosDir, "tls-cache"),
filepath.Join(fp.debrosDir, "backups"),
filepath.Join(fp.debrosHome, "bin"),
filepath.Join(fp.debrosHome, "src"),
}
// Only create directories for the requested node type
if nodeType == "bootstrap" || nodeType == "node" {
dirs = append(dirs,
filepath.Join(fp.debrosDir, "data", nodeType, "ipfs", "repo"),
filepath.Join(fp.debrosDir, "data", nodeType, "ipfs-cluster"),
filepath.Join(fp.debrosDir, "data", nodeType, "rqlite"),
)
fp.oramaDir,
filepath.Join(fp.oramaDir, "configs"),
filepath.Join(fp.oramaDir, "secrets"),
filepath.Join(fp.oramaDir, "data"),
filepath.Join(fp.oramaDir, "data", "ipfs", "repo"),
filepath.Join(fp.oramaDir, "data", "ipfs-cluster"),
filepath.Join(fp.oramaDir, "data", "rqlite"),
filepath.Join(fp.oramaDir, "logs"),
filepath.Join(fp.oramaDir, "tls-cache"),
filepath.Join(fp.oramaDir, "backups"),
filepath.Join(fp.oramaHome, "bin"),
filepath.Join(fp.oramaHome, "src"),
}
for _, dir := range dirs {
@ -55,26 +48,15 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error
}
// Create log files with correct permissions so systemd can write to them
// Only create logs for the specific nodeType being installed
logsDir := filepath.Join(fp.debrosDir, "logs")
logsDir := filepath.Join(fp.oramaDir, "logs")
logFiles := []string{
"olric.log",
"gateway.log",
}
// Add node-type-specific log files only if nodeType is specified
if nodeType == "bootstrap" {
logFiles = append(logFiles,
"ipfs-bootstrap.log",
"ipfs-cluster-bootstrap.log",
"node-bootstrap.log",
)
} else if nodeType == "node" {
logFiles = append(logFiles,
"ipfs-node.log",
"ipfs-cluster-node.log",
"node-node.log",
)
"ipfs.log",
"ipfs-cluster.log",
"node.log",
"rqlite.log",
"anyone-client.log",
}
for _, logFile := range logFiles {
@ -90,22 +72,22 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure(nodeType string) error
return nil
}
// FixOwnership changes ownership of .debros directory to debros user
// FixOwnership changes ownership of .orama directory to debros user
func (fp *FilesystemProvisioner) FixOwnership() error {
// Fix entire .debros directory recursively (includes all data, configs, logs, etc.)
cmd := exec.Command("chown", "-R", "debros:debros", fp.debrosDir)
// Fix entire .orama directory recursively (includes all data, configs, logs, etc.)
cmd := exec.Command("chown", "-R", "debros:debros", fp.oramaDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.debrosDir, err, string(output))
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaDir, err, string(output))
}
// Also fix home directory ownership
cmd = exec.Command("chown", "debros:debros", fp.debrosHome)
cmd = exec.Command("chown", "debros:debros", fp.oramaHome)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.debrosHome, err, string(output))
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaHome, err, string(output))
}
// Fix bin directory
binDir := filepath.Join(fp.debrosHome, "bin")
binDir := filepath.Join(fp.oramaHome, "bin")
cmd = exec.Command("chown", "-R", "debros:debros", binDir)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", binDir, err, string(output))
@ -186,20 +168,20 @@ func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
// StateDetector checks for existing production state
type StateDetector struct {
debrosDir string
oramaDir string
}
// NewStateDetector creates a state detector
func NewStateDetector(debrosDir string) *StateDetector {
func NewStateDetector(oramaDir string) *StateDetector {
return &StateDetector{
debrosDir: debrosDir,
oramaDir: oramaDir,
}
}
// IsConfigured checks if basic configs exist
func (sd *StateDetector) IsConfigured() bool {
nodeConfig := filepath.Join(sd.debrosDir, "configs", "node.yaml")
gatewayConfig := filepath.Join(sd.debrosDir, "configs", "gateway.yaml")
nodeConfig := filepath.Join(sd.oramaDir, "configs", "node.yaml")
gatewayConfig := filepath.Join(sd.oramaDir, "configs", "gateway.yaml")
_, err1 := os.Stat(nodeConfig)
_, err2 := os.Stat(gatewayConfig)
return err1 == nil || err2 == nil
@ -207,24 +189,36 @@ func (sd *StateDetector) IsConfigured() bool {
// HasSecrets checks if cluster secret and swarm key exist
func (sd *StateDetector) HasSecrets() bool {
clusterSecret := filepath.Join(sd.debrosDir, "secrets", "cluster-secret")
swarmKey := filepath.Join(sd.debrosDir, "secrets", "swarm.key")
clusterSecret := filepath.Join(sd.oramaDir, "secrets", "cluster-secret")
swarmKey := filepath.Join(sd.oramaDir, "secrets", "swarm.key")
_, err1 := os.Stat(clusterSecret)
_, err2 := os.Stat(swarmKey)
return err1 == nil && err2 == nil
}
// HasIPFSData checks if IPFS repo is initialized
// HasIPFSData checks if IPFS repo is initialized (unified path)
func (sd *StateDetector) HasIPFSData() bool {
ipfsRepoPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "ipfs", "repo", "config")
_, err := os.Stat(ipfsRepoPath)
// Check unified path first
ipfsRepoPath := filepath.Join(sd.oramaDir, "data", "ipfs", "repo", "config")
if _, err := os.Stat(ipfsRepoPath); err == nil {
return true
}
// Fallback: check legacy bootstrap path for migration
legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "ipfs", "repo", "config")
_, err := os.Stat(legacyPath)
return err == nil
}
// HasRQLiteData checks if RQLite data exists
// HasRQLiteData checks if RQLite data exists (unified path)
func (sd *StateDetector) HasRQLiteData() bool {
rqliteDataPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "rqlite")
info, err := os.Stat(rqliteDataPath)
// Check unified path first
rqliteDataPath := filepath.Join(sd.oramaDir, "data", "rqlite")
if info, err := os.Stat(rqliteDataPath); err == nil && info.IsDir() {
return true
}
// Fallback: check legacy bootstrap path for migration
legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "rqlite")
info, err := os.Stat(legacyPath)
return err == nil && info.IsDir()
}
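The legacy-path fallbacks above imply a one-time migration from data/bootstrap/* to the unified data/* layout, but no migration helper appears in this hunk. A hypothetical sketch of one (name and behavior are assumptions, not part of the commit):

// migrateLegacyData moves one data subtree from the old per-node-type layout
// to the unified layout. Hypothetical helper - not part of this commit.
func (sd *StateDetector) migrateLegacyData(name string) error {
	legacy := filepath.Join(sd.oramaDir, "data", "bootstrap", name)
	unified := filepath.Join(sd.oramaDir, "data", name)
	if _, err := os.Stat(legacy); err != nil {
		return nil // nothing to migrate
	}
	if _, err := os.Stat(unified); err == nil {
		return nil // unified path already exists (possibly freshly created); skip the automatic move
	}
	return os.Rename(legacy, unified)
}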


@ -10,31 +10,25 @@ import (
// SystemdServiceGenerator generates systemd unit files
type SystemdServiceGenerator struct {
debrosHome string
debrosDir string
oramaHome string
oramaDir string
}
// NewSystemdServiceGenerator creates a new service generator
func NewSystemdServiceGenerator(debrosHome, debrosDir string) *SystemdServiceGenerator {
func NewSystemdServiceGenerator(oramaHome, oramaDir string) *SystemdServiceGenerator {
return &SystemdServiceGenerator{
debrosHome: debrosHome,
debrosDir: debrosDir,
oramaHome: oramaHome,
oramaDir: oramaDir,
}
}
// GenerateIPFSService generates the IPFS daemon systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSService(nodeType string, ipfsBinary string) string {
var ipfsRepoPath string
if nodeType == "bootstrap" {
ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs", "repo")
} else {
ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs", "repo")
}
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodeType))
func (ssg *SystemdServiceGenerator) GenerateIPFSService(ipfsBinary string) string {
ipfsRepoPath := filepath.Join(ssg.oramaDir, "data", "ipfs", "repo")
logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs.log")
return fmt.Sprintf(`[Unit]
Description=IPFS Daemon (%[1]s)
Description=IPFS Daemon
After=network-online.target
Wants=network-online.target
@ -42,15 +36,64 @@ Wants=network-online.target
Type=simple
User=debros
Group=debros
Environment=HOME=%[2]s
Environment=IPFS_PATH=%[3]s
ExecStartPre=/bin/bash -c 'if [ -f %[4]s/secrets/swarm.key ] && [ ! -f %[3]s/swarm.key ]; then cp %[4]s/secrets/swarm.key %[3]s/swarm.key && chmod 600 %[3]s/swarm.key; fi'
ExecStart=%[6]s daemon --enable-pubsub-experiment --repo-dir=%[3]s
Environment=HOME=%[1]s
Environment=IPFS_PATH=%[2]s
ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi'
ExecStart=%[5]s daemon --enable-pubsub-experiment --repo-dir=%[2]s
Restart=always
RestartSec=5
StandardOutput=file:%[5]s
StandardError=file:%[5]s
SyslogIdentifier=ipfs-%[1]s
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=debros-ipfs
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[3]s
[Install]
WantedBy=multi-user.target
`, ssg.oramaHome, ipfsRepoPath, ssg.oramaDir, logFile, ipfsBinary)
}
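Usage mirrors Phase5CreateSystemdServices earlier in this diff: generate the unit text, then hand it to the service controller. A short sketch (the binary path and controller variable are illustrative):

ssg := NewSystemdServiceGenerator("/home/debros", "/home/debros/.orama")
unit := ssg.GenerateIPFSService("/usr/local/bin/ipfs") // example binary path
if err := serviceController.WriteServiceUnit("debros-ipfs.service", unit); err != nil {
	return fmt.Errorf("failed to write IPFS service: %w", err)
}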
// GenerateIPFSClusterService generates the IPFS Cluster systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(clusterBinary string) string {
clusterPath := filepath.Join(ssg.oramaDir, "data", "ipfs-cluster")
logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs-cluster.log")
// Read cluster secret from file to pass to daemon
clusterSecretPath := filepath.Join(ssg.oramaDir, "secrets", "cluster-secret")
clusterSecret := ""
if data, err := os.ReadFile(clusterSecretPath); err == nil {
clusterSecret = strings.TrimSpace(string(data))
}
	// Escape the secret for safe single-quoted shell interpolation (backslashes first, then quotes)
	escapedSecret := strings.ReplaceAll(clusterSecret, "\\", "\\\\")
	escapedSecret = strings.ReplaceAll(escapedSecret, "'", "'\"'\"'")
	_ = escapedSecret // Not interpolated into the unit: ExecStartPre reads the secret from the file instead
return fmt.Sprintf(`[Unit]
Description=IPFS Cluster Service
After=debros-ipfs.service
Wants=debros-ipfs.service
Requires=debros-ipfs.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%[1]s
Environment=HOME=%[1]s
Environment=IPFS_CLUSTER_PATH=%[2]s
Environment=CLUSTER_SECRET=%[6]s
ExecStartPre=/bin/bash -c 'if [ -f %[7]s ] && [ -f %[2]s/service.json ]; then SECRET=$(cat %[7]s | tr -d "[:space:]"); python3 -c "import json, sys; f=open(\"%[2]s/service.json\", \"r\"); d=json.load(f); f.close(); d.setdefault(\"cluster\", {})[\"secret\"]=\"$SECRET\"; f=open(\"%[2]s/service.json\", \"w\"); json.dump(d, f, indent=2); f.close()" 2>/dev/null || sed -i "s|\"secret\"[[:space:]]*:[[:space:]]*\"[^\"]*\"|\"secret\": \"$SECRET\"|" %[2]s/service.json; fi'
ExecStart=%[5]s daemon
Restart=always
RestartSec=5
StandardOutput=file:%[3]s
StandardError=file:%[3]s
SyslogIdentifier=debros-ipfs-cluster
NoNewPrivileges=yes
PrivateTmp=yes
@ -59,66 +102,22 @@ ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, ipfsRepoPath, ssg.debrosDir, logFile, ipfsBinary)
}
// GenerateIPFSClusterService generates the IPFS Cluster systemd unit
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(nodeType string, clusterBinary string) string {
var clusterPath string
if nodeType == "bootstrap" {
clusterPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs-cluster")
} else {
clusterPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs-cluster")
}
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", nodeType))
return fmt.Sprintf(`[Unit]
Description=IPFS Cluster Service (%[1]s)
After=debros-ipfs-%[1]s.service
Wants=debros-ipfs-%[1]s.service
Requires=debros-ipfs-%[1]s.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%[2]s
Environment=HOME=%[2]s
Environment=IPFS_CLUSTER_PATH=%[3]s
ExecStart=%[6]s daemon
Restart=always
RestartSec=5
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=ipfs-cluster-%[1]s
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[5]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, clusterPath, logFile, ssg.debrosDir, clusterBinary)
`, ssg.oramaHome, clusterPath, logFile, ssg.oramaDir, clusterBinary, clusterSecret, clusterSecretPath)
}
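The ExecStartPre one-liner above is, at heart, a JSON edit: copy the shared secret into cluster.secret of service.json before the daemon starts. For readers, the same operation in plain Go (a sketch only; the unit really does run the python3/sed fallback shown above):

import (
	"encoding/json"
	"os"
)

// setClusterSecret writes the shared secret into service.json's cluster.secret field.
func setClusterSecret(serviceJSONPath, secret string) error {
	raw, err := os.ReadFile(serviceJSONPath)
	if err != nil {
		return err
	}
	var doc map[string]any
	if err := json.Unmarshal(raw, &doc); err != nil {
		return err
	}
	cluster, ok := doc["cluster"].(map[string]any)
	if !ok {
		cluster = map[string]any{}
		doc["cluster"] = cluster
	}
	cluster["secret"] = secret
	out, err := json.MarshalIndent(doc, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(serviceJSONPath, out, 0o600)
}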
// GenerateRQLiteService generates the RQLite systemd unit
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
var dataDir string
if nodeType == "bootstrap" {
dataDir = filepath.Join(ssg.debrosDir, "data", "bootstrap", "rqlite")
} else {
dataDir = filepath.Join(ssg.debrosDir, "data", "node", "rqlite")
}
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
dataDir := filepath.Join(ssg.oramaDir, "data", "rqlite")
logFile := filepath.Join(ssg.oramaDir, "logs", "rqlite.log")
// Use public IP for advertise if provided, otherwise default to localhost
if advertiseIP == "" {
advertiseIP = "127.0.0.1"
}
// Bind RQLite to localhost only - external access via SNI gateway
args := fmt.Sprintf(
`-http-addr 0.0.0.0:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 0.0.0.0:%d`,
`-http-addr 127.0.0.1:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 127.0.0.1:%d`,
httpPort, advertiseIP, httpPort, advertiseIP, raftPort, raftPort,
)
@ -128,10 +127,8 @@ func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, rqlit
args += fmt.Sprintf(` %s`, dataDir)
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", nodeType))
return fmt.Sprintf(`[Unit]
Description=RQLite Database (%[1]s)
Description=RQLite Database
After=network-online.target
Wants=network-online.target
@ -139,28 +136,28 @@ Wants=network-online.target
Type=simple
User=debros
Group=debros
Environment=HOME=%[2]s
ExecStart=%[6]s %[3]s
Environment=HOME=%[1]s
ExecStart=%[5]s %[2]s
Restart=always
RestartSec=5
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=rqlite-%[1]s
StandardOutput=file:%[3]s
StandardError=file:%[3]s
SyslogIdentifier=debros-rqlite
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%[5]s
ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, nodeType, ssg.debrosHome, args, logFile, ssg.debrosDir, rqliteBinary)
`, ssg.oramaHome, args, logFile, ssg.oramaDir, rqliteBinary)
}
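The generated unit binds RQLite to localhost while advertising the public address; concretely (mirroring the test further down in this diff):

unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
// ExecStart now contains:
//   -http-addr 127.0.0.1:5001 -http-adv-addr 10.0.0.2:5001
//   -raft-adv-addr 10.0.0.2:7001 -raft-addr 127.0.0.1:7001
// plus a -join 10.0.0.1:7001 clause, since joinAddr was supplied (per the tests below).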
// GenerateOlricService generates the Olric systemd unit
func (ssg *SystemdServiceGenerator) GenerateOlricService(olricBinary string) string {
olricConfigPath := filepath.Join(ssg.debrosDir, "configs", "olric", "config.yaml")
logFile := filepath.Join(ssg.debrosDir, "logs", "olric.log")
olricConfigPath := filepath.Join(ssg.oramaDir, "configs", "olric", "config.yaml")
logFile := filepath.Join(ssg.oramaDir, "logs", "olric.log")
return fmt.Sprintf(`[Unit]
Description=Olric Cache Server
@ -187,70 +184,62 @@ ReadWritePaths=%[4]s
[Install]
WantedBy=multi-user.target
`, ssg.debrosHome, olricConfigPath, logFile, ssg.debrosDir, olricBinary)
`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary)
}
// GenerateNodeService generates the DeBros Node systemd unit
func (ssg *SystemdServiceGenerator) GenerateNodeService(nodeType string) string {
var configFile string
if nodeType == "bootstrap" {
configFile = "bootstrap.yaml"
} else {
configFile = "node.yaml"
}
logFile := filepath.Join(ssg.debrosDir, "logs", fmt.Sprintf("node-%s.log", nodeType))
func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
configFile := "node.yaml"
logFile := filepath.Join(ssg.oramaDir, "logs", "node.log")
return fmt.Sprintf(`[Unit]
Description=DeBros Network Node (%s)
After=debros-ipfs-cluster-%s.service
Wants=debros-ipfs-cluster-%s.service
Requires=debros-ipfs-cluster-%s.service
Description=DeBros Network Node
After=debros-ipfs-cluster.service
Wants=debros-ipfs-cluster.service
Requires=debros-ipfs-cluster.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
ExecStart=%s/bin/node --config %s/configs/%s
WorkingDirectory=%[1]s
Environment=HOME=%[1]s
ExecStart=%[1]s/bin/node --config %[2]s/configs/%[3]s
Restart=always
RestartSec=5
StandardOutput=file:%s
StandardError=file:%s
SyslogIdentifier=debros-node-%s
StandardOutput=file:%[4]s
StandardError=file:%[4]s
SyslogIdentifier=debros-node
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s
ReadWritePaths=%[2]s
[Install]
WantedBy=multi-user.target
`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, configFile, logFile, logFile, nodeType, ssg.debrosDir)
`, ssg.oramaHome, ssg.oramaDir, configFile, logFile)
}
// GenerateGatewayService generates the DeBros Gateway systemd unit
func (ssg *SystemdServiceGenerator) GenerateGatewayService(nodeType string) string {
nodeService := fmt.Sprintf("debros-node-%s.service", nodeType)
olricService := "debros-olric.service"
logFile := filepath.Join(ssg.debrosDir, "logs", "gateway.log")
func (ssg *SystemdServiceGenerator) GenerateGatewayService() string {
logFile := filepath.Join(ssg.oramaDir, "logs", "gateway.log")
return fmt.Sprintf(`[Unit]
Description=DeBros Gateway
After=%s %s
Wants=%s %s
After=debros-node.service debros-olric.service
Wants=debros-node.service debros-olric.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=%s
Environment=HOME=%s
ExecStart=%s/bin/gateway --config %s/data/gateway.yaml
WorkingDirectory=%[1]s
Environment=HOME=%[1]s
ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=file:%s
StandardError=file:%s
StandardOutput=file:%[3]s
StandardError=file:%[3]s
SyslogIdentifier=debros-gateway
AmbientCapabilities=CAP_NET_BIND_SERVICE
@ -259,16 +248,16 @@ CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=%s
ReadWritePaths=%[2]s
[Install]
WantedBy=multi-user.target
`, nodeService, olricService, nodeService, olricService, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, logFile, logFile, ssg.debrosDir)
`, ssg.oramaHome, ssg.oramaDir, logFile)
}
// GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit
func (ssg *SystemdServiceGenerator) GenerateAnyoneClientService() string {
logFile := filepath.Join(ssg.debrosDir, "logs", "anyone-client.log")
logFile := filepath.Join(ssg.oramaDir, "logs", "anyone-client.log")
return fmt.Sprintf(`[Unit]
Description=Anyone Client SOCKS5 Proxy
@ -295,7 +284,7 @@ ReadWritePaths=%[3]s
[Install]
WantedBy=multi-user.target
`, ssg.debrosHome, logFile, ssg.debrosDir)
`, ssg.oramaHome, logFile, ssg.oramaDir)
}
// SystemdController manages systemd service operations


@ -9,23 +9,20 @@ import (
func TestGenerateRQLiteService(t *testing.T) {
tests := []struct {
name string
nodeType string
joinAddr string
advertiseIP string
expectJoinInUnit bool
expectAdvertiseIP string
}{
{
name: "bootstrap with localhost advertise",
nodeType: "bootstrap",
name: "first node with localhost advertise",
joinAddr: "",
advertiseIP: "",
expectJoinInUnit: false,
expectAdvertiseIP: "127.0.0.1",
},
{
name: "bootstrap with public IP advertise",
nodeType: "bootstrap",
name: "first node with public IP advertise",
joinAddr: "",
advertiseIP: "10.0.0.1",
expectJoinInUnit: false,
@ -33,7 +30,6 @@ func TestGenerateRQLiteService(t *testing.T) {
},
{
name: "node joining cluster",
nodeType: "node",
joinAddr: "10.0.0.1:7001",
advertiseIP: "10.0.0.2",
expectJoinInUnit: true,
@ -41,7 +37,6 @@ func TestGenerateRQLiteService(t *testing.T) {
},
{
name: "node with localhost (should still include join)",
nodeType: "node",
joinAddr: "localhost:7001",
advertiseIP: "127.0.0.1",
expectJoinInUnit: true,
@ -52,11 +47,11 @@ func TestGenerateRQLiteService(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ssg := &SystemdServiceGenerator{
debrosHome: "/home/debros",
debrosDir: "/home/debros/.debros",
oramaHome: "/home/debros",
oramaDir: "/home/debros/.orama",
}
unit := ssg.GenerateRQLiteService(tt.nodeType, "/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
// Check advertise IP is present
expectedAdvertise := tt.expectAdvertiseIP + ":5001"
@ -86,21 +81,21 @@ func TestGenerateRQLiteService(t *testing.T) {
// TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments
func TestGenerateRQLiteServiceArgs(t *testing.T) {
ssg := &SystemdServiceGenerator{
debrosHome: "/home/debros",
debrosDir: "/home/debros/.debros",
oramaHome: "/home/debros",
oramaDir: "/home/debros/.orama",
}
unit := ssg.GenerateRQLiteService("node", "/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
// Verify essential flags are present
if !strings.Contains(unit, "-http-addr 0.0.0.0:5001") {
t.Error("missing -http-addr 0.0.0.0:5001")
// Verify essential flags are present (localhost binding for security)
if !strings.Contains(unit, "-http-addr 127.0.0.1:5001") {
t.Error("missing -http-addr 127.0.0.1:5001")
}
if !strings.Contains(unit, "-http-adv-addr 10.0.0.2:5001") {
t.Error("missing -http-adv-addr 10.0.0.2:5001")
}
if !strings.Contains(unit, "-raft-addr 0.0.0.0:7001") {
t.Error("missing -raft-addr 0.0.0.0:7001")
if !strings.Contains(unit, "-raft-addr 127.0.0.1:7001") {
t.Error("missing -raft-addr 127.0.0.1:7001")
}
if !strings.Contains(unit, "-raft-adv-addr 10.0.0.2:7001") {
t.Error("missing -raft-adv-addr 10.0.0.2:7001")


@ -1,43 +0,0 @@
node:
id: "{{.NodeID}}"
type: "bootstrap"
listen_addresses:
- "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
data_dir: "{{.DataDir}}"
max_connections: 50
database:
data_dir: "{{.DataDir}}/rqlite"
replication_factor: 3
shard_count: 16
max_database_size: 1073741824
backup_interval: "24h"
rqlite_port: {{.RQLiteHTTPPort}}
rqlite_raft_port: {{.RQLiteRaftPort}}
rqlite_join_address: "{{.RQLiteJoinAddress}}"
cluster_sync_interval: "30s"
peer_inactivity_limit: "24h"
min_cluster_size: 3
ipfs:
cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
api_url: "http://localhost:{{.IPFSAPIPort}}"
timeout: "60s"
replication_factor: 3
enable_encryption: true
discovery:
bootstrap_peers:
{{range .BootstrapPeers}} - "{{.}}"
{{end}}
discovery_interval: "15s"
bootstrap_port: {{.P2PPort}}
http_adv_address: "{{.HTTPAdvAddress}}"
raft_adv_address: "{{.RaftAdvAddress}}"
node_namespace: "default"
security:
enable_tls: false
logging:
level: "info"
format: "console"


@ -5,6 +5,7 @@ node:
- "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
data_dir: "{{.DataDir}}"
max_connections: 50
domain: "{{.Domain}}"
database:
data_dir: "{{.DataDir}}/rqlite"
@ -42,3 +43,34 @@ logging:
level: "info"
format: "console"
http_gateway:
enabled: true
listen_addr: ":{{.UnifiedGatewayPort}}"
node_name: "{{.NodeID}}"
routes:
# Node internal services - accessible on unified gateway port
rqlite_http:
path_prefix: "/rqlite/http"
backend_url: "http://localhost:{{.RQLiteHTTPPort}}"
timeout: "30s"
websocket: false
rqlite_raft:
path_prefix: "/rqlite/raft"
backend_url: "http://localhost:{{.RQLiteRaftPort}}"
timeout: "30s"
websocket: false
ipfs_api:
path_prefix: "/ipfs/api"
backend_url: "http://localhost:{{.IPFSAPIPort}}"
timeout: "60s"
websocket: true
ipfs_swarm:
path_prefix: "/ipfs/swarm"
backend_url: "http://localhost:4102"
timeout: "30s"
websocket: false
cluster_api:
path_prefix: "/cluster"
backend_url: "http://localhost:{{.ClusterAPIPort}}"
timeout: "30s"
websocket: false
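The http_gateway block above maps onto the Go config types consumed by pkg/gateway later in this diff. The shape below is inferred from how the gateway code reads the fields; the YAML tags are assumptions:

type HTTPGatewayConfig struct {
	Enabled    bool                   `yaml:"enabled"`
	ListenAddr string                 `yaml:"listen_addr"`
	NodeName   string                 `yaml:"node_name"`
	Routes     map[string]RouteConfig `yaml:"routes"`
	HTTPS      HTTPSConfig            `yaml:"https"` // consumed by the HTTPS gateway below
}

type RouteConfig struct {
	PathPrefix string        `yaml:"path_prefix"`
	BackendURL string        `yaml:"backend_url"`
	Timeout    time.Duration `yaml:"timeout"` // "30s"/"60s" strings parsed by the loader
	WebSocket  bool          `yaml:"websocket"`
}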


@ -11,34 +11,21 @@ import (
//go:embed *.yaml *.service
var templatesFS embed.FS
// BootstrapConfigData holds parameters for bootstrap.yaml rendering
type BootstrapConfigData struct {
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
ClusterAPIPort int
IPFSAPIPort int // Default: 4501
BootstrapPeers []string // List of bootstrap peer multiaddrs
RQLiteJoinAddress string // Optional: join address for secondary bootstraps
HTTPAdvAddress string // Advertised HTTP address (IP:port)
RaftAdvAddress string // Advertised Raft address (IP:port)
}
// NodeConfigData holds parameters for node.yaml rendering
// NodeConfigData holds parameters for node.yaml rendering (unified - no bootstrap/node distinction)
type NodeConfigData struct {
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
RQLiteJoinAddress string
BootstrapPeers []string
ClusterAPIPort int
IPFSAPIPort int // Default: 4501+
HTTPAdvAddress string // Advertised HTTP address (IP:port)
RaftAdvAddress string // Advertised Raft address (IP:port)
NodeID string
P2PPort int
DataDir string
RQLiteHTTPPort int
RQLiteRaftPort int
RQLiteJoinAddress string // Optional: join address for joining existing cluster
BootstrapPeers []string // List of peer multiaddrs to connect to
ClusterAPIPort int
IPFSAPIPort int // Default: 4501
HTTPAdvAddress string // Advertised HTTP address (IP:port)
RaftAdvAddress string // Advertised Raft address (IP:port)
UnifiedGatewayPort int // Unified gateway port for all node services
Domain string // Domain for this node (e.g., node-123.orama.network)
}
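Rendering goes through RenderNodeConfig below; a minimal example with made-up values (the unified gateway port is an assumption):

cfg, err := RenderNodeConfig(NodeConfigData{
	NodeID:             "node-1",
	P2PPort:            4001,
	DataDir:            "/home/debros/.orama/data",
	RQLiteHTTPPort:     5001,
	RQLiteRaftPort:     7001,
	ClusterAPIPort:     9094,
	IPFSAPIPort:        4501,
	UnifiedGatewayPort: 8080, // example value
	Domain:             "node-1.orama.network",
})
if err != nil {
	// handle template error
}
_ = cfg // rendered node.yaml text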
// GatewayConfigData holds parameters for gateway.yaml rendering
@ -63,56 +50,37 @@ type OlricConfigData struct {
// SystemdIPFSData holds parameters for systemd IPFS service rendering
type SystemdIPFSData struct {
NodeType string
HomeDir string
IPFSRepoPath string
SecretsDir string
DebrosDir string
OramaDir string
}
// SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering
type SystemdIPFSClusterData struct {
NodeType string
HomeDir string
ClusterPath string
DebrosDir string
}
// SystemdRQLiteData holds parameters for systemd RQLite service rendering
type SystemdRQLiteData struct {
NodeType string
HomeDir string
HTTPPort int
RaftPort int
DataDir string
JoinAddr string
DebrosDir string
OramaDir string
}
// SystemdOlricData holds parameters for systemd Olric service rendering
type SystemdOlricData struct {
HomeDir string
ConfigPath string
DebrosDir string
OramaDir string
}
// SystemdNodeData holds parameters for systemd Node service rendering
type SystemdNodeData struct {
NodeType string
HomeDir string
ConfigFile string
DebrosDir string
OramaDir string
}
// SystemdGatewayData holds parameters for systemd Gateway service rendering
type SystemdGatewayData struct {
HomeDir string
DebrosDir string
}
// RenderBootstrapConfig renders the bootstrap config template with the given data
func RenderBootstrapConfig(data BootstrapConfigData) (string, error) {
return renderTemplate("bootstrap.yaml", data)
OramaDir string
}
// RenderNodeConfig renders the node config template with the given data
@ -140,11 +108,6 @@ func RenderIPFSClusterService(data SystemdIPFSClusterData) (string, error) {
return renderTemplate("systemd_ipfs_cluster.service", data)
}
// RenderRQLiteService renders the RQLite systemd service template
func RenderRQLiteService(data SystemdRQLiteData) (string, error) {
return renderTemplate("systemd_rqlite.service", data)
}
// RenderOlricService renders the Olric systemd service template
func RenderOlricService(data SystemdOlricData) (string, error) {
return renderTemplate("systemd_olric.service", data)


@ -5,46 +5,12 @@ import (
"testing"
)
func TestRenderBootstrapConfig(t *testing.T) {
data := BootstrapConfigData{
NodeID: "bootstrap",
P2PPort: 4001,
DataDir: "/home/debros/.debros/bootstrap",
RQLiteHTTPPort: 5001,
RQLiteRaftPort: 7001,
ClusterAPIPort: 9094,
IPFSAPIPort: 5001,
}
result, err := RenderBootstrapConfig(data)
if err != nil {
t.Fatalf("RenderBootstrapConfig failed: %v", err)
}
// Check for required fields
checks := []string{
"id: \"bootstrap\"",
"type: \"bootstrap\"",
"tcp/4001",
"rqlite_port: 5001",
"rqlite_raft_port: 7001",
"cluster_api_url: \"http://localhost:9094\"",
"api_url: \"http://localhost:5001\"",
}
for _, check := range checks {
if !strings.Contains(result, check) {
t.Errorf("Bootstrap config missing: %s", check)
}
}
}
func TestRenderNodeConfig(t *testing.T) {
bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
data := NodeConfigData{
NodeID: "node2",
P2PPort: 4002,
DataDir: "/home/debros/.debros/node2",
DataDir: "/home/debros/.orama/node2",
RQLiteHTTPPort: 5002,
RQLiteRaftPort: 7002,
RQLiteJoinAddress: "localhost:5001",


@ -1,7 +1,7 @@
[Unit]
Description=DeBros Gateway
After=debros-node-node.service
Wants=debros-node-node.service
After=debros-node.service
Wants=debros-node.service
[Service]
Type=simple
@ -9,7 +9,7 @@ User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/gateway --config {{.DebrosDir}}/data/gateway.yaml
ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=journal
@ -22,7 +22,7 @@ CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -20,7 +20,7 @@ SyslogIdentifier=ipfs-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -21,7 +21,7 @@ SyslogIdentifier=ipfs-cluster-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -10,7 +10,7 @@ User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/node --config {{.DebrosDir}}/configs/{{.ConfigFile}}
ExecStart={{.HomeDir}}/bin/node --config {{.OramaDir}}/configs/{{.ConfigFile}}
Restart=always
RestartSec=5
StandardOutput=journal
@ -20,7 +20,7 @@ SyslogIdentifier=debros-node-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -19,7 +19,7 @@ SyslogIdentifier=olric
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}
[Install]
WantedBy=multi-user.target


@ -1,25 +0,0 @@
[Unit]
Description=RQLite Database ({{.NodeType}})
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:{{.HTTPPort}} -http-adv-addr 127.0.0.1:{{.HTTPPort}} -raft-adv-addr 127.0.0.1:{{.RaftPort}} -raft-addr 0.0.0.0:{{.RaftPort}}{{if .JoinAddr}} -join {{.JoinAddr}} -join-attempts 30 -join-interval 10s{{end}} {{.DataDir}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-{{.NodeType}}
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
[Install]
WantedBy=multi-user.target


@ -45,7 +45,7 @@ type Config struct {
// HTTPS configuration
EnableHTTPS bool // Enable HTTPS with ACME (Let's Encrypt)
DomainName string // Domain name for HTTPS certificate
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.debros/tls-cache)
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.orama/tls-cache)
// Olric cache configuration
OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"]
@ -522,7 +522,7 @@ func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
return ipfsDiscoveryResult{}
}
configDir := filepath.Join(homeDir, ".debros")
configDir := filepath.Join(homeDir, ".orama")
// Try bootstrap.yaml first, then bootstrap2.yaml, node.yaml, node2.yaml, node3.yaml, node4.yaml
configFiles := []string{"bootstrap.yaml", "bootstrap2.yaml", "node.yaml", "node2.yaml", "node3.yaml", "node4.yaml"}

pkg/gateway/http_gateway.go Normal file

@ -0,0 +1,258 @@
package gateway
import (
"context"
"fmt"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"sync"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// HTTPGateway is the main reverse proxy router
type HTTPGateway struct {
logger *logging.ColoredLogger
config *config.HTTPGatewayConfig
router chi.Router
reverseProxies map[string]*httputil.ReverseProxy
mu sync.RWMutex
server *http.Server
}
// NewHTTPGateway creates a new HTTP reverse proxy gateway
func NewHTTPGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPGateway, error) {
if !cfg.Enabled {
return nil, nil
}
if logger == nil {
var err error
logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
}
gateway := &HTTPGateway{
logger: logger,
config: cfg,
router: chi.NewRouter(),
reverseProxies: make(map[string]*httputil.ReverseProxy),
}
// Set up router middleware
gateway.router.Use(middleware.RequestID)
gateway.router.Use(middleware.Logger)
gateway.router.Use(middleware.Recoverer)
gateway.router.Use(middleware.Timeout(30 * time.Second))
// Add health check endpoint
gateway.router.Get("/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, `{"status":"ok","node":"%s"}`, cfg.NodeName)
})
// Initialize reverse proxies and routes
if err := gateway.initializeRoutes(); err != nil {
return nil, fmt.Errorf("failed to initialize routes: %w", err)
}
gateway.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway initialized",
zap.String("node_name", cfg.NodeName),
zap.String("listen_addr", cfg.ListenAddr),
zap.Int("routes", len(cfg.Routes)),
)
return gateway, nil
}
// initializeRoutes sets up all reverse proxy routes
func (hg *HTTPGateway) initializeRoutes() error {
hg.mu.Lock()
defer hg.mu.Unlock()
for routeName, routeConfig := range hg.config.Routes {
// Validate backend URL
_, err := url.Parse(routeConfig.BackendURL)
if err != nil {
return fmt.Errorf("invalid backend URL for route %s: %w", routeName, err)
}
// Create reverse proxy; a timeout-aware transport is attached below when configured
proxy := &httputil.ReverseProxy{
Rewrite: func(r *httputil.ProxyRequest) {
// Keep original host for Host header
r.Out.Host = r.In.Host
// Set X-Forwarded-For header for logging
r.Out.Header.Set("X-Forwarded-For", getClientIP(r.In))
},
ErrorHandler: hg.proxyErrorHandler(routeName),
}
// Set timeout on transport
if routeConfig.Timeout > 0 {
proxy.Transport = &http.Transport{
Dial: (&net.Dialer{
Timeout: routeConfig.Timeout,
}).Dial,
ResponseHeaderTimeout: routeConfig.Timeout,
}
}
hg.reverseProxies[routeName] = proxy
// Register route handler
hg.registerRouteHandler(routeName, routeConfig, proxy)
hg.logger.ComponentInfo(logging.ComponentGeneral, "Route initialized",
zap.String("name", routeName),
zap.String("path", routeConfig.PathPrefix),
zap.String("backend", routeConfig.BackendURL),
)
}
return nil
}
// registerRouteHandler registers a route handler with the router
func (hg *HTTPGateway) registerRouteHandler(name string, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")
// Use Mount instead of Route for wildcard path handling
hg.router.Mount(pathPrefix, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
hg.handleProxyRequest(w, req, routeConfig, proxy)
}))
}
// handleProxyRequest handles a reverse proxy request
func (hg *HTTPGateway) handleProxyRequest(w http.ResponseWriter, req *http.Request, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
// Strip path prefix before forwarding
originalPath := req.URL.Path
pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")
if strings.HasPrefix(req.URL.Path, pathPrefix) {
// Remove the prefix but keep leading slash
strippedPath := strings.TrimPrefix(req.URL.Path, pathPrefix)
if strippedPath == "" {
strippedPath = "/"
}
req.URL.Path = strippedPath
}
// Update request URL to point to backend
backendURL, _ := url.Parse(routeConfig.BackendURL)
req.URL.Scheme = backendURL.Scheme
req.URL.Host = backendURL.Host
// Log the proxy request
hg.logger.ComponentInfo(logging.ComponentGeneral, "Proxy request",
zap.String("original_path", originalPath),
zap.String("stripped_path", req.URL.Path),
zap.String("backend", routeConfig.BackendURL),
zap.String("method", req.Method),
zap.String("client_ip", getClientIP(req)),
)
// Handle WebSocket upgrades if configured
if routeConfig.WebSocket && isWebSocketRequest(req) {
hg.logger.ComponentInfo(logging.ComponentGeneral, "WebSocket upgrade detected",
zap.String("path", originalPath),
)
}
// Forward the request
proxy.ServeHTTP(w, req)
}
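A concrete trace of the stripping logic, using the rqlite_http route from the node.yaml template above:

pathPrefix := strings.TrimSuffix("/rqlite/http", "/") // no-op here; guards trailing slashes
p := strings.TrimPrefix("/rqlite/http/db/query", pathPrefix)
if p == "" {
	p = "/"
}
fmt.Println(p) // "/db/query", which is what the backend at localhost:5001 receives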
// proxyErrorHandler returns an error handler for the reverse proxy
func (hg *HTTPGateway) proxyErrorHandler(routeName string) func(http.ResponseWriter, *http.Request, error) {
return func(w http.ResponseWriter, r *http.Request, err error) {
hg.logger.ComponentError(logging.ComponentGeneral, "Proxy error",
zap.String("route", routeName),
zap.String("path", r.URL.Path),
zap.String("method", r.Method),
zap.Error(err),
)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadGateway)
fmt.Fprintf(w, `{"error":"gateway error","route":"%s","detail":"%s"}`, routeName, err.Error())
}
}
// Start starts the HTTP gateway server
func (hg *HTTPGateway) Start(ctx context.Context) error {
if hg == nil || !hg.config.Enabled {
return nil
}
hg.server = &http.Server{
Addr: hg.config.ListenAddr,
Handler: hg.router,
}
// Listen for connections
listener, err := net.Listen("tcp", hg.config.ListenAddr)
if err != nil {
return fmt.Errorf("failed to listen on %s: %w", hg.config.ListenAddr, err)
}
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway server starting",
zap.String("node_name", hg.config.NodeName),
zap.String("listen_addr", hg.config.ListenAddr),
)
// Serve in a goroutine
go func() {
if err := hg.server.Serve(listener); err != nil && err != http.ErrServerClosed {
hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway server error", zap.Error(err))
}
}()
// Wait for context cancellation
<-ctx.Done()
return hg.Stop()
}
// Stop gracefully stops the HTTP gateway server
func (hg *HTTPGateway) Stop() error {
if hg == nil || hg.server == nil {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutting down")
if err := hg.server.Shutdown(ctx); err != nil {
hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway shutdown error", zap.Error(err))
return err
}
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutdown complete")
return nil
}
// Router returns the chi router for testing or extension
func (hg *HTTPGateway) Router() chi.Router {
return hg.router
}
// isWebSocketRequest checks if a request is a WebSocket upgrade request.
// The Connection header is a comma-separated token list (e.g. "keep-alive, Upgrade"),
// so match it case-insensitively rather than comparing the raw header value.
func isWebSocketRequest(r *http.Request) bool {
	return strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade") &&
		strings.EqualFold(r.Header.Get("Upgrade"), "websocket")
}

pkg/gateway/https.go Normal file

@ -0,0 +1,217 @@
package gateway
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"strings"
"time"
"go.uber.org/zap"
"golang.org/x/crypto/acme/autocert"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// HTTPSGateway extends HTTPGateway with HTTPS/TLS support
type HTTPSGateway struct {
*HTTPGateway
httpsConfig *config.HTTPSConfig
certManager *autocert.Manager
httpsServer *http.Server
httpServer *http.Server // For ACME challenge and redirect
}
// NewHTTPSGateway creates a new HTTPS gateway with Let's Encrypt autocert
func NewHTTPSGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPSGateway, error) {
// First create the base HTTP gateway
base, err := NewHTTPGateway(logger, cfg)
if err != nil {
return nil, err
}
if base == nil {
return nil, nil
}
if !cfg.HTTPS.Enabled {
// Return base gateway wrapped in HTTPSGateway for consistent interface
return &HTTPSGateway{HTTPGateway: base}, nil
}
gateway := &HTTPSGateway{
HTTPGateway: base,
httpsConfig: &cfg.HTTPS,
}
// Set up Let's Encrypt autocert if enabled
if cfg.HTTPS.AutoCert {
cacheDir := cfg.HTTPS.CacheDir
if cacheDir == "" {
cacheDir = "/home/debros/.orama/tls-cache"
}
gateway.certManager = &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(cfg.HTTPS.Domain),
Cache: autocert.DirCache(cacheDir),
Email: cfg.HTTPS.Email,
}
logger.ComponentInfo(logging.ComponentGeneral, "Let's Encrypt autocert configured",
zap.String("domain", cfg.HTTPS.Domain),
zap.String("cache_dir", cacheDir),
)
}
return gateway, nil
}
// Start starts both HTTP (for ACME) and HTTPS servers
func (g *HTTPSGateway) Start(ctx context.Context) error {
if g == nil {
return nil
}
	// If HTTPS is not enabled, just start the base HTTP gateway.
	// httpsConfig is nil when the gateway was constructed with HTTPS disabled, so guard that first.
	if g.httpsConfig == nil || !g.httpsConfig.Enabled {
		return g.HTTPGateway.Start(ctx)
	}
httpPort := g.httpsConfig.HTTPPort
if httpPort == 0 {
httpPort = 80
}
httpsPort := g.httpsConfig.HTTPSPort
if httpsPort == 0 {
httpsPort = 443
}
// Start HTTP server for ACME challenge and redirect
g.httpServer = &http.Server{
Addr: fmt.Sprintf(":%d", httpPort),
Handler: g.httpHandler(),
}
go func() {
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTP server starting (ACME/redirect)",
zap.Int("port", httpPort),
)
if err := g.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
g.logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
}
}()
// Set up TLS config
tlsConfig := &tls.Config{
MinVersion: tls.VersionTLS12,
}
if g.certManager != nil {
tlsConfig.GetCertificate = g.certManager.GetCertificate
} else if g.httpsConfig.CertFile != "" && g.httpsConfig.KeyFile != "" {
cert, err := tls.LoadX509KeyPair(g.httpsConfig.CertFile, g.httpsConfig.KeyFile)
if err != nil {
return fmt.Errorf("failed to load TLS certificate: %w", err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
} else {
return fmt.Errorf("HTTPS enabled but no certificate source configured")
}
// Start HTTPS server
g.httpsServer = &http.Server{
Addr: fmt.Sprintf(":%d", httpsPort),
Handler: g.router,
TLSConfig: tlsConfig,
}
listener, err := tls.Listen("tcp", g.httpsServer.Addr, tlsConfig)
if err != nil {
return fmt.Errorf("failed to create TLS listener: %w", err)
}
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway starting",
zap.String("domain", g.httpsConfig.Domain),
zap.Int("port", httpsPort),
)
go func() {
if err := g.httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
g.logger.ComponentError(logging.ComponentGeneral, "HTTPS server error", zap.Error(err))
}
}()
// Wait for context cancellation
<-ctx.Done()
return g.Stop()
}
// httpHandler returns a handler for the HTTP server (ACME challenge + redirect)
func (g *HTTPSGateway) httpHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Handle ACME challenge
if g.certManager != nil && strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
g.certManager.HTTPHandler(nil).ServeHTTP(w, r)
return
}
// Redirect HTTP to HTTPS
httpsPort := g.httpsConfig.HTTPSPort
if httpsPort == 0 {
httpsPort = 443
}
target := "https://" + r.Host + r.URL.RequestURI()
if httpsPort != 443 {
host := r.Host
if idx := strings.LastIndex(host, ":"); idx > 0 {
host = host[:idx]
}
target = fmt.Sprintf("https://%s:%d%s", host, httpsPort, r.URL.RequestURI())
}
http.Redirect(w, r, target, http.StatusMovedPermanently)
})
}
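The redirect keeps host and URI intact and only appends the port when it is non-standard. A quick package-internal check, sketched with net/http/httptest (the hostname is illustrative):

req := httptest.NewRequest("GET", "http://node-1.example.com/db/query?x=1", nil)
rec := httptest.NewRecorder()
g.httpHandler().ServeHTTP(rec, req)
// With HTTPSPort 443 (or 0): Location is https://node-1.example.com/db/query?x=1
// With HTTPSPort 8443:       Location is https://node-1.example.com:8443/db/query?x=1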
// Stop gracefully stops both HTTP and HTTPS servers
func (g *HTTPSGateway) Stop() error {
if g == nil {
return nil
}
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutting down")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var errs []error
if g.httpServer != nil {
if err := g.httpServer.Shutdown(ctx); err != nil {
errs = append(errs, fmt.Errorf("HTTP server shutdown: %w", err))
}
}
if g.httpsServer != nil {
if err := g.httpsServer.Shutdown(ctx); err != nil {
errs = append(errs, fmt.Errorf("HTTPS server shutdown: %w", err))
}
}
if g.HTTPGateway.server != nil {
if err := g.HTTPGateway.Stop(); err != nil {
errs = append(errs, fmt.Errorf("base gateway shutdown: %w", err))
}
}
if len(errs) > 0 {
return fmt.Errorf("shutdown errors: %v", errs)
}
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutdown complete")
return nil
}


@ -0,0 +1,212 @@
package gateway
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"strings"
"sync"
"time"
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// TCPSNIGateway handles SNI-based TCP routing for services like RQLite Raft, IPFS, etc.
type TCPSNIGateway struct {
logger *logging.ColoredLogger
config *config.SNIConfig
listener net.Listener
routes map[string]string
mu sync.RWMutex
running bool
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
tlsConfig *tls.Config
}
// NewTCPSNIGateway creates a new TCP SNI-based gateway
func NewTCPSNIGateway(logger *logging.ColoredLogger, cfg *config.SNIConfig) (*TCPSNIGateway, error) {
if !cfg.Enabled {
return nil, nil
}
if logger == nil {
var err error
logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
if err != nil {
return nil, fmt.Errorf("failed to create logger: %w", err)
}
}
cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
if err != nil {
return nil, fmt.Errorf("failed to load TLS certificate: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
gateway := &TCPSNIGateway{
logger: logger,
config: cfg,
routes: make(map[string]string),
ctx: ctx,
cancel: cancel,
tlsConfig: &tls.Config{
Certificates: []tls.Certificate{cert},
},
}
for hostname, backend := range cfg.Routes {
gateway.routes[strings.ToLower(hostname)] = backend
}
logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway initialized",
zap.String("listen_addr", cfg.ListenAddr),
zap.Int("routes", len(cfg.Routes)),
)
return gateway, nil
}
// Start starts the TCP SNI gateway server
func (g *TCPSNIGateway) Start(ctx context.Context) error {
if g == nil || !g.config.Enabled {
return nil
}
listener, err := tls.Listen("tcp", g.config.ListenAddr, g.tlsConfig)
if err != nil {
return fmt.Errorf("failed to listen on %s: %w", g.config.ListenAddr, err)
}
g.listener = listener
g.running = true
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway starting",
zap.String("listen_addr", g.config.ListenAddr),
)
g.wg.Add(1)
go func() {
defer g.wg.Done()
for {
conn, err := listener.Accept()
if err != nil {
select {
case <-g.ctx.Done():
return
default:
g.logger.ComponentError(logging.ComponentGeneral, "Accept error", zap.Error(err))
continue
}
}
g.wg.Add(1)
go func(c net.Conn) {
defer g.wg.Done()
g.handleConnection(c)
}(conn)
}
}()
select {
case <-ctx.Done():
case <-g.ctx.Done():
}
return g.Stop()
}
// handleConnection routes a TCP connection based on SNI
func (g *TCPSNIGateway) handleConnection(conn net.Conn) {
defer conn.Close()
tlsConn, ok := conn.(*tls.Conn)
if !ok {
g.logger.ComponentError(logging.ComponentGeneral, "Expected TLS connection")
return
}
if err := tlsConn.Handshake(); err != nil {
g.logger.ComponentError(logging.ComponentGeneral, "TLS handshake failed", zap.Error(err))
return
}
serverName := strings.ToLower(tlsConn.ConnectionState().ServerName)
if serverName == "" {
g.logger.ComponentError(logging.ComponentGeneral, "No SNI provided")
return
}
g.mu.RLock()
backend, found := g.routes[serverName]
if !found {
for prefix, be := range g.routes {
if strings.HasPrefix(serverName, prefix+".") {
backend = be
found = true
break
}
}
}
g.mu.RUnlock()
if !found {
g.logger.ComponentError(logging.ComponentGeneral, "No route for SNI",
zap.String("server_name", serverName),
)
return
}
g.logger.ComponentInfo(logging.ComponentGeneral, "Routing connection",
zap.String("server_name", serverName),
zap.String("backend", backend),
)
backendConn, err := net.DialTimeout("tcp", backend, 10*time.Second)
if err != nil {
g.logger.ComponentError(logging.ComponentGeneral, "Backend connect failed",
zap.String("backend", backend),
zap.Error(err),
)
return
}
defer backendConn.Close()
errc := make(chan error, 2)
go func() { _, err := io.Copy(backendConn, tlsConn); errc <- err }()
go func() { _, err := io.Copy(tlsConn, backendConn); errc <- err }()
<-errc
}
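From the client side, backend selection is purely a function of the SNI value sent during the TLS handshake. A sketch (hostnames and the route mapping are illustrative):

// Assuming the gateway has a route like "rqlite-raft.node-1.example.com" -> "127.0.0.1:7001".
conn, err := tls.Dial("tcp", "node-1.example.com:443", &tls.Config{
	ServerName: "rqlite-raft.node-1.example.com", // must match (or prefix-match) a configured route
})
if err != nil {
	// handle dial/handshake error
}
defer conn.Close()
// Bytes written here are piped verbatim to the backend mapped for that SNI.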
// Stop gracefully stops the TCP SNI gateway
func (g *TCPSNIGateway) Stop() error {
if g == nil || !g.running {
return nil
}
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutting down")
g.cancel()
if g.listener != nil {
g.listener.Close()
}
done := make(chan struct{})
go func() { g.wg.Wait(); close(done) }()
select {
case <-done:
case <-time.After(10 * time.Second):
g.logger.ComponentWarn(logging.ComponentGeneral, "Shutdown timeout")
}
g.running = false
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutdown complete")
return nil
}

pkg/installer/installer.go Normal file

@ -0,0 +1,583 @@
// Package installer provides an interactive TUI installer for Orama Network
package installer
import (
"fmt"
"net"
"os"
"regexp"
"strings"
"github.com/charmbracelet/bubbles/textinput"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
)
// InstallerConfig holds the configuration gathered from the TUI
type InstallerConfig struct {
VpsIP string
Domain string
JoinAddress string
Peers []string
ClusterSecret string
Branch string
IsFirstNode bool
}
// Step represents a step in the installation wizard
type Step int
const (
StepWelcome Step = iota
StepNodeType
StepVpsIP
StepDomain
StepJoinAddress
StepClusterSecret
StepBranch
StepConfirm
StepInstalling
StepDone
)
// Model is the bubbletea model for the installer
type Model struct {
step Step
config InstallerConfig
textInput textinput.Model
err error
width int
height int
installing bool
installOutput []string
cursor int // For selection menus
}
// Styles
var (
titleStyle = lipgloss.NewStyle().
Bold(true).
Foreground(lipgloss.Color("#00D4AA")).
MarginBottom(1)
subtitleStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#888888")).
MarginBottom(1)
focusedStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#00D4AA"))
blurredStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#666666"))
cursorStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#00D4AA"))
helpStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#626262")).
MarginTop(1)
errorStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#FF6B6B")).
Bold(true)
successStyle = lipgloss.NewStyle().
Foreground(lipgloss.Color("#00D4AA")).
Bold(true)
boxStyle = lipgloss.NewStyle().
Border(lipgloss.RoundedBorder()).
BorderForeground(lipgloss.Color("#00D4AA")).
Padding(1, 2)
)
// NewModel creates a new installer model
func NewModel() Model {
ti := textinput.New()
ti.Focus()
ti.CharLimit = 256
ti.Width = 50
return Model{
step: StepWelcome,
textInput: ti,
config: InstallerConfig{
Branch: "main",
},
}
}
// Init initializes the model
func (m Model) Init() tea.Cmd {
return textinput.Blink
}
// Update handles messages
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.WindowSizeMsg:
m.width = msg.Width
m.height = msg.Height
return m, nil
case tea.KeyMsg:
switch msg.String() {
case "ctrl+c", "q":
if m.step != StepInstalling {
return m, tea.Quit
}
case "enter":
return m.handleEnter()
case "up", "k":
if m.step == StepNodeType || m.step == StepBranch {
if m.cursor > 0 {
m.cursor--
}
}
case "down", "j":
if m.step == StepNodeType || m.step == StepBranch {
if m.cursor < 1 {
m.cursor++
}
}
case "esc":
if m.step > StepWelcome && m.step < StepInstalling {
m.step--
m.err = nil
m.setupStepInput()
}
}
}
// Update text input for input steps
if m.step == StepVpsIP || m.step == StepDomain || m.step == StepJoinAddress || m.step == StepClusterSecret {
var cmd tea.Cmd
m.textInput, cmd = m.textInput.Update(msg)
return m, cmd
}
return m, nil
}
func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
switch m.step {
case StepWelcome:
m.step = StepNodeType
m.cursor = 0
case StepNodeType:
m.config.IsFirstNode = m.cursor == 0
m.step = StepVpsIP
m.setupStepInput()
case StepVpsIP:
ip := strings.TrimSpace(m.textInput.Value())
if err := validateIP(ip); err != nil {
m.err = err
return m, nil
}
m.config.VpsIP = ip
m.err = nil
m.step = StepDomain
m.setupStepInput()
case StepDomain:
domain := strings.TrimSpace(m.textInput.Value())
if err := validateDomain(domain); err != nil {
m.err = err
return m, nil
}
m.config.Domain = domain
m.err = nil
if m.config.IsFirstNode {
m.step = StepBranch
m.cursor = 0
} else {
m.step = StepJoinAddress
m.setupStepInput()
}
case StepJoinAddress:
addr := strings.TrimSpace(m.textInput.Value())
if addr != "" {
if err := validateJoinAddress(addr); err != nil {
m.err = err
return m, nil
}
m.config.JoinAddress = addr
}
m.err = nil
m.step = StepClusterSecret
m.setupStepInput()
case StepClusterSecret:
secret := strings.TrimSpace(m.textInput.Value())
if err := validateClusterSecret(secret); err != nil {
m.err = err
return m, nil
}
m.config.ClusterSecret = secret
m.err = nil
m.step = StepBranch
m.cursor = 0
case StepBranch:
if m.cursor == 0 {
m.config.Branch = "main"
} else {
m.config.Branch = "nightly"
}
m.step = StepConfirm
case StepConfirm:
m.step = StepInstalling
return m, m.startInstallation()
case StepDone:
return m, tea.Quit
}
return m, nil
}
func (m *Model) setupStepInput() {
m.textInput.Reset()
m.textInput.Focus()
switch m.step {
case StepVpsIP:
m.textInput.Placeholder = "e.g., 203.0.113.1"
// Try to auto-detect public IP
if ip := detectPublicIP(); ip != "" {
m.textInput.SetValue(ip)
}
case StepDomain:
m.textInput.Placeholder = "e.g., node-1.orama.network"
case StepJoinAddress:
m.textInput.Placeholder = "e.g., 203.0.113.1:7001 (or leave empty)"
case StepClusterSecret:
m.textInput.Placeholder = "64 hex characters"
m.textInput.EchoMode = textinput.EchoPassword
}
}
func (m Model) startInstallation() tea.Cmd {
return func() tea.Msg {
// This would trigger the actual installation
// For now, we return the config for the CLI to handle
return installCompleteMsg{config: m.config}
}
}
type installCompleteMsg struct {
config InstallerConfig
}
// View renders the UI
func (m Model) View() string {
var s strings.Builder
// Header
s.WriteString(renderHeader())
s.WriteString("\n\n")
switch m.step {
case StepWelcome:
s.WriteString(m.viewWelcome())
case StepNodeType:
s.WriteString(m.viewNodeType())
case StepVpsIP:
s.WriteString(m.viewVpsIP())
case StepDomain:
s.WriteString(m.viewDomain())
case StepJoinAddress:
s.WriteString(m.viewJoinAddress())
case StepClusterSecret:
s.WriteString(m.viewClusterSecret())
case StepBranch:
s.WriteString(m.viewBranch())
case StepConfirm:
s.WriteString(m.viewConfirm())
case StepInstalling:
s.WriteString(m.viewInstalling())
case StepDone:
s.WriteString(m.viewDone())
}
return s.String()
}
func renderHeader() string {
logo := `
  ___  ____      _    __  __    _
 / _ \|  _ \    / \  |  \/  |  / \
| | | | |_) |  / _ \ | |\/| | / _ \
| |_| |  _ <  / ___ \| |  | |/ ___ \
 \___/|_| \_\/_/   \_\_|  |_/_/   \_\
`
return titleStyle.Render(logo) + "\n" + subtitleStyle.Render("Network Installation Wizard")
}
func (m Model) viewWelcome() string {
var s strings.Builder
s.WriteString(boxStyle.Render(
titleStyle.Render("Welcome to Orama Network!") + "\n\n" +
"This wizard will guide you through setting up your node.\n\n" +
"You'll need:\n" +
" • A public IP address for your server\n" +
" • A domain name (e.g., node-1.orama.network)\n" +
" • For joining: cluster secret from existing node\n",
))
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Press Enter to continue • q to quit"))
return s.String()
}
func (m Model) viewNodeType() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Node Type") + "\n\n")
s.WriteString("Is this the first node in a new cluster?\n\n")
options := []string{"Yes, create new cluster", "No, join existing cluster"}
for i, opt := range options {
if i == m.cursor {
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
} else {
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
}
}
s.WriteString("\n")
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewVpsIP() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Server IP Address") + "\n\n")
s.WriteString("Enter your server's public IP address:\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewDomain() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Domain Name") + "\n\n")
s.WriteString("Enter the domain for this node:\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewJoinAddress() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Join Address") + "\n\n")
s.WriteString("Enter the RQLite address to join (IP:port):\n")
s.WriteString(subtitleStyle.Render("Leave empty to auto-detect from peers") + "\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewClusterSecret() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Cluster Secret") + "\n\n")
s.WriteString("Enter the cluster secret from an existing node:\n")
s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/cluster-secret") + "\n\n")
s.WriteString(m.textInput.View())
if m.err != nil {
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
}
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewBranch() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Release Channel") + "\n\n")
s.WriteString("Select the release channel:\n\n")
options := []string{"main (stable)", "nightly (latest features)"}
for i, opt := range options {
if i == m.cursor {
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
} else {
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
}
}
s.WriteString("\n")
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
return s.String()
}
func (m Model) viewConfirm() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Confirm Installation") + "\n\n")
config := fmt.Sprintf(
" VPS IP: %s\n"+
" Domain: %s\n"+
" Branch: %s\n"+
" Node Type: %s\n",
m.config.VpsIP,
m.config.Domain,
m.config.Branch,
map[bool]string{true: "First node (new cluster)", false: "Join existing cluster"}[m.config.IsFirstNode],
)
if !m.config.IsFirstNode {
if m.config.JoinAddress != "" {
config += fmt.Sprintf(" Join Addr: %s\n", m.config.JoinAddress)
}
config += fmt.Sprintf(" Secret: %s...\n", m.config.ClusterSecret[:8])
}
s.WriteString(boxStyle.Render(config))
s.WriteString("\n\n")
s.WriteString(helpStyle.Render("Press Enter to install • Esc to go back"))
return s.String()
}
func (m Model) viewInstalling() string {
var s strings.Builder
s.WriteString(titleStyle.Render("Installing...") + "\n\n")
s.WriteString("Please wait while the node is being configured.\n\n")
for _, line := range m.installOutput {
s.WriteString(line + "\n")
}
return s.String()
}
func (m Model) viewDone() string {
var s strings.Builder
s.WriteString(successStyle.Render("✓ Installation Complete!") + "\n\n")
s.WriteString("Your node is now running.\n\n")
s.WriteString("Useful commands:\n")
s.WriteString(" orama status - Check service status\n")
s.WriteString(" orama logs node - View node logs\n")
s.WriteString(" orama logs gateway - View gateway logs\n")
s.WriteString("\n")
s.WriteString(helpStyle.Render("Press Enter or q to exit"))
return s.String()
}
// GetConfig returns the installer configuration after the TUI completes
func (m Model) GetConfig() InstallerConfig {
return m.config
}
// Validation helpers
func validateIP(ip string) error {
if ip == "" {
return fmt.Errorf("IP address is required")
}
if net.ParseIP(ip) == nil {
return fmt.Errorf("invalid IP address format")
}
return nil
}
func validateDomain(domain string) error {
if domain == "" {
return fmt.Errorf("domain is required")
}
// Basic domain validation
domainRegex := regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?)*$`)
if !domainRegex.MatchString(domain) {
return fmt.Errorf("invalid domain format")
}
return nil
}
func validateJoinAddress(addr string) error {
if addr == "" {
return nil // Optional
}
_, _, err := net.SplitHostPort(addr)
if err != nil {
return fmt.Errorf("invalid address format (expected IP:port)")
}
return nil
}
func validateClusterSecret(secret string) error {
if len(secret) != 64 {
return fmt.Errorf("cluster secret must be 64 hex characters")
}
secretRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
if !secretRegex.MatchString(secret) {
return fmt.Errorf("cluster secret must be valid hexadecimal")
}
return nil
}
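For context, 64 hex characters encode exactly 32 random bytes. A hedged sketch of how such a secret could be produced (generateClusterSecret is a hypothetical helper; the codebase's actual generator may live elsewhere and differ):

package secret

import (
	"crypto/rand"
	"encoding/hex"
)

// generateClusterSecret returns 32 random bytes hex-encoded, which is
// exactly the 64-hex-character shape validateClusterSecret accepts.
func generateClusterSecret() (string, error) {
	buf := make([]byte, 32)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}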
func detectPublicIP() string {
// Best-effort: return the first non-loopback, non-private IPv4 found on any local interface
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil && !ipnet.IP.IsPrivate() {
return ipnet.IP.String()
}
}
}
return ""
}
// Run starts the TUI installer and returns the configuration
func Run() (*InstallerConfig, error) {
// Check if running as root
if os.Geteuid() != 0 {
return nil, fmt.Errorf("installer must be run as root (use sudo)")
}
p := tea.NewProgram(NewModel(), tea.WithAltScreen())
finalModel, err := p.Run()
if err != nil {
return nil, err
}
// handleEnter uses a pointer receiver, so the final model may come back as *Model.
var m Model
switch fm := finalModel.(type) {
case Model:
m = fm
case *Model:
m = *fm
default:
return nil, fmt.Errorf("unexpected model type %T", finalModel)
}
if m.step == StepInstalling || m.step == StepDone {
config := m.GetConfig()
return &config, nil
}
return nil, fmt.Errorf("installation cancelled")
}
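A sketch of how a CLI entry point might consume this package, using only the exported Run and InstallerConfig shown above (the real `orama install` wiring may differ):

package main

import (
	"fmt"
	"os"

	"github.com/DeBrosOfficial/network/pkg/installer"
)

func main() {
	cfg, err := installer.Run() // blocks until the TUI finishes or is cancelled
	if err != nil {
		fmt.Fprintln(os.Stderr, "install aborted:", err)
		os.Exit(1)
	}
	// Hand the gathered answers to the real provisioning logic.
	fmt.Printf("installing %s (first node: %v, branch: %s)\n",
		cfg.Domain, cfg.IsFirstNode, cfg.Branch)
}

Because Run refuses to start unless the effective UID is 0, the caller needs no root check of its own.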

View File

@ -86,7 +86,7 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
}
// Determine cluster path based on data directory structure
-// Check if dataDir contains specific node names (e.g., ~/.debros/bootstrap, ~/.debros/bootstrap2, ~/.debros/node2-4)
+// Check if dataDir contains specific node names (e.g., ~/.orama/bootstrap, ~/.orama/bootstrap2, ~/.orama/node2-4)
clusterPath := filepath.Join(dataDir, "ipfs-cluster")
nodeNames := []string{"bootstrap", "bootstrap2", "node2", "node3", "node4"}
for _, nodeName := range nodeNames {
@ -103,11 +103,11 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
// Load or generate cluster secret
secretPath := filepath.Join(dataDir, "..", "cluster-secret")
if strings.Contains(dataDir, ".debros") {
// Try to find cluster-secret in ~/.debros
if strings.Contains(dataDir, ".orama") {
// Try to find cluster-secret in ~/.orama
home, err := os.UserHomeDir()
if err == nil {
secretPath = filepath.Join(home, ".debros", "cluster-secret")
secretPath = filepath.Join(home, ".orama", "cluster-secret")
}
}

View File

@ -179,6 +179,33 @@ func NewDefaultLogger(component Component) (*ColoredLogger, error) {
return NewColoredLogger(component, true)
}
// NewFileLogger creates a logger that writes to a file
func NewFileLogger(component Component, filePath string, enableColors bool) (*ColoredLogger, error) {
// Create encoder
encoder := coloredConsoleEncoder(enableColors)
// Create file writer
file, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
return nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
}
// Create core
core := zapcore.NewCore(
encoder,
zapcore.AddSync(file),
zapcore.DebugLevel,
)
// Create logger with caller information
logger := zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
return &ColoredLogger{
Logger: logger,
enableColors: enableColors,
}, nil
}
// Component-specific logging methods
func (l *ColoredLogger) ComponentInfo(component Component, msg string, fields ...zap.Field) {
if l.enableColors {
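A hedged usage sketch for the new NewFileLogger (the path is a made-up example; colors are disabled so ANSI escape codes don't end up in the file):

package main

import "github.com/DeBrosOfficial/network/pkg/logging"

func main() {
	logger, err := logging.NewFileLogger(logging.ComponentGeneral, "/tmp/orama-example.log", false)
	if err != nil {
		panic(err)
	}
	logger.ComponentInfo(logging.ComponentGeneral, "file logger ready")
}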

View File

@ -23,6 +23,7 @@ import (
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/discovery"
"github.com/DeBrosOfficial/network/pkg/encryption"
"github.com/DeBrosOfficial/network/pkg/gateway"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/pubsub"
@ -50,6 +51,9 @@ type Node struct {
// IPFS Cluster config manager
clusterConfigManager *ipfs.ClusterConfigManager
// HTTP reverse proxy gateway
httpGateway *gateway.HTTPGateway
}
// NewNode creates a new network node
@ -637,6 +641,11 @@ func (n *Node) stopPeerDiscovery() {
func (n *Node) Stop() error {
n.logger.ComponentInfo(logging.ComponentNode, "Stopping network node")
// Stop HTTP Gateway
if n.httpGateway != nil {
_ = n.httpGateway.Stop()
}
// Stop cluster discovery
if n.clusterDiscovery != nil {
n.clusterDiscovery.Stop()
@ -667,6 +676,43 @@ func (n *Node) Stop() error {
return nil
}
// startHTTPGateway initializes and starts the HTTP reverse proxy gateway
func (n *Node) startHTTPGateway(ctx context.Context) error {
if !n.config.HTTPGateway.Enabled {
n.logger.ComponentInfo(logging.ComponentNode, "HTTP Gateway disabled in config")
return nil
}
// Create separate logger for unified gateway
logFile := filepath.Join(os.ExpandEnv(n.config.Node.DataDir), "..", "logs", fmt.Sprintf("gateway-%s.log", n.config.HTTPGateway.NodeName))
// Ensure logs directory exists
logsDir := filepath.Dir(logFile)
if err := os.MkdirAll(logsDir, 0755); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
httpGatewayLogger, err := logging.NewFileLogger(logging.ComponentGeneral, logFile, false)
if err != nil {
return fmt.Errorf("failed to create HTTP gateway logger: %w", err)
}
// Create and start HTTP gateway with its own logger
n.httpGateway, err = gateway.NewHTTPGateway(httpGatewayLogger, &n.config.HTTPGateway)
if err != nil {
return fmt.Errorf("failed to create HTTP gateway: %w", err)
}
// Start gateway in a goroutine (it handles its own lifecycle)
go func() {
if err := n.httpGateway.Start(ctx); err != nil {
n.logger.ComponentError(logging.ComponentNode, "HTTP Gateway error", zap.Error(err))
}
}()
return nil
}
// Starts the network node
func (n *Node) Start(ctx context.Context) error {
n.logger.Info("Starting network node", zap.String("data_dir", n.config.Node.DataDir))
@ -687,6 +733,12 @@ func (n *Node) Start(ctx context.Context) error {
return fmt.Errorf("failed to create data directory: %w", err)
}
// Start HTTP Gateway first (doesn't depend on other services)
if err := n.startHTTPGateway(ctx); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to start HTTP Gateway", zap.Error(err))
// Don't fail node startup if gateway fails
}
// Start LibP2P host first (needed for cluster discovery)
if err := n.startLibP2P(); err != nil {
return fmt.Errorf("failed to start LibP2P: %w", err)

View File

@ -75,7 +75,7 @@ for pattern in "${SPECIFIC_PATTERNS[@]}"; do
done
# Method 3: Kill processes using PID files
PIDS_DIR="$HOME/.debros/.pids"
PIDS_DIR="$HOME/.orama/.pids"
if [[ -d "$PIDS_DIR" ]]; then
for pidfile in "$PIDS_DIR"/*.pid; do
if [[ -f "$pidfile" ]]; then

View File

@ -1,222 +0,0 @@
#!/bin/bash
# DeBros Network Installation Script
# Downloads dbn from GitHub releases and runs the new 'dbn prod install' flow
#
# Supported: Ubuntu 20.04+, Debian 11+
#
# Usage:
# curl -fsSL https://install.debros.network | bash
# OR
# bash scripts/install-debros-network.sh
# OR with specific flags:
# bash scripts/install-debros-network.sh --bootstrap
# bash scripts/install-debros-network.sh --vps-ip 1.2.3.4 --peers /ip4/1.2.3.4/tcp/4001/p2p/Qm...
# bash scripts/install-debros-network.sh --domain example.com
set -e
set -o pipefail
trap 'error "An error occurred. Installation aborted."; exit 1' ERR
# Color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
BLUE='\033[38;2;2;128;175m'
YELLOW='\033[1;33m'
NOCOLOR='\033[0m'
# Configuration
GITHUB_REPO="DeBrosOfficial/network"
GITHUB_API="https://api.github.com/repos/$GITHUB_REPO"
INSTALL_DIR="/usr/local/bin"
log() { echo -e "${CYAN}[$(date '+%Y-%m-%d %H:%M:%S')]${NOCOLOR} $1"; }
error() { echo -e "${RED}[ERROR]${NOCOLOR} $1" >&2; }
success() { echo -e "${GREEN}[SUCCESS]${NOCOLOR} $1"; }
warning() { echo -e "${YELLOW}[WARNING]${NOCOLOR} $1" >&2; }
display_banner() {
echo -e "${BLUE}========================================================================${NOCOLOR}"
echo -e "${CYAN}
 ____       ____                  _   _      _                      _
|  _ \\  ___| __ ) _ __ ___  ___  | \\ | | ___| |___      _____  _ __| | __
| | | |/ _ \\  _ \\|  __/ _ \\/ __| |  \\| |/ _ \\ __\\ \\ /\\ / / _ \\|  __| |/ /
| |_| |  __/ |_) | | | (_) \\__ \\ | |\\  |  __/ |_ \\ V  V / (_) | |  |   <
|____/ \\___|____/|_|  \\___/|___/ |_| \\_|\\___|\\__| \\_/\\_/ \\___/|_|  |_|\\_\\
${NOCOLOR}"
echo -e "${BLUE}========================================================================${NOCOLOR}"
echo -e "${GREEN} Production Installation ${NOCOLOR}"
echo -e "${BLUE}========================================================================${NOCOLOR}"
}
detect_os() {
if [ ! -f /etc/os-release ]; then
error "Cannot detect operating system"
exit 1
fi
. /etc/os-release
OS=$ID
VERSION=$VERSION_ID
# Support Debian and Ubuntu
case $OS in
ubuntu|debian)
log "Detected OS: $OS ${VERSION:-unknown}"
;;
*)
warning "Unsupported operating system: $OS (may not work)"
;;
esac
}
check_architecture() {
ARCH=$(uname -m)
case $ARCH in
x86_64)
GITHUB_ARCH="amd64"
;;
aarch64|arm64)
GITHUB_ARCH="arm64"
;;
*)
error "Unsupported architecture: $ARCH"
echo -e "${YELLOW}Supported: x86_64, aarch64/arm64${NOCOLOR}"
exit 1
;;
esac
log "Architecture: $ARCH (using $GITHUB_ARCH)"
}
check_root() {
if [[ $EUID -ne 0 ]]; then
error "This script must be run as root"
echo -e "${YELLOW}Please run with sudo:${NOCOLOR}"
echo -e "${CYAN} sudo bash <(curl -fsSL https://install.debros.network)${NOCOLOR}"
exit 1
fi
}
get_latest_release() {
log "Fetching latest release..."
# Try to get latest release with better error handling
RELEASE_DATA=""
if command -v jq &>/dev/null; then
# Get the latest release (including pre-releases/nightly)
RELEASE_DATA=$(curl -fsSL -H "Accept: application/vnd.github+json" "$GITHUB_API/releases" 2>&1)
if [ $? -ne 0 ]; then
error "Failed to fetch release data from GitHub API"
error "Response: $RELEASE_DATA"
exit 1
fi
LATEST_RELEASE=$(echo "$RELEASE_DATA" | jq -r '.[0] | .tag_name' 2>/dev/null)
else
RELEASE_DATA=$(curl -fsSL "$GITHUB_API/releases" 2>&1)
if [ $? -ne 0 ]; then
error "Failed to fetch release data from GitHub API"
error "Response: $RELEASE_DATA"
exit 1
fi
LATEST_RELEASE=$(echo "$RELEASE_DATA" | grep '"tag_name"' | head -1 | cut -d'"' -f4)
fi
if [ -z "$LATEST_RELEASE" ] || [ "$LATEST_RELEASE" = "null" ]; then
error "Could not determine latest release version"
error "GitHub API response may be empty or rate-limited"
exit 1
fi
log "Latest release: $LATEST_RELEASE"
}
download_and_install_cli() {
BINARY_NAME="debros-network_${LATEST_RELEASE#v}_linux_${GITHUB_ARCH}.tar.gz"
DOWNLOAD_URL="$GITHUB_REPO/releases/download/$LATEST_RELEASE/$BINARY_NAME"
log "Downloading dbn from GitHub releases..."
log "URL: https://github.com/$DOWNLOAD_URL"
# Clean up any stale binaries
rm -f /tmp/network-cli /tmp/dbn.tar.gz "$INSTALL_DIR/dbn"
if ! curl -fsSL -o /tmp/dbn.tar.gz "https://github.com/$DOWNLOAD_URL"; then
error "Failed to download dbn"
exit 1
fi
# Verify the download was successful
if [ ! -f /tmp/dbn.tar.gz ]; then
error "Download file not found"
exit 1
fi
log "Extracting dbn..."
# Extract to /tmp
tar -xzf /tmp/dbn.tar.gz -C /tmp/
# Check for extracted binary (could be named network-cli or dbn)
EXTRACTED_BINARY=""
if [ -f /tmp/network-cli ]; then
EXTRACTED_BINARY="/tmp/network-cli"
elif [ -f /tmp/dbn ]; then
EXTRACTED_BINARY="/tmp/dbn"
else
error "Failed to extract binary (neither network-cli nor dbn found)"
ls -la /tmp/ | grep -E "(network|cli|dbn)"
exit 1
fi
chmod +x "$EXTRACTED_BINARY"
log "Installing dbn to $INSTALL_DIR..."
# Always rename to dbn during installation
mv "$EXTRACTED_BINARY" "$INSTALL_DIR/dbn"
# Sanity check: verify the installed binary is functional and reports correct version
if ! "$INSTALL_DIR/dbn" version &>/dev/null; then
error "Installed dbn failed sanity check (version command failed)"
rm -f "$INSTALL_DIR/dbn"
exit 1
fi
# Clean up
rm -f /tmp/dbn.tar.gz
success "dbn installed successfully"
}
# Main flow
display_banner
# Check prerequisites
check_root
detect_os
check_architecture
# Download and install
get_latest_release
download_and_install_cli
# Show next steps
echo ""
echo -e "${GREEN}Installation complete!${NOCOLOR}"
echo ""
echo -e "${CYAN}Next, run the production setup:${NOCOLOR}"
echo ""
echo "Bootstrap node (first node, main branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap${NOCOLOR}"
echo ""
echo "Bootstrap node (nightly branch):"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --branch nightly${NOCOLOR}"
echo ""
echo "Secondary node (join existing cluster):"
echo -e " ${BLUE}sudo dbn prod install --vps-ip <bootstrap_ip> --peers <multiaddr>${NOCOLOR}"
echo ""
echo "With HTTPS/domain:"
echo -e " ${BLUE}sudo dbn prod install --bootstrap --domain example.com${NOCOLOR}"
echo ""
echo "For more help:"
echo -e " ${BLUE}dbn prod --help${NOCOLOR}"
echo ""

View File

@ -0,0 +1,53 @@
#!/bin/bash
# Setup local domains for DeBros Network development
# Adds entries to /etc/hosts for node-1.local through node-5.local
# Maps them to 127.0.0.1 for local development
set -e
HOSTS_FILE="/etc/hosts"
NODES=("node-1" "node-2" "node-3" "node-4" "node-5")
# Must be running as root to modify /etc/hosts
if [ "$EUID" -ne 0 ]; then
echo "This script requires sudo to modify /etc/hosts"
echo "Please run: sudo bash scripts/setup-local-domains.sh"
exit 1
fi
# Function to add or update domain entry
add_domain() {
local domain=$1
local ip="127.0.0.1"
# Check if domain already exists
if grep -q "^[[:space:]]*$ip[[:space:]]\+$domain" "$HOSTS_FILE"; then
echo "$domain already configured"
return 0
fi
# Add domain to /etc/hosts
echo "$ip $domain" >> "$HOSTS_FILE"
echo "✓ Added $domain -> $ip"
}
echo "Setting up local domains for DeBros Network..."
echo ""
# Add each node domain
for node in "${NODES[@]}"; do
add_domain "${node}.local"
done
echo ""
echo "✓ Local domains configured successfully!"
echo ""
echo "You can now access nodes via:"
for node in "${NODES[@]}"; do
echo " - ${node}.local (HTTP Gateway)"
done
echo ""
echo "Example: curl http://node-1.local:8080/rqlite/http/db/status"

View File

@ -0,0 +1,85 @@
#!/bin/bash
# Test local domain routing for DeBros Network
# Validates that all HTTP gateway routes are working
set -e
NODES=("1" "2" "3" "4" "5")
GATEWAY_PORTS=(8080 8081 8082 8083 8084)
# Color codes
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Counters
PASSED=0
FAILED=0
# Test a single endpoint
test_endpoint() {
local node=$1
local port=$2
local path=$3
local description=$4
local url="http://node-${node}.local:${port}${path}"
printf "Testing %-50s ... " "$description"
if curl -s -f "$url" > /dev/null 2>&1; then
echo -e "${GREEN}✓ PASS${NC}"
PASSED=$((PASSED + 1)) # ((PASSED++)) returns status 1 when PASSED is 0, which would trip set -e
return 0
else
echo -e "${RED}✗ FAIL${NC}"
FAILED=$((FAILED + 1))
return 1
fi
}
echo "=========================================="
echo "DeBros Network Local Domain Tests"
echo "=========================================="
echo ""
# Test each node's HTTP gateway
for i in "${!NODES[@]}"; do
node=${NODES[$i]}
port=${GATEWAY_PORTS[$i]}
echo "Testing node-${node}.local (port ${port}):"
# Test health endpoint
test_endpoint "$node" "$port" "/health" "Node-${node} health check"
# Test RQLite HTTP endpoint
test_endpoint "$node" "$port" "/rqlite/http/db/execute" "Node-${node} RQLite HTTP"
# Test IPFS API endpoint (may fail if IPFS isn't running; "|| true" keeps the run going)
test_endpoint "$node" "$port" "/ipfs/api/v0/version" "Node-${node} IPFS API" || true
# Test Cluster API endpoint (may fail if the Cluster isn't running; "|| true" keeps the run going)
test_endpoint "$node" "$port" "/cluster/health" "Node-${node} Cluster API" || true
echo ""
done
# Summary
echo "=========================================="
echo "Test Results"
echo "=========================================="
echo -e "${GREEN}Passed: $PASSED${NC}"
echo -e "${RED}Failed: $FAILED${NC}"
echo ""
if [ $FAILED -eq 0 ]; then
echo -e "${GREEN}✓ All tests passed!${NC}"
exit 0
else
echo -e "${YELLOW}⚠ Some tests failed (this is expected if services aren't running)${NC}"
exit 1
fi