Mirror of https://github.com/DeBrosOfficial/network.git (synced 2025-12-14 12:18:49 +00:00)

Compare commits: v0.60.1-ni ... main (70 commits)
Commits in this range:
9f43cea907, 65286df31e, b91b7c27ea, 432952ed69, 9193f088a3, 3505a6a0eb, 3ca4e1f43b, 2fb1d68fcb, 7126c4068b, 681cef999a, 5c7767b7c8, d8994b1e4f, b983066016, 660008b0aa, 775289a1a2, 87059fb9c4, 90a26295a4, 4c1f842939, 33ebf222ff, 2f1ccfa473, 6f7b7606b0, adb180932b, 5d6de3b0b8, 747be5863b, 358de8a8ad, 47ffe817b4, 7f77836d73, 1d060490a8, 0421155594, 32470052ba, 0ca211c983, 2b17bcdaa2, c405be3e69, c2298e476e, ee566d93b7, 7c3378a8ec, bd4542ef56, f88a28b3df, b0ac58af3e, 52b3a99bb9, 19bfaff943, b58b632be9, a33d03c6b2, 6ba0a824e0, d5e28bb694, 72ba75d16b, b896e37e09, b1732b2cbe, badaa920d9, ed80b5b023, e9bf94ba96, 52a726ffd4, efa26e6ec8, 239fb2084b, 5463df73d5, 0ea58354ca, 263fbbb8b4, a72aebc1fe, 80ea58848b, 687316b8d6, 170665bf02, 17fc78975d, 6a86592cad, abcf9a42eb, 42131c0e75, cc74a8f135, 685295551c, ca00561da1, a4b4b8f0df, fe05240362

.github/workflows/release-apt.yml (vendored, new file, 197 lines)
@@ -0,0 +1,197 @@
name: Release APT Package

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      version:
        description: "Version to release (e.g., 0.69.20)"
        required: true

permissions:
  contents: write
  packages: write

jobs:
  build-deb:
    name: Build Debian Package
    runs-on: ubuntu-latest
    strategy:
      matrix:
        arch: [amd64, arm64]

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.23"

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}" # Remove 'v' prefix if present
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up QEMU (for arm64)
        if: matrix.arch == 'arm64'
        uses: docker/setup-qemu-action@v3

      - name: Build binary
        env:
          GOARCH: ${{ matrix.arch }}
          CGO_ENABLED: 0
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          COMMIT=$(git rev-parse --short HEAD)
          DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
          LDFLAGS="-X 'main.version=$VERSION' -X 'main.commit=$COMMIT' -X 'main.date=$DATE'"

          mkdir -p build/usr/local/bin
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/orama cmd/cli/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-node cmd/node/main.go
          go build -ldflags "$LDFLAGS" -o build/usr/local/bin/debros-gateway cmd/gateway/main.go

      - name: Create Debian package structure
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"

          mkdir -p ${PKG_NAME}/DEBIAN
          mkdir -p ${PKG_NAME}/usr/local/bin

          # Copy binaries
          cp build/usr/local/bin/* ${PKG_NAME}/usr/local/bin/
          chmod 755 ${PKG_NAME}/usr/local/bin/*

          # Create control file
          cat > ${PKG_NAME}/DEBIAN/control << EOF
          Package: orama
          Version: ${VERSION}
          Section: net
          Priority: optional
          Architecture: ${ARCH}
          Depends: libc6
          Maintainer: DeBros Team <team@debros.network>
          Description: Orama Network - Distributed P2P Database System
           Orama is a distributed peer-to-peer network that combines
           RQLite for distributed SQL, IPFS for content-addressed storage,
           and LibP2P for peer discovery and communication.
          EOF

          # Create postinst script
          cat > ${PKG_NAME}/DEBIAN/postinst << 'EOF'
          #!/bin/bash
          set -e
          echo ""
          echo "Orama installed successfully!"
          echo ""
          echo "To set up your node, run:"
          echo "  sudo orama install"
          echo ""
          EOF
          chmod 755 ${PKG_NAME}/DEBIAN/postinst

      - name: Build .deb package
        run: |
          VERSION="${{ steps.version.outputs.version }}"
          ARCH="${{ matrix.arch }}"
          PKG_NAME="orama_${VERSION}_${ARCH}"

          dpkg-deb --build ${PKG_NAME}
          mv ${PKG_NAME}.deb orama_${VERSION}_${ARCH}.deb

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: deb-${{ matrix.arch }}
          path: "*.deb"

  publish-apt:
    name: Publish to APT Repository
    needs: build-deb
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: packages

      - name: Get version
        id: version
        run: |
          if [ "${{ github.event_name }}" = "release" ]; then
            VERSION="${{ github.event.release.tag_name }}"
            VERSION="${VERSION#v}"
          else
            VERSION="${{ github.event.inputs.version }}"
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up GPG
        if: env.GPG_PRIVATE_KEY != ''
        env:
          GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }}
        run: |
          echo "$GPG_PRIVATE_KEY" | gpg --import

      - name: Create APT repository structure
        run: |
          mkdir -p apt-repo/pool/main/o/orama
          mkdir -p apt-repo/dists/stable/main/binary-amd64
          mkdir -p apt-repo/dists/stable/main/binary-arm64

          # Move packages
          mv packages/deb-amd64/*.deb apt-repo/pool/main/o/orama/
          mv packages/deb-arm64/*.deb apt-repo/pool/main/o/orama/

          # Generate Packages files
          cd apt-repo
          dpkg-scanpackages --arch amd64 pool/ > dists/stable/main/binary-amd64/Packages
          dpkg-scanpackages --arch arm64 pool/ > dists/stable/main/binary-arm64/Packages

          gzip -k dists/stable/main/binary-amd64/Packages
          gzip -k dists/stable/main/binary-arm64/Packages

          # Generate Release file
          cat > dists/stable/Release << EOF
          Origin: Orama
          Label: Orama
          Suite: stable
          Codename: stable
          Architectures: amd64 arm64
          Components: main
          Description: Orama Network APT Repository
          EOF

          cd ..

      - name: Upload to release
        if: github.event_name == 'release'
        uses: softprops/action-gh-release@v1
        with:
          files: |
            apt-repo/pool/main/o/orama/*.deb
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Deploy APT repository to GitHub Pages
        uses: peaceiris/actions-gh-pages@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./apt-repo
          destination_dir: apt
          keep_files: true
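
For orientation, this is how a machine would consume the repository that the workflow above publishes. A minimal sketch only: the URL is assumed from the GitHub Pages deployment step (`destination_dir: apt` under the DeBrosOfficial/network Pages site), and because the workflow imports a GPG key but never signs the generated `Release` file, apt is told to trust the source explicitly here.

```bash
# Sketch: add the GitHub Pages APT repo published by the workflow above.
# The URL is an assumption derived from the Pages deployment; adjust to the real site.
echo "deb [trusted=yes] https://debrosofficial.github.io/network/apt stable main" \
  | sudo tee /etc/apt/sources.list.d/orama.list

sudo apt update
sudo apt install orama

# The package installs its binaries under /usr/local/bin
orama version
```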

.gitignore (vendored, 4 changed lines)
@@ -74,4 +74,6 @@ data/bootstrap/rqlite/

configs/

.dev/
.dev/

.gocache/

@@ -1,6 +1,6 @@
# GoReleaser Configuration for DeBros Network
# Builds and releases the network-cli binary for multiple platforms
# Other binaries (node, gateway, identity) are installed via: network-cli setup
# Builds and releases the dbn binary for multiple platforms
# Other binaries (node, gateway, identity) are installed via: dbn setup

project_name: debros-network

@@ -8,10 +8,10 @@ env:
  - GO111MODULE=on

builds:
  # network-cli binary - only build the CLI
  - id: network-cli
  # dbn binary - only build the CLI
  - id: dbn
    main: ./cmd/cli
    binary: network-cli
    binary: dbn
    goos:
      - linux
      - darwin
@@ -23,10 +23,10 @@ builds:
      - -X main.version={{.Version}}
      - -X main.commit={{.ShortCommit}}
      - -X main.date={{.Date}}
    mod_timestamp: '{{ .CommitTimestamp }}'
    mod_timestamp: "{{ .CommitTimestamp }}"

archives:
  # Tar.gz archives for network-cli
  # Tar.gz archives for dbn
  - id: binaries
    format: tar.gz
    name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
@@ -50,10 +50,10 @@ changelog:
  abbrev: -1
  filters:
    exclude:
      - '^docs:'
      - '^test:'
      - '^chore:'
      - '^ci:'
      - "^docs:"
      - "^test:"
      - "^chore:"
      - "^ci:"
      - Merge pull request
      - Merge branch
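
A quick way to exercise this GoReleaser configuration locally, without tagging a release, is a snapshot build. A sketch (flag names as in recent GoReleaser versions; older releases use `--rm-dist` instead of `--clean`):

```bash
# Build the dbn CLI for all configured platforms into ./dist without publishing
goreleaser build --snapshot --clean

ls dist/
```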

CHANGELOG.md (1031 changed lines)
File diff suppressed because it is too large.

@@ -22,19 +22,19 @@ make deps
- Test: `make test`
- Format/Vet: `make fmt vet` (or `make lint`)

```
````

Useful CLI commands:

```bash
./bin/network-cli health
./bin/network-cli peers
./bin/network-cli status
```
./bin/orama health
./bin/orama peers
./bin/orama status
````

## Versioning

- The CLI reports its version via `network-cli version`.
- The CLI reports its version via `orama version`.
- Releases are tagged (e.g., `v0.18.0-beta`) and published via GoReleaser.

## Pull Requests

Makefile (93 changed lines)
@@ -6,14 +6,12 @@ test:
	go test -v $(TEST)

# Gateway-focused E2E tests assume gateway and nodes are already running
# Configure via env:
#   GATEWAY_BASE_URL (default http://localhost:6001)
#   GATEWAY_API_KEY (required for auth-protected routes)
# Auto-discovers configuration from ~/.orama and queries database for API key
# No environment variables required
.PHONY: test-e2e
test-e2e:
	@echo "Running gateway E2E tests (HTTP/WS only)..."
	@echo "Base URL: $${GATEWAY_BASE_URL:-http://localhost:6001}"
	@test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1)
	@echo "Running comprehensive E2E tests..."
	@echo "Auto-discovering configuration from ~/.orama..."
	go test -v -tags e2e ./e2e

# Network - Distributed P2P Database System
@@ -21,7 +19,7 @@ test-e2e:

.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill

VERSION := 0.60.1
VERSION := 0.72.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@@ -31,11 +29,11 @@ build: deps
	@echo "Building network executables (version=$(VERSION))..."
	@mkdir -p bin
	go build -ldflags "$(LDFLAGS)" -o bin/identity ./cmd/identity
	go build -ldflags "$(LDFLAGS)" -o bin/node ./cmd/node
	go build -ldflags "$(LDFLAGS)" -o bin/network-cli cmd/cli/main.go
	go build -ldflags "$(LDFLAGS)" -o bin/orama-node ./cmd/node
	go build -ldflags "$(LDFLAGS)" -o bin/orama cmd/cli/main.go
	# Inject gateway build metadata via pkg path variables
	go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
	@echo "Build complete! Run ./bin/network-cli version"
	@echo "Build complete! Run ./bin/orama version"

# Install git hooks
install-hooks:
@@ -51,45 +49,48 @@ clean:

# Run bootstrap node (auto-selects identity and data dir)
run-node:
	@echo "Starting bootstrap node..."
	@echo "Config: ~/.debros/bootstrap.yaml"
	@echo "Generate it with: network-cli config init --type bootstrap"
	go run ./cmd/node --config node.yaml
	@echo "Starting node..."
	@echo "Config: ~/.orama/node.yaml"
	go run ./cmd/orama-node --config node.yaml

# Run second node (regular) - requires join address of bootstrap node
# Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
# Run second node - requires join address
run-node2:
	@echo "Starting regular node (node.yaml)..."
	@echo "Config: ~/.debros/node.yaml"
	@echo "Generate it with: network-cli config init --type node --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
	go run ./cmd/node --config node2.yaml
	@echo "Starting second node..."
	@echo "Config: ~/.orama/node2.yaml"
	go run ./cmd/orama-node --config node2.yaml

# Run third node (regular) - requires join address of bootstrap node
# Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
# Run third node - requires join address
run-node3:
	@echo "Starting regular node (node2.yaml)..."
	@echo "Config: ~/.debros/node2.yaml"
	@echo "Generate it with: network-cli config init --type node --name node2.yaml --join localhost:5001 --bootstrap-peers '<peer_multiaddr>'"
	go run ./cmd/node --config node3.yaml
	@echo "Starting third node..."
	@echo "Config: ~/.orama/node3.yaml"
	go run ./cmd/orama-node --config node3.yaml

# Run gateway HTTP server
# Usage examples:
#   make run-gateway           # uses ~/.debros/gateway.yaml
#   Config generated with: network-cli config init --type gateway
run-gateway:
	@echo "Starting gateway HTTP server..."
	@echo "Note: Config must be in ~/.debros/gateway.yaml"
	@echo "Generate it with: network-cli config init --type gateway"
	go run ./cmd/gateway
	@echo "Note: Config must be in ~/.orama/data/gateway.yaml"
	go run ./cmd/orama-gateway

# Setup local domain names for development
setup-domains:
	@echo "Setting up local domains..."
	@sudo bash scripts/setup-local-domains.sh

# Development environment target
# Uses network-cli dev up to start full stack with dependency and port checking
dev: build
	@./bin/network-cli dev up
# Uses orama dev up to start full stack with dependency and port checking
dev: build setup-domains
	@./bin/orama dev up

# Kill all processes using network-cli dev down
# Graceful shutdown of all dev services
stop:
	@if [ -f ./bin/orama ]; then \
		./bin/orama dev down || true; \
	fi
	@bash scripts/dev-kill-all.sh

# Force kill all processes (immediate termination)
kill:
	@./bin/network-cli dev down
	@bash scripts/dev-kill-all.sh

# Help
help:
@@ -101,19 +102,17 @@ help:
	@echo "Local Development (Recommended):"
	@echo "  make dev - Start full development stack with one command"
	@echo "             - Checks dependencies and available ports"
	@echo "             - Generates configs (bootstrap + node2 + node3 + gateway)"
	@echo "             - Starts IPFS, RQLite, Olric, nodes, and gateway"
	@echo "             - Validates cluster health (IPFS peers, RQLite, LibP2P)"
	@echo "             - Stops all services if health checks fail"
	@echo "             - Includes comprehensive logging"
	@echo "  make kill - Stop all development services"
	@echo "             - Generates configs and starts all services"
	@echo "             - Validates cluster health"
	@echo "  make stop - Gracefully stop all development services"
	@echo "  make kill - Force kill all development services (use if stop fails)"
	@echo ""
	@echo "Development Management (via network-cli):"
	@echo "  ./bin/network-cli dev status - Show status of all dev services"
	@echo "  ./bin/network-cli dev logs <component> [--follow]"
	@echo "Development Management (via orama):"
	@echo "  ./bin/orama dev status - Show status of all dev services"
	@echo "  ./bin/orama dev logs <component> [--follow]"
	@echo ""
	@echo "Individual Node Targets (advanced):"
	@echo "  run-node - Start bootstrap node directly"
	@echo "  run-node - Start first node directly"
	@echo "  run-node2 - Start second node directly"
	@echo "  run-node3 - Start third node directly"
	@echo "  run-gateway - Start HTTP gateway directly"
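
Since `VERSION`, `COMMIT`, and `DATE` are plain make variables fed into `-ldflags`, they can be overridden on the command line (command-line assignments take precedence over the `:=` defaults above). A short sketch:

```bash
# Build with an overridden version string, then confirm it was stamped into the binary
make build VERSION=0.72.0-local COMMIT=deadbeef
./bin/orama version
```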

README.md (462 changed lines)
@@ -1,156 +1,340 @@
# DeBros Network - Distributed P2P Database System
# Orama Network - Distributed P2P Database System

DeBros Network is a decentralized peer-to-peer data platform built in Go. It combines distributed SQL (RQLite), pub/sub messaging, and resilient peer discovery so applications can share state without central infrastructure.
A decentralized peer-to-peer data platform built in Go. Combines distributed SQL (RQLite), pub/sub messaging, and resilient peer discovery so applications can share state without central infrastructure.

## Table of Contents
## Features

- [At a Glance](#at-a-glance)
- [Quick Start](#quick-start)
- [Components & Ports](#components--ports)
- [Configuration Cheatsheet](#configuration-cheatsheet)
- [CLI Highlights](#cli-highlights)
- [HTTP Gateway](#http-gateway)
- [Troubleshooting](#troubleshooting)
- [Resources](#resources)

## At a Glance

- Distributed SQL backed by RQLite and Raft consensus
- Topic-based pub/sub with automatic cleanup
- Namespace isolation for multi-tenant apps
- Secure transport using libp2p plus Noise/TLS
- Lightweight Go client and CLI tooling
- **Distributed SQL** - RQLite with Raft consensus
- **Pub/Sub Messaging** - Topic-based with automatic cleanup
- **Namespace Isolation** - Multi-tenant support
- **Secure Transport** - LibP2P + Noise/TLS encryption
- **Unified Gateway** - Single port access to all node services

## Quick Start

1. Clone and build the project:

```bash
git clone https://github.com/DeBrosOfficial/network.git
cd network
make build
```

2. Generate local configuration (bootstrap, node2, node3, gateway):

```bash
./bin/network-cli config init
```

3. Launch the full development stack:

```bash
make dev
```

This starts three nodes and the HTTP gateway. **The command will not complete successfully until all services pass health checks** (IPFS peer connectivity, RQLite cluster formation, and LibP2P connectivity). If health checks fail, all services are stopped automatically. Stop with `Ctrl+C`.

4. Validate the network from another terminal:

```bash
./bin/network-cli health
./bin/network-cli peers
./bin/network-cli pubsub publish notifications "Hello World"
./bin/network-cli pubsub subscribe notifications 10s
```

## Components & Ports

- **Bootstrap node**: P2P `4001`, RQLite HTTP `5001`, Raft `7001`
- **Additional nodes** (`node2`, `node3`): Incrementing ports (`400{2,3}`, `500{2,3}`, `700{2,3}`)
- **Gateway**: HTTP `6001` exposes REST/WebSocket APIs
- **Data directory**: `~/.debros/` stores configs, identities, and RQLite data

Use `make dev` for the complete stack or run binaries individually with `go run ./cmd/node --config <file>` and `go run ./cmd/gateway --config gateway.yaml`.

## Configuration Cheatsheet

All runtime configuration lives in `~/.debros/`.

- `bootstrap.yaml`: `type: bootstrap`, blank `database.rqlite_join_address`
- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. `localhost:7001`) and include the bootstrap `discovery.bootstrap_peers`
- `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags

Validation reminders:

- HTTP and Raft ports must differ
- Non-bootstrap nodes require a join address and bootstrap peers
- Bootstrap nodes cannot define a join address
- Multiaddrs must end with `/p2p/<peerID>`

Regenerate configs any time with `./bin/network-cli config init --force`.

## CLI Highlights

All commands accept `--format json`, `--timeout <duration>`, and `--bootstrap <multiaddr>`.

- **Auth**

```bash
./bin/network-cli auth login
./bin/network-cli auth status
./bin/network-cli auth logout
```

- **Network**

```bash
./bin/network-cli health
./bin/network-cli status
./bin/network-cli peers
```

- **Database**

```bash
./bin/network-cli query "SELECT * FROM users"
./bin/network-cli query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
./bin/network-cli transaction --file ops.json
```

- **Pub/Sub**

```bash
./bin/network-cli pubsub publish <topic> <message>
./bin/network-cli pubsub subscribe <topic> 30s
./bin/network-cli pubsub topics
```

Credentials live at `~/.debros/credentials.json` with user-only permissions.

## HTTP Gateway

Start locally with `make run-gateway` or `go run ./cmd/gateway --config gateway.yaml`.

Environment overrides:
### Local Development

```bash
export GATEWAY_ADDR="0.0.0.0:6001"
export GATEWAY_NAMESPACE="my-app"
export GATEWAY_BOOTSTRAP_PEERS="/ip4/localhost/tcp/4001/p2p/<peerID>"
export GATEWAY_REQUIRE_AUTH=true
export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2"
# Build the project
make build

# Start 5-node development cluster
make dev
```

Common endpoints (see `openapi/gateway.yaml` for the full spec):
The cluster automatically performs health checks before declaring success. Check the output for:

- `GET /health`, `GET /v1/status`, `GET /v1/version`
- `POST /v1/auth/challenge`, `POST /v1/auth/verify`, `POST /v1/auth/refresh`
- `POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction`
- `GET /v1/rqlite/schema`
- `POST /v1/pubsub/publish`, `GET /v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=<topic>`
- `POST /v1/storage/upload`, `POST /v1/storage/pin`, `GET /v1/storage/status/:cid`, `GET /v1/storage/get/:cid`, `DELETE /v1/storage/unpin/:cid`
- Node unified gateway ports (6001-6005)
- IPFS API endpoints
- Olric cache server
- Peer connection status
- Example curl commands

### Stop Development Environment

```bash
make down
```

## Testing Services

After running `make dev`, test service health using these curl requests:

> **Note:** Local domains (node-1.local, etc.) require running `sudo make setup-domains` first. Alternatively, use `localhost` with port numbers.

### Node Unified Gateways

Each node is accessible via a single unified gateway port:

```bash
# Node-1 (port 6001)
curl http://node-1.local:6001/health
curl http://node-1.local:6001/rqlite/http/db/execute -H "Content-Type: application/json" -d '{"sql":"SELECT 1"}'
curl http://node-1.local:6001/cluster/health
curl http://node-1.local:6001/ipfs/api/v0/version

# Node-2 (port 6002)
curl http://node-2.local:6002/health
curl http://node-2.local:6002/rqlite/http/db/execute -H "Content-Type: application/json" -d '{"sql":"SELECT 1"}'

# Node-3 (port 6003)
curl http://node-3.local:6003/health

# Node-4 (port 6004)
curl http://node-4.local:6004/health

# Node-5 (port 6005)
curl http://node-5.local:6005/health
```

### Main Gateway

The main gateway provides `/v1/*` routes for RQLite, pub/sub, and storage:

```bash
# Gateway health
curl http://node-1.local:6001/health

# Gateway status
curl http://node-1.local:6001/v1/status

# Network peers
curl http://node-1.local:6001/v1/network/status

# Database query
curl http://node-1.local:6001/v1/rqlite/query \
  -H "Content-Type: application/json" \
  -d '{"sql":"SELECT 1"}'

# Pub/Sub topics
curl http://node-1.local:6001/v1/pubsub/topics
```

### Direct Service Access (Debugging)

Direct access to individual service ports without unified gateway:

```bash
# RQLite HTTP (each node on its own port)
curl http://localhost:5001/db/execute -H "Content-Type: application/json" -d '{"sql":"SELECT 1"}' # Bootstrap
curl http://localhost:5002/db/execute -H "Content-Type: application/json" -d '{"sql":"SELECT 1"}' # Node2

# IPFS API
curl http://localhost:4501/api/v0/version # Bootstrap IPFS
curl http://localhost:4502/api/v0/version # Node2 IPFS

# Olric Cache
curl http://localhost:3320/stats
```

## Network Architecture

### Unified Gateway Ports

```
Node-1: localhost:6001 → /rqlite/http, /rqlite/raft, /cluster, /ipfs/api
Node-2: localhost:6002 → Same routes
Node-3: localhost:6003 → Same routes
Node-4: localhost:6004 → Same routes
Node-5: localhost:6005 → Same routes
```

### Direct Service Ports (for debugging)

```
RQLite HTTP: 5001, 5002, 5003, 5004, 5005 (one per node)
RQLite Raft: 7001, 7002, 7003, 7004, 7005
IPFS API: 4501, 4502, 4503, 4504, 4505
IPFS Swarm: 4101, 4102, 4103, 4104, 4105
Cluster API: 9094, 9104, 9114, 9124, 9134
Internal Gateway: 6000
Olric Cache: 3320
Anon SOCKS: 9050
```

## Development Commands

```bash
# Start full cluster (5 nodes + gateway)
make dev

# Check service status
orama dev status

# View logs
orama dev logs node-1           # Node-1 logs
orama dev logs node-1 --follow  # Follow logs in real-time
orama dev logs gateway --follow # Gateway logs

# Stop all services
orama dev down

# Build binaries
make build
```

## CLI Commands

### Network Status

```bash
./bin/orama health # Cluster health check
./bin/orama peers  # List connected peers
./bin/orama status # Network status
```

### Database Operations

```bash
./bin/orama query "SELECT * FROM users"
./bin/orama query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
./bin/orama transaction --file ops.json
```

### Pub/Sub

```bash
./bin/orama pubsub publish <topic> <message>
./bin/orama pubsub subscribe <topic> 30s
./bin/orama pubsub topics
```

### Authentication

```bash
./bin/orama auth login
./bin/orama auth status
./bin/orama auth logout
```

## Production Deployment

### Prerequisites

- Ubuntu 22.04+ or Debian 12+
- `amd64` or `arm64` architecture
- 4GB RAM, 50GB SSD, 2 CPU cores

### Required Ports

**External (must be open in the firewall; see the firewall sketch below):**

- **80** - HTTP (ACME/Let's Encrypt certificate challenges)
- **443** - HTTPS (Main gateway API endpoint)
- **4101** - IPFS Swarm (peer connections)
- **7001** - RQLite Raft (cluster consensus)

**Internal (bound to localhost, no firewall needed):**

- 4501 - IPFS API
- 5001 - RQLite HTTP API
- 6001 - Unified Gateway
- 8080 - IPFS Gateway
- 9050 - Anyone Client SOCKS5 proxy
- 9094 - IPFS Cluster API
- 3320/3322 - Olric Cache

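The externally required ports above can be opened with ufw; a sketch assuming ufw is the firewall in use (adjust for nftables, iptables, or a cloud security group):

```bash
# Open only the externally required ports; everything else stays closed
sudo ufw allow 80/tcp    # ACME / Let's Encrypt challenges
sudo ufw allow 443/tcp   # HTTPS gateway
sudo ufw allow 4101/tcp  # IPFS swarm
sudo ufw allow 7001/tcp  # RQLite Raft
sudo ufw enable
sudo ufw status verbose
```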

### Installation

```bash
# Install via APT
echo "deb https://debrosofficial.github.io/network/apt stable main" | sudo tee /etc/apt/sources.list.d/debros.list

sudo apt update && sudo apt install orama

# Interactive installation (recommended)
sudo orama install

# Or with flags - First node (creates new cluster)
sudo orama install --vps-ip <public_ip> --domain node-1.example.com

# Joining existing cluster
sudo orama install --vps-ip <public_ip> --domain node-2.example.com \
  --peers /ip4/<first_node_ip>/tcp/4001/p2p/<peer_id> \
  --cluster-secret <64-hex-secret>
```

### Service Management

```bash
# Status
orama status

# Control services
sudo orama start
sudo orama stop
sudo orama restart

# View logs
orama logs node --follow
orama logs gateway --follow
orama logs ipfs --follow
```

### Upgrade

```bash
# Upgrade to latest version
sudo orama upgrade --restart [--branch nightly]
```

## Configuration

All configuration lives in `~/.orama/`:

- `configs/node.yaml` - Node configuration
- `configs/gateway.yaml` - Gateway configuration
- `configs/olric.yaml` - Cache configuration
- `secrets/` - Keys and certificates
- `data/` - Service data directories

## Troubleshooting

- **Config directory errors**: Ensure `~/.debros/` exists, is writable, and has free disk space (`touch ~/.debros/test && rm ~/.debros/test`).
- **Port conflicts**: Inspect with `lsof -i :4001` (or other ports) and stop conflicting processes or regenerate configs with new ports.
- **Missing configs**: Run `./bin/network-cli config init` before starting nodes.
- **Cluster join issues**: Confirm the bootstrap node is running, `peer.info` multiaddr matches `bootstrap_peers`, and firewall rules allow the P2P ports.

### Services Not Starting

```bash
# Check status
systemctl status debros-node

# View logs
journalctl -u debros-node -f

# Check log files
tail -f /home/debros/.orama/logs/node.log
```

### Port Conflicts

```bash
# Check what's using specific ports
sudo lsof -i :443  # HTTPS Gateway
sudo lsof -i :7001 # TCP/SNI Gateway
sudo lsof -i :6001 # Internal Gateway
```

### RQLite Cluster Issues

```bash
# Connect to RQLite CLI
rqlite -H localhost -p 5001

# Check cluster status
.nodes
.status
.ready

# Check consistency level
.consistency
```

### Reset Installation

```bash
# Production reset (⚠️ DESTROYS DATA)
sudo orama uninstall
sudo rm -rf /home/debros/.orama
sudo orama install
```

## HTTP Gateway API

### Main Gateway Endpoints

- `GET /health` - Health status
- `GET /v1/status` - Full status
- `GET /v1/version` - Version info
- `POST /v1/rqlite/exec` - Execute SQL
- `POST /v1/rqlite/query` - Query database
- `GET /v1/rqlite/schema` - Get schema
- `POST /v1/pubsub/publish` - Publish message
- `GET /v1/pubsub/topics` - List topics
- `GET /v1/pubsub/ws?topic=<name>` - WebSocket subscribe

See `openapi/gateway.yaml` for complete API specification.

## Resources

- Go modules: `go mod tidy`, `go test ./...`
- Automation: `make build`, `make dev`, `make run-gateway`, `make lint`
- API reference: `openapi/gateway.yaml`
- Code of Conduct: [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md)
- [RQLite Documentation](https://rqlite.io/docs/)
- [LibP2P Documentation](https://docs.libp2p.io/)
- [GitHub Repository](https://github.com/DeBrosOfficial/network)
- [Issue Tracker](https://github.com/DeBrosOfficial/network/issues)

cmd/cli/main.go (143 changed lines)
@@ -34,7 +34,7 @@ func main() {

	switch command {
	case "version":
		fmt.Printf("network-cli %s", version)
		fmt.Printf("orama %s", version)
		if commit != "" {
			fmt.Printf(" (commit %s)", commit)
		}
@@ -44,68 +44,38 @@ func main() {
		fmt.Println()
		return

	// Environment commands
	case "env":
		cli.HandleEnvCommand(args)
	case "devnet", "testnet", "local":
		// Shorthand for switching environments
		if len(args) > 0 && (args[0] == "enable" || args[0] == "switch") {
			if err := cli.SwitchEnvironment(command); err != nil {
				fmt.Fprintf(os.Stderr, "❌ Failed to switch environment: %v\n", err)
				os.Exit(1)
			}
			env, _ := cli.GetActiveEnvironment()
			fmt.Printf("✅ Switched to %s environment\n", command)
			if env != nil {
				fmt.Printf("   Gateway URL: %s\n", env.GatewayURL)
			}
		} else {
			fmt.Fprintf(os.Stderr, "Usage: network-cli %s enable\n", command)
			os.Exit(1)
		}

	// Development environment commands
	case "dev":
		cli.HandleDevCommand(args)

	// Production environment commands
	// Production environment commands (legacy with 'prod' prefix)
	case "prod":
		cli.HandleProdCommand(args)

	// Direct production commands (new simplified interface)
	case "install":
		cli.HandleProdCommand(append([]string{"install"}, args...))
	case "upgrade":
		cli.HandleProdCommand(append([]string{"upgrade"}, args...))
	case "migrate":
		cli.HandleProdCommand(append([]string{"migrate"}, args...))
	case "status":
		cli.HandleProdCommand(append([]string{"status"}, args...))
	case "start":
		cli.HandleProdCommand(append([]string{"start"}, args...))
	case "stop":
		cli.HandleProdCommand(append([]string{"stop"}, args...))
	case "restart":
		cli.HandleProdCommand(append([]string{"restart"}, args...))
	case "logs":
		cli.HandleProdCommand(append([]string{"logs"}, args...))
	case "uninstall":
		cli.HandleProdCommand(append([]string{"uninstall"}, args...))

	// Authentication commands
	case "auth":
		cli.HandleAuthCommand(args)

	// Basic network commands
	case "health":
		cli.HandleHealthCommand(format, timeout)
	case "peers":
		cli.HandlePeersCommand(format, timeout)
	case "status":
		cli.HandleStatusCommand(format, timeout)
	case "peer-id":
		cli.HandlePeerIDCommand(format, timeout)

	// Query command
	case "query":
		if len(args) == 0 {
			fmt.Fprintf(os.Stderr, "Usage: network-cli query <sql>\n")
			os.Exit(1)
		}
		cli.HandleQueryCommand(args[0], format, timeout)

	// PubSub commands
	case "pubsub":
		cli.HandlePubSubCommand(args, format, timeout)

	// Connect command
	case "connect":
		if len(args) == 0 {
			fmt.Fprintf(os.Stderr, "Usage: network-cli connect <peer_address>\n")
			os.Exit(1)
		}
		cli.HandleConnectCommand(args[0], timeout)

	// Help
	case "help", "--help", "-h":
		showHelp()
@@ -135,68 +105,47 @@ func parseGlobalFlags(args []string) {
}

func showHelp() {
	fmt.Printf("Network CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Printf("Usage: network-cli <command> [args...]\n\n")

	fmt.Printf("🌍 Environment Management:\n")
	fmt.Printf("  env list - List available environments\n")
	fmt.Printf("  env current - Show current environment\n")
	fmt.Printf("  env switch <env> - Switch to environment (local, devnet, testnet)\n")
	fmt.Printf("  devnet enable - Shorthand for switching to devnet\n")
	fmt.Printf("  testnet enable - Shorthand for switching to testnet\n\n")
	fmt.Printf("Orama CLI - Distributed P2P Network Management Tool\n\n")
	fmt.Printf("Usage: orama <command> [args...]\n\n")

	fmt.Printf("💻 Local Development:\n")
	fmt.Printf("  dev up - Start full local dev environment\n")
	fmt.Printf("  dev down - Stop all dev services\n")
	fmt.Printf("  dev status - Show status of dev services\n")
	fmt.Printf("  dev logs <component> - View dev component logs\n\n")
	fmt.Printf("  dev logs <component> - View dev component logs\n")
	fmt.Printf("  dev help - Show dev command help\n\n")

	fmt.Printf("🚀 Production Deployment:\n")
	fmt.Printf("  prod install [--bootstrap] - Full production bootstrap (requires root)\n")
	fmt.Printf("  prod upgrade - Upgrade existing installation\n")
	fmt.Printf("  prod status - Show production service status\n")
	fmt.Printf("  prod logs <service> - View production service logs\n")
	fmt.Printf("  prod uninstall - Remove production services (preserves data)\n\n")
	fmt.Printf("  install - Install production node (requires root/sudo)\n")
	fmt.Printf("  upgrade - Upgrade existing installation\n")
	fmt.Printf("  status - Show production service status\n")
	fmt.Printf("  start - Start all production services (requires root/sudo)\n")
	fmt.Printf("  stop - Stop all production services (requires root/sudo)\n")
	fmt.Printf("  restart - Restart all production services (requires root/sudo)\n")
	fmt.Printf("  logs <service> - View production service logs\n")
	fmt.Printf("  uninstall - Remove production services (requires root/sudo)\n\n")

	fmt.Printf("🔐 Authentication:\n")
	fmt.Printf("  auth login - Authenticate with wallet\n")
	fmt.Printf("  auth logout - Clear stored credentials\n")
	fmt.Printf("  auth whoami - Show current authentication\n")
	fmt.Printf("  auth status - Show detailed auth info\n\n")

	fmt.Printf("🌐 Network Commands:\n")
	fmt.Printf("  health - Check network health\n")
	fmt.Printf("  peers - List connected peers\n")
	fmt.Printf("  status - Show network status\n")
	fmt.Printf("  peer-id - Show this node's peer ID\n")
	fmt.Printf("  connect <peer_address> - Connect to peer\n\n")

	fmt.Printf("🗄️ Database:\n")
	fmt.Printf("  query <sql> 🔐 Execute database query\n\n")

	fmt.Printf("📡 PubSub:\n")
	fmt.Printf("  pubsub publish <topic> <msg> 🔐 Publish message\n")
	fmt.Printf("  pubsub subscribe <topic> 🔐 Subscribe to topic\n")
	fmt.Printf("  pubsub topics 🔐 List topics\n\n")
	fmt.Printf("  auth status - Show detailed auth info\n")
	fmt.Printf("  auth help - Show auth command help\n\n")

	fmt.Printf("Global Flags:\n")
	fmt.Printf("  -f, --format <format> - Output format: table, json (default: table)\n")
	fmt.Printf("  -t, --timeout <duration> - Operation timeout (default: 30s)\n\n")

	fmt.Printf("🔐 = Requires authentication (auto-prompts if needed)\n\n")
	fmt.Printf("  -t, --timeout <duration> - Operation timeout (default: 30s)\n")
	fmt.Printf("  --help, -h - Show this help message\n\n")

	fmt.Printf("Examples:\n")
	fmt.Printf("  # Switch to devnet\n")
	fmt.Printf("  network-cli devnet enable\n\n")
	fmt.Printf("  # First node (creates new cluster)\n")
	fmt.Printf("  sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")

	fmt.Printf("  # Authenticate and query\n")
	fmt.Printf("  network-cli auth login\n")
	fmt.Printf("  network-cli query \"SELECT * FROM users LIMIT 10\"\n\n")
	fmt.Printf("  # Join existing cluster\n")
	fmt.Printf("  sudo orama install --vps-ip 203.0.113.2 --domain node-2.orama.network \\\n")
	fmt.Printf("    --peers /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW... --cluster-secret <hex>\n\n")

	fmt.Printf("  # Setup VPS (Linux only)\n")
	fmt.Printf("  sudo network-cli setup\n\n")

	fmt.Printf("  # Manage services\n")
	fmt.Printf("  sudo network-cli service status all\n")
	fmt.Printf("  sudo network-cli service logs node --follow\n")
	fmt.Printf("  # Service management\n")
	fmt.Printf("  orama status\n")
	fmt.Printf("  orama logs node --follow\n")
}

@@ -1,6 +1,7 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
@@ -39,14 +40,36 @@ func getEnvBoolDefault(key string, def bool) bool {
	}
}

// parseGatewayConfig loads gateway.yaml from ~/.debros exclusively.
// parseGatewayConfig loads gateway.yaml from ~/.orama exclusively.
// It accepts an optional --config flag for absolute paths (used by systemd services).
func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
	// Parse --config flag (optional, for systemd services that pass absolute paths)
	configFlag := flag.String("config", "", "Config file path (absolute path or filename in ~/.orama)")
	flag.Parse()

	// Determine config path
	configPath, err := config.DefaultPath("gateway.yaml")
	if err != nil {
		logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
		fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
		os.Exit(1)
	var configPath string
	var err error
	if *configFlag != "" {
		// If --config flag is provided, use it (handles both absolute and relative paths)
		if filepath.IsAbs(*configFlag) {
			configPath = *configFlag
		} else {
			configPath, err = config.DefaultPath(*configFlag)
			if err != nil {
				logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
				fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
				os.Exit(1)
			}
		}
	} else {
		// Default behavior: look for gateway.yaml in ~/.orama/data/, ~/.orama/configs/, or ~/.orama/
		configPath, err = config.DefaultPath("gateway.yaml")
		if err != nil {
			logger.ComponentError(logging.ComponentGeneral, "Failed to determine config path", zap.Error(err))
			fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
			os.Exit(1)
		}
	}

	// Load YAML
@@ -54,7 +77,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
		ListenAddr string `yaml:"listen_addr"`
		ClientNamespace string `yaml:"client_namespace"`
		RQLiteDSN string `yaml:"rqlite_dsn"`
		BootstrapPeers []string `yaml:"bootstrap_peers"`
		Peers []string `yaml:"bootstrap_peers"`
		EnableHTTPS bool `yaml:"enable_https"`
		DomainName string `yaml:"domain_name"`
		TLSCacheDir string `yaml:"tls_cache_dir"`
@@ -72,7 +95,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
			zap.String("path", configPath),
			zap.Error(err))
		fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
		fmt.Fprintf(os.Stderr, "Generate it using: network-cli config init --type gateway\n")
		fmt.Fprintf(os.Stderr, "Generate it using: dbn config init --type gateway\n")
		os.Exit(1)
	}

@@ -110,16 +133,16 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
	if v := strings.TrimSpace(y.RQLiteDSN); v != "" {
		cfg.RQLiteDSN = v
	}
	if len(y.BootstrapPeers) > 0 {
		var bp []string
		for _, p := range y.BootstrapPeers {
	if len(y.Peers) > 0 {
		var peers []string
		for _, p := range y.Peers {
			p = strings.TrimSpace(p)
			if p != "" {
				bp = append(bp, p)
				peers = append(peers, p)
			}
		}
		if len(bp) > 0 {
			cfg.BootstrapPeers = bp
		if len(peers) > 0 {
			cfg.BootstrapPeers = peers
		}
	}

@@ -134,7 +157,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
	// Default TLS cache directory if HTTPS is enabled but not specified
	homeDir, err := os.UserHomeDir()
	if err == nil {
		cfg.TLSCacheDir = filepath.Join(homeDir, ".debros", "tls-cache")
		cfg.TLSCacheDir = filepath.Join(homeDir, ".orama", "tls-cache")
	}
	}

@@ -182,7 +205,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
		zap.String("path", configPath),
		zap.String("addr", cfg.ListenAddr),
		zap.String("namespace", cfg.ClientNamespace),
		zap.Int("bootstrap_peer_count", len(cfg.BootstrapPeers)),
		zap.Int("peer_count", len(cfg.BootstrapPeers)),
	)

	return cfg
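
The yaml struct tags in this hunk (`listen_addr`, `client_namespace`, `rqlite_dsn`, `bootstrap_peers`, `enable_https`, `domain_name`, `tls_cache_dir`) imply the shape of `gateway.yaml`. A minimal sketch of such a file, written to one of the locations the new lookup checks; the values are placeholders, not project defaults:

```bash
mkdir -p ~/.orama
cat > ~/.orama/gateway.yaml << 'EOF'
# Keys taken from the yaml struct tags above; values are illustrative only
listen_addr: "0.0.0.0:6001"
client_namespace: "default"
bootstrap_peers:
  - "/ip4/127.0.0.1/tcp/4001/p2p/<peerID>"
enable_https: false
EOF

# The gateway also accepts an explicit --config path (e.g. from a systemd unit)
go run ./cmd/orama-gateway --config ~/.orama/gateway.yaml
```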

cmd/node/main.go (100 changed lines)
@@ -4,6 +4,7 @@ import (
	"context"
	"flag"
	"fmt"
	"net"
	"os"
	"os/signal"
	"path/filepath"
@@ -32,7 +33,7 @@ func setup_logger(component logging.Component) (logger *logging.ColoredLogger) {

// parse_flags parses command-line flags and returns them.
func parse_flags() (configName *string, help *bool) {
	configName = flag.String("config", "node.yaml", "Config filename in ~/.debros (default: node.yaml)")
	configName = flag.String("config", "node.yaml", "Config filename in ~/.orama (default: node.yaml)")
	help = flag.Bool("help", false, "Show help")
	flag.Parse()

@@ -62,27 +63,36 @@ func check_if_should_open_help(help *bool) {
	}
}

// select_data_dir validates that we can load the config from ~/.debros
// select_data_dir validates that we can load the config from ~/.orama
func select_data_dir_check(configName *string) {
	logger := setup_logger(logging.ComponentNode)

	// Ensure config directory exists and is writable
	_, err := config.EnsureConfigDir()
	if err != nil {
		logger.Error("Failed to ensure config directory", zap.Error(err))
		fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
		fmt.Fprintf(os.Stderr, "Failed to create/access config directory: %v\n", err)
		fmt.Fprintf(os.Stderr, "\nPlease ensure:\n")
		fmt.Fprintf(os.Stderr, "  1. Home directory is accessible: %s\n", os.ExpandEnv("~"))
		fmt.Fprintf(os.Stderr, "  2. You have write permissions to home directory\n")
		fmt.Fprintf(os.Stderr, "  3. Disk space is available\n")
		os.Exit(1)
	}
	var configPath string
	var err error

	configPath, err := config.DefaultPath(*configName)
	if err != nil {
		logger.Error("Failed to determine config path", zap.Error(err))
		os.Exit(1)
	// Check if configName is an absolute path
	if filepath.IsAbs(*configName) {
		// Use absolute path directly
		configPath = *configName
	} else {
		// Ensure config directory exists and is writable
		_, err = config.EnsureConfigDir()
		if err != nil {
			logger.Error("Failed to ensure config directory", zap.Error(err))
			fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
			fmt.Fprintf(os.Stderr, "Failed to create/access config directory: %v\n", err)
			fmt.Fprintf(os.Stderr, "\nPlease ensure:\n")
			fmt.Fprintf(os.Stderr, "  1. Home directory is accessible: %s\n", os.ExpandEnv("~"))
			fmt.Fprintf(os.Stderr, "  2. You have write permissions to home directory\n")
			fmt.Fprintf(os.Stderr, "  3. Disk space is available\n")
			os.Exit(1)
		}

		configPath, err = config.DefaultPath(*configName)
		if err != nil {
			logger.Error("Failed to determine config path", zap.Error(err))
			os.Exit(1)
		}
	}

	if _, err := os.Stat(configPath); err != nil {
@@ -92,8 +102,8 @@ func select_data_dir_check(configName *string) {
		fmt.Fprintf(os.Stderr, "\n❌ Configuration Error:\n")
		fmt.Fprintf(os.Stderr, "Config file not found at %s\n", configPath)
		fmt.Fprintf(os.Stderr, "\nGenerate it with one of:\n")
		fmt.Fprintf(os.Stderr, "  network-cli config init --type bootstrap\n")
		fmt.Fprintf(os.Stderr, "  network-cli config init --type node --bootstrap-peers '<peer_multiaddr>'\n")
		fmt.Fprintf(os.Stderr, "  orama config init --type node\n")
		fmt.Fprintf(os.Stderr, "  orama config init --type node --peers '<peer_multiaddr>'\n")
		os.Exit(1)
	}
}
@@ -125,16 +135,35 @@ func startNode(ctx context.Context, cfg *config.Config, port int) error {
	}
	}

	// Save the peer ID to a file for CLI access (especially useful for bootstrap)
	// Save the peer ID to a file for CLI access
	peerID := n.GetPeerID()
	peerInfoFile := filepath.Join(dataDir, "peer.info")
	peerMultiaddr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d/p2p/%s", port, peerID)

	// Extract advertise IP from config (prefer http_adv_address, fallback to raft_adv_address)
	advertiseIP := "0.0.0.0" // Default fallback
	if cfg.Discovery.HttpAdvAddress != "" {
		if host, _, err := net.SplitHostPort(cfg.Discovery.HttpAdvAddress); err == nil && host != "" && host != "localhost" {
			advertiseIP = host
		}
	} else if cfg.Discovery.RaftAdvAddress != "" {
		if host, _, err := net.SplitHostPort(cfg.Discovery.RaftAdvAddress); err == nil && host != "" && host != "localhost" {
			advertiseIP = host
		}
	}

	// Determine IP protocol (IPv4 or IPv6) for multiaddr
	ipProtocol := "ip4"
	if ip := net.ParseIP(advertiseIP); ip != nil && ip.To4() == nil {
		ipProtocol = "ip6"
	}

	peerMultiaddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, advertiseIP, port, peerID)

	if err := os.WriteFile(peerInfoFile, []byte(peerMultiaddr), 0644); err != nil {
		logger.Error("Failed to save peer info: %v", zap.Error(err))
	} else {
		logger.Info("Peer info saved to: %s", zap.String("path", peerInfoFile))
		logger.Info("Bootstrap multiaddr: %s", zap.String("path", peerMultiaddr))
		logger.Info("Peer multiaddr: %s", zap.String("path", peerMultiaddr))
	}

	logger.Info("Node started successfully")
@@ -232,15 +261,24 @@ func main() {

	check_if_should_open_help(help)

	// Check if config file exists
	// Check if config file exists and determine path
	select_data_dir_check(configName)

	// Load configuration from ~/.debros/node.yaml
	configPath, err := config.DefaultPath(*configName)
	if err != nil {
		logger.Error("Failed to determine config path", zap.Error(err))
		fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
		os.Exit(1)
	// Determine config path (handle both absolute and relative paths)
	// Note: select_data_dir_check already validated the path exists, so we can safely determine it here
	var configPath string
	var err error
	if filepath.IsAbs(*configName) {
		// Absolute path passed directly (e.g., from systemd service)
		configPath = *configName
	} else {
		// Relative path - use DefaultPath which checks both ~/.orama/configs/ and ~/.orama/
		configPath, err = config.DefaultPath(*configName)
		if err != nil {
			logger.Error("Failed to determine config path", zap.Error(err))
			fmt.Fprintf(os.Stderr, "Configuration error: %v\n", err)
			os.Exit(1)
		}
	}

	var cfg *config.Config
@@ -278,7 +316,7 @@ func main() {
		zap.Strings("listen_addresses", cfg.Node.ListenAddresses),
		zap.Int("rqlite_http_port", cfg.Database.RQLitePort),
		zap.Int("rqlite_raft_port", cfg.Database.RQLiteRaftPort),
		zap.Strings("bootstrap_peers", cfg.Discovery.BootstrapPeers),
		zap.Strings("peers", cfg.Discovery.BootstrapPeers),
		zap.String("rqlite_join_address", cfg.Database.RQLiteJoinAddress),
		zap.String("data_directory", cfg.Node.DataDir))
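
With the new path handling, the node accepts either a bare filename resolved under `~/.orama` or an absolute path, which is what a systemd unit would pass. A short sketch of both invocations, using the binary name the Makefile above produces:

```bash
# Bare filename, resolved under ~/.orama (configs/ or the directory root)
./bin/orama-node --config node.yaml

# Absolute path, e.g. as a systemd ExecStart line would pass it
./bin/orama-node --config /home/debros/.orama/configs/node.yaml
```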

debian/control (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
Package: orama
Version: 0.69.20
Section: net
Priority: optional
Architecture: amd64
Depends: libc6
Maintainer: DeBros Team <dev@debros.io>
Description: Orama Network - Distributed P2P Database System
 Orama is a distributed peer-to-peer network that combines
 RQLite for distributed SQL, IPFS for content-addressed storage,
 and LibP2P for peer discovery and communication.
 .
 Features:
 - Distributed SQLite database with Raft consensus
 - IPFS-based file storage with encryption
 - LibP2P peer-to-peer networking
 - Olric distributed cache
 - Unified HTTP/HTTPS gateway

debian/postinst (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash
set -e

# Post-installation script for orama package

echo "Orama installed successfully!"
echo ""
echo "To set up your node, run:"
echo "  sudo orama install"
echo ""
echo "This will launch the interactive installer."
echo ""
echo "For command-line installation:"
echo "  sudo orama install --vps-ip <your-ip> --domain <your-domain>"
echo ""
echo "For help:"
echo "  orama --help"
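
Before publishing, the metadata and payload of a built package can be inspected locally with dpkg-deb; a sketch using the version from the control file above:

```bash
# Show the control metadata and the files the package installs
dpkg-deb --info orama_0.69.20_amd64.deb
dpkg-deb --contents orama_0.69.20_amd64.deb

# Install locally and confirm the postinst message is printed
sudo dpkg -i orama_0.69.20_amd64.deb
```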

e2e/auth_negative_test.go (new file, 294 lines)
@@ -0,0 +1,294 @@
//go:build e2e

package e2e

import (
	"context"
	"net/http"
	"testing"
	"time"
	"unicode"
)

func TestAuth_MissingAPIKey(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request without auth headers
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/network/status", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for missing auth, got %d (auth may not be enforced on this endpoint)", resp.StatusCode)
	}
}

func TestAuth_InvalidAPIKey(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request with invalid API key
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	req.Header.Set("Authorization", "Bearer invalid-key-xyz")

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for invalid key, got %d", resp.StatusCode)
	}
}

func TestAuth_CacheWithoutAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request cache endpoint without auth
	req := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/cache/health",
		SkipAuth: true,
	}

	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should fail with 401 or 403
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for cache without auth, got %d", status)
	}
}

func TestAuth_StorageWithoutAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request storage endpoint without auth
	req := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/storage/status/QmTest",
		SkipAuth: true,
	}

	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should fail with 401 or 403
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for storage without auth, got %d", status)
	}
}

func TestAuth_RQLiteWithoutAuth(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request rqlite endpoint without auth
	req := &HTTPRequest{
		Method:   http.MethodGet,
		URL:      GetGatewayURL() + "/v1/rqlite/schema",
		SkipAuth: true,
	}

	_, status, err := req.Do(ctx)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}

	// Should fail with 401 or 403
	if status != http.StatusUnauthorized && status != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for rqlite without auth, got %d", status)
	}
}

func TestAuth_MalformedBearerToken(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Request with malformed bearer token
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
	if err != nil {
		t.Fatalf("failed to create request: %v", err)
	}

	// Missing "Bearer " prefix
	req.Header.Set("Authorization", "invalid-token-format")

	client := NewHTTPClient(30 * time.Second)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	// Should be unauthorized
	if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
		t.Logf("warning: expected 401/403 for malformed token, got %d", resp.StatusCode)
	}
}

func TestAuth_ExpiredJWT(t *testing.T) {
	// Skip if JWT is not being used
	if GetJWT() == "" && GetAPIKey() == "" {
		t.Skip("No JWT or API key configured")
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// This test would require an expired JWT token
// For now, test with a clearly invalid JWT structure
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", "Bearer expired.jwt.token")
|
||||
|
||||
client := NewHTTPClient(30 * time.Second)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Should be unauthorized
|
||||
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
|
||||
t.Logf("warning: expected 401/403 for expired JWT, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuth_EmptyBearerToken(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Request with empty bearer token
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", "Bearer ")
|
||||
|
||||
client := NewHTTPClient(30 * time.Second)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Should be unauthorized
|
||||
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
|
||||
t.Logf("warning: expected 401/403 for empty token, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuth_DuplicateAuthHeaders(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Request with both API key and invalid JWT
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/cache/health",
|
||||
Headers: map[string]string{
|
||||
"Authorization": "Bearer " + GetAPIKey(),
|
||||
"X-API-Key": GetAPIKey(),
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
|
||||
// Should succeed if API key is valid
|
||||
if status != http.StatusOK {
|
||||
t.Logf("request with both headers returned %d", status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuth_CaseSensitiveAPIKey(t *testing.T) {
|
||||
if GetAPIKey() == "" {
|
||||
t.Skip("No API key configured")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Request with incorrectly cased API key
|
||||
apiKey := GetAPIKey()
|
||||
incorrectKey := ""
|
||||
for i, ch := range apiKey {
|
||||
if i%2 == 0 && unicode.IsLetter(ch) {
|
||||
incorrectKey += string(unicode.ToUpper(ch)) // Convert to uppercase
|
||||
} else {
|
||||
incorrectKey += string(ch)
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", "Bearer "+incorrectKey)
|
||||
|
||||
client := NewHTTPClient(30 * time.Second)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// API keys should be case-sensitive
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
t.Logf("warning: API key check may not be case-sensitive (got 200)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAuth_HealthEndpointNoAuth(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Health endpoint at /health should not require auth
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(30 * time.Second)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Should succeed without auth
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected 200 for /health without auth, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
511
e2e/cache_http_test.go
Normal file
511
e2e/cache_http_test.go
Normal file
@ -0,0 +1,511 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestCache_Health(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/cache/health",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("health check failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if resp["status"] != "ok" {
|
||||
t.Fatalf("expected status 'ok', got %v", resp["status"])
|
||||
}
|
||||
|
||||
if resp["service"] != "olric" {
|
||||
t.Fatalf("expected service 'olric', got %v", resp["service"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_PutGet(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "test-key"
|
||||
value := "test-value"
|
||||
|
||||
// Put value
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := putReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("put failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", status, string(body))
|
||||
}
|
||||
|
||||
// Get value
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err = getReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("get failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var getResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &getResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if getResp["value"] != value {
|
||||
t.Fatalf("expected value %q, got %v", value, getResp["value"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_PutGetJSON(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "json-key"
|
||||
jsonValue := map[string]interface{}{
|
||||
"name": "John",
|
||||
"age": 30,
|
||||
"tags": []string{"developer", "golang"},
|
||||
}
|
||||
|
||||
// Put JSON value
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": jsonValue,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("put failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
// Get JSON value
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := getReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("get failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var getResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &getResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
retrievedValue := getResp["value"].(map[string]interface{})
|
||||
if retrievedValue["name"] != jsonValue["name"] {
|
||||
t.Fatalf("expected name %q, got %v", jsonValue["name"], retrievedValue["name"])
|
||||
}
|
||||
if retrievedValue["age"] != float64(30) {
|
||||
t.Fatalf("expected age 30, got %v", retrievedValue["age"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_Delete(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "delete-key"
|
||||
value := "delete-value"
|
||||
|
||||
// Put value
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Delete value
|
||||
deleteReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/delete",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = deleteReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("delete failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
// Verify deletion
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = getReq.Do(ctx)
|
||||
// Should get 404 for missing key
|
||||
if status != http.StatusNotFound {
|
||||
t.Fatalf("expected status 404 for deleted key, got %d", status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_TTL(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "ttl-key"
|
||||
value := "ttl-value"
|
||||
|
||||
// Put value with TTL
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
"ttl": "2s",
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("put with TTL failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
// Verify value exists
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = getReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("get immediately after put failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Wait for TTL expiry (2 seconds + buffer)
|
||||
Delay(2500)
|
||||
|
||||
// Verify value is expired
|
||||
_, status, err = getReq.Do(ctx)
|
||||
if status != http.StatusNotFound {
|
||||
t.Logf("warning: TTL expiry may not be fully implemented; got status %d", status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_Scan(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
|
||||
// Put multiple keys
|
||||
keys := []string{"user-1", "user-2", "session-1", "session-2"}
|
||||
for _, key := range keys {
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": "value-" + key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Scan all keys
|
||||
scanReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/scan",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := scanReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("scan failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var scanResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &scanResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
keysResp := scanResp["keys"].([]interface{})
|
||||
if len(keysResp) < 4 {
|
||||
t.Fatalf("expected at least 4 keys, got %d", len(keysResp))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_ScanWithRegex(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
|
||||
// Put keys with different patterns
|
||||
keys := []string{"user-1", "user-2", "session-1", "session-2"}
|
||||
for _, key := range keys {
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": "value-" + key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Scan with regex pattern
|
||||
scanReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/scan",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"pattern": "^user-",
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := scanReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("scan with regex failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var scanResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &scanResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
keysResp := scanResp["keys"].([]interface{})
|
||||
if len(keysResp) < 2 {
|
||||
t.Fatalf("expected at least 2 keys matching pattern, got %d", len(keysResp))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_MultiGet(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
keys := []string{"key-1", "key-2", "key-3"}
|
||||
|
||||
// Put values
|
||||
for i, key := range keys {
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": fmt.Sprintf("value-%d", i),
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Multi-get
|
||||
multiGetReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/mget",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"keys": keys,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := multiGetReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("mget failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var mgetResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &mgetResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
results := mgetResp["results"].([]interface{})
|
||||
if len(results) != 3 {
|
||||
t.Fatalf("expected 3 results, got %d", len(results))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_MissingDMap(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": "",
|
||||
"key": "any-key",
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := getReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusBadRequest {
|
||||
t.Fatalf("expected status 400 for missing dmap, got %d", status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache_MissingKey(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": "non-existent-key",
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := getReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusNotFound {
|
||||
t.Fatalf("expected status 404 for missing key, got %d", status)
|
||||
}
|
||||
}
|
||||
@ -1,93 +0,0 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/client"
|
||||
)
|
||||
|
||||
func getenv(k, def string) string {
|
||||
if v := strings.TrimSpace(os.Getenv(k)); v != "" {
|
||||
return v
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
func requireEnv(t *testing.T, key string) string {
|
||||
t.Helper()
|
||||
v := strings.TrimSpace(os.Getenv(key))
|
||||
if v == "" {
|
||||
t.Skipf("%s not set; skipping", key)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func TestClient_Database_CreateQueryMigrate(t *testing.T) {
|
||||
apiKey := requireEnv(t, "GATEWAY_API_KEY")
|
||||
namespace := getenv("E2E_CLIENT_NAMESPACE", "default")
|
||||
|
||||
cfg := client.DefaultClientConfig(namespace)
|
||||
cfg.APIKey = apiKey
|
||||
cfg.QuietMode = true
|
||||
|
||||
if v := strings.TrimSpace(os.Getenv("E2E_BOOTSTRAP_PEERS")); v != "" {
|
||||
parts := strings.Split(v, ",")
|
||||
var peers []string
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if p != "" {
|
||||
peers = append(peers, p)
|
||||
}
|
||||
}
|
||||
cfg.BootstrapPeers = peers
|
||||
}
|
||||
if v := strings.TrimSpace(os.Getenv("E2E_RQLITE_NODES")); v != "" {
|
||||
nodes := strings.Fields(strings.ReplaceAll(v, ",", " "))
|
||||
cfg.DatabaseEndpoints = nodes
|
||||
}
|
||||
|
||||
c, err := client.NewClient(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("new client: %v", err)
|
||||
}
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("connect: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = c.Disconnect() })
|
||||
|
||||
// Unique table per run
|
||||
table := fmt.Sprintf("e2e_items_client_%d", time.Now().UnixNano())
|
||||
schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)", table)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
if err := c.Database().CreateTable(ctx, schema); err != nil {
|
||||
t.Fatalf("create table: %v", err)
|
||||
}
|
||||
// Insert via transaction
|
||||
stmts := []string{
|
||||
fmt.Sprintf("INSERT INTO %s(name) VALUES ('alpha')", table),
|
||||
fmt.Sprintf("INSERT INTO %s(name) VALUES ('beta')", table),
|
||||
}
|
||||
ctx2, cancel2 := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel2()
|
||||
if err := c.Database().Transaction(ctx2, stmts); err != nil {
|
||||
t.Fatalf("transaction: %v", err)
|
||||
}
|
||||
// Query rows
|
||||
ctx3, cancel3 := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel3()
|
||||
res, err := c.Database().Query(ctx3, fmt.Sprintf("SELECT name FROM %s ORDER BY id", table))
|
||||
if err != nil {
|
||||
t.Fatalf("query: %v", err)
|
||||
}
|
||||
if res.Count < 2 {
|
||||
t.Fatalf("expected at least 2 rows, got %d", res.Count)
|
||||
}
|
||||
}
|
||||
503
e2e/concurrency_test.go
Normal file
503
e2e/concurrency_test.go
Normal file
@ -0,0 +1,503 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestCache_ConcurrentWrites tests concurrent cache writes
|
||||
func TestCache_ConcurrentWrites(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
numGoroutines := 10
|
||||
var wg sync.WaitGroup
|
||||
var errorCount int32
|
||||
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
key := fmt.Sprintf("key-%d", idx)
|
||||
value := fmt.Sprintf("value-%d", idx)
|
||||
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if errorCount > 0 {
|
||||
t.Fatalf("expected no errors, got %d", errorCount)
|
||||
}
|
||||
|
||||
// Verify all values exist
|
||||
scanReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/scan",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := scanReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("scan failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
var scanResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &scanResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
keys := scanResp["keys"].([]interface{})
|
||||
if len(keys) < numGoroutines {
|
||||
t.Fatalf("expected at least %d keys, got %d", numGoroutines, len(keys))
|
||||
}
|
||||
}
|
||||
|
||||
// TestCache_ConcurrentReads tests concurrent cache reads
|
||||
func TestCache_ConcurrentReads(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "shared-key"
|
||||
value := "shared-value"
|
||||
|
||||
// Put value first
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Read concurrently
|
||||
numGoroutines := 10
|
||||
var wg sync.WaitGroup
|
||||
var errorCount int32
|
||||
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := getReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
return
|
||||
}
|
||||
|
||||
var getResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &getResp); err != nil {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
return
|
||||
}
|
||||
|
||||
if getResp["value"] != value {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if errorCount > 0 {
|
||||
t.Fatalf("expected no errors, got %d", errorCount)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCache_ConcurrentDeleteAndWrite tests concurrent delete and write
|
||||
func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
var wg sync.WaitGroup
|
||||
var errorCount int32
|
||||
|
||||
numWrites := 5
|
||||
numDeletes := 3
|
||||
|
||||
// Write keys
|
||||
for i := 0; i < numWrites; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
key := fmt.Sprintf("key-%d", idx)
|
||||
value := fmt.Sprintf("value-%d", idx)
|
||||
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Delete some keys
|
||||
for i := 0; i < numDeletes; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
key := fmt.Sprintf("key-%d", idx)
|
||||
|
||||
deleteReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/delete",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := deleteReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if errorCount > 0 {
|
||||
t.Fatalf("expected no errors, got %d", errorCount)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRQLite_ConcurrentInserts tests concurrent database inserts
|
||||
func TestRQLite_ConcurrentInserts(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
table := GenerateTableName()
|
||||
schema := fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
|
||||
table,
|
||||
)
|
||||
|
||||
// Create table
|
||||
createReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": schema,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := createReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Insert concurrently
|
||||
numInserts := 10
|
||||
var wg sync.WaitGroup
|
||||
var errorCount int32
|
||||
|
||||
for i := 0; i < numInserts; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
txReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/transaction",
|
||||
Body: map[string]interface{}{
|
||||
"statements": []string{
|
||||
fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, idx),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := txReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
atomic.AddInt32(&errorCount, 1)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if errorCount > 0 {
|
||||
t.Logf("warning: %d concurrent inserts failed", errorCount)
|
||||
}
|
||||
|
||||
// Verify count
|
||||
queryReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/query",
|
||||
Body: map[string]interface{}{
|
||||
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := queryReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("count query failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
var countResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &countResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||
row := rows[0].([]interface{})
|
||||
count := int(row[0].(float64))
|
||||
if count < numInserts {
|
||||
t.Logf("warning: expected %d inserts, got %d", numInserts, count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestRQLite_LargeBatchTransaction tests a large transaction with many statements
|
||||
func TestRQLite_LargeBatchTransaction(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
table := GenerateTableName()
|
||||
schema := fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT)",
|
||||
table,
|
||||
)
|
||||
|
||||
// Create table
|
||||
createReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": schema,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := createReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Create large batch (100 statements)
|
||||
var ops []map[string]interface{}
|
||||
for i := 0; i < 100; i++ {
|
||||
ops = append(ops, map[string]interface{}{
|
||||
"kind": "exec",
|
||||
"sql": fmt.Sprintf("INSERT INTO %s(value) VALUES ('value-%d')", table, i),
|
||||
})
|
||||
}
|
||||
|
||||
txReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/transaction",
|
||||
Body: map[string]interface{}{
|
||||
"ops": ops,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = txReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("large batch transaction failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Verify count
|
||||
queryReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/query",
|
||||
Body: map[string]interface{}{
|
||||
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := queryReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("count query failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
var countResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &countResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||
row := rows[0].([]interface{})
|
||||
if int(row[0].(float64)) != 100 {
|
||||
t.Fatalf("expected 100 rows, got %v", row[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCache_TTLExpiryWithSleep tests TTL expiry with a controlled sleep
|
||||
func TestCache_TTLExpiryWithSleep(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "ttl-expiry-key"
|
||||
value := "ttl-expiry-value"
|
||||
|
||||
// Put value with 2 second TTL
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": value,
|
||||
"ttl": "2s",
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put with TTL failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Verify exists immediately
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = getReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("get immediately after put failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Sleep for TTL duration + buffer
|
||||
Delay(2500)
|
||||
|
||||
// Try to get after TTL expires
|
||||
_, status, err = getReq.Do(ctx)
|
||||
if status == http.StatusOK {
|
||||
t.Logf("warning: TTL expiry may not be fully implemented; key still exists after TTL")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCache_ConcurrentWriteAndDelete tests concurrent writes and deletes on same key
|
||||
func TestCache_ConcurrentWriteAndDelete(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dmap := GenerateDMapName()
|
||||
key := "contested-key"
|
||||
|
||||
// Alternate between writes and deletes
|
||||
numIterations := 5
|
||||
for i := 0; i < numIterations; i++ {
|
||||
// Write
|
||||
putReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/put",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
"value": fmt.Sprintf("value-%d", i),
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := putReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("put failed at iteration %d: status %d, err %v", i, status, err)
|
||||
}
|
||||
|
||||
// Read
|
||||
getReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/get",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = getReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("get failed at iteration %d: status %d, err %v", i, status, err)
|
||||
}
|
||||
|
||||
// Delete
|
||||
deleteReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/cache/delete",
|
||||
Body: map[string]interface{}{
|
||||
"dmap": dmap,
|
||||
"key": key,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = deleteReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Logf("warning: delete at iteration %d failed: status %d, err %v", i, status, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
646
e2e/env.go
Normal file
646
e2e/env.go
Normal file
@ -0,0 +1,646 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/client"
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/ipfs"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"go.uber.org/zap"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
gatewayURLCache string
|
||||
apiKeyCache string
|
||||
bootstrapCache []string
|
||||
rqliteCache []string
|
||||
ipfsClusterCache string
|
||||
ipfsAPICache string
|
||||
cacheMutex sync.RWMutex
|
||||
)
|
||||
|
||||
// loadGatewayConfig loads gateway configuration from ~/.orama/gateway.yaml
|
||||
func loadGatewayConfig() (map[string]interface{}, error) {
|
||||
configPath, err := config.DefaultPath("gateway.yaml")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get gateway config path: %w", err)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read gateway config: %w", err)
|
||||
}
|
||||
|
||||
var cfg map[string]interface{}
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse gateway config: %w", err)
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// loadNodeConfig loads node configuration from ~/.orama/node-*.yaml
|
||||
func loadNodeConfig(filename string) (map[string]interface{}, error) {
|
||||
configPath, err := config.DefaultPath(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get config path: %w", err)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read config: %w", err)
|
||||
}
|
||||
|
||||
var cfg map[string]interface{}
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// GetGatewayURL returns the gateway base URL from config
|
||||
func GetGatewayURL() string {
|
||||
cacheMutex.RLock()
|
||||
if gatewayURLCache != "" {
|
||||
defer cacheMutex.RUnlock()
|
||||
return gatewayURLCache
|
||||
}
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
// Try to load from gateway config
|
||||
gwCfg, err := loadGatewayConfig()
|
||||
if err == nil {
|
||||
if server, ok := gwCfg["server"].(map[interface{}]interface{}); ok {
|
||||
if port, ok := server["port"].(int); ok {
|
||||
url := fmt.Sprintf("http://localhost:%d", port)
|
||||
cacheMutex.Lock()
|
||||
gatewayURLCache = url
|
||||
cacheMutex.Unlock()
|
||||
return url
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return "http://localhost:6001"
|
||||
}
|
||||
|
||||
// GetRQLiteNodes returns rqlite endpoint addresses from config
|
||||
func GetRQLiteNodes() []string {
|
||||
cacheMutex.RLock()
|
||||
if len(rqliteCache) > 0 {
|
||||
defer cacheMutex.RUnlock()
|
||||
return rqliteCache
|
||||
}
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
// Try all node config files
|
||||
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
|
||||
nodeCfg, err := loadNodeConfig(cfgFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok {
|
||||
if rqlitePort, ok := db["rqlite_port"].(int); ok {
|
||||
nodes := []string{fmt.Sprintf("http://localhost:%d", rqlitePort)}
|
||||
cacheMutex.Lock()
|
||||
rqliteCache = nodes
|
||||
cacheMutex.Unlock()
|
||||
return nodes
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return []string{"http://localhost:5001"}
|
||||
}
|
||||
|
||||
// queryAPIKeyFromRQLite queries the SQLite database directly for an API key
|
||||
func queryAPIKeyFromRQLite() (string, error) {
|
||||
// Build database path from bootstrap/node config
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get home directory: %w", err)
|
||||
}
|
||||
|
||||
// Try all node data directories
|
||||
dbPaths := []string{
|
||||
filepath.Join(homeDir, ".orama", "data", "node-1", "rqlite", "db.sqlite"),
|
||||
filepath.Join(homeDir, ".orama", "data", "node-2", "rqlite", "db.sqlite"),
|
||||
filepath.Join(homeDir, ".orama", "data", "node-3", "rqlite", "db.sqlite"),
|
||||
filepath.Join(homeDir, ".orama", "data", "node-4", "rqlite", "db.sqlite"),
|
||||
filepath.Join(homeDir, ".orama", "data", "node-5", "rqlite", "db.sqlite"),
|
||||
}
|
||||
|
||||
for _, dbPath := range dbPaths {
|
||||
// Check if database file exists
|
||||
if _, err := os.Stat(dbPath); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Open SQLite database
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Set timeout for connection
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Query the api_keys table
|
||||
row := db.QueryRowContext(ctx, "SELECT key FROM api_keys ORDER BY id LIMIT 1")
|
||||
var apiKey string
|
||||
if err := row.Scan(&apiKey); err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
continue // Try next database
|
||||
}
|
||||
continue // Skip this database on error
|
||||
}
|
||||
|
||||
if apiKey != "" {
|
||||
return apiKey, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to retrieve API key from any SQLite database")
|
||||
}
|
||||
|
||||
// GetAPIKey returns the gateway API key from rqlite or cache
|
||||
func GetAPIKey() string {
|
||||
cacheMutex.RLock()
|
||||
if apiKeyCache != "" {
|
||||
defer cacheMutex.RUnlock()
|
||||
return apiKeyCache
|
||||
}
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
// Query rqlite for API key
|
||||
apiKey, err := queryAPIKeyFromRQLite()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
cacheMutex.Lock()
|
||||
apiKeyCache = apiKey
|
||||
cacheMutex.Unlock()
|
||||
|
||||
return apiKey
|
||||
}
|
||||
|
||||
// GetJWT returns the gateway JWT token (currently not auto-discovered)
|
||||
func GetJWT() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetBootstrapPeers returns bootstrap peer addresses from config
|
||||
func GetBootstrapPeers() []string {
|
||||
cacheMutex.RLock()
|
||||
if len(bootstrapCache) > 0 {
|
||||
defer cacheMutex.RUnlock()
|
||||
return bootstrapCache
|
||||
}
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"}
|
||||
seen := make(map[string]struct{})
|
||||
var peers []string
|
||||
|
||||
for _, cfgFile := range configFiles {
|
||||
nodeCfg, err := loadNodeConfig(cfgFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
rawPeers, ok := discovery["bootstrap_peers"].([]interface{})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, v := range rawPeers {
|
||||
peerStr, ok := v.(string)
|
||||
if !ok || peerStr == "" {
|
||||
continue
|
||||
}
|
||||
if _, exists := seen[peerStr]; exists {
|
||||
continue
|
||||
}
|
||||
seen[peerStr] = struct{}{}
|
||||
peers = append(peers, peerStr)
|
||||
}
|
||||
}
|
||||
|
||||
if len(peers) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
cacheMutex.Lock()
|
||||
bootstrapCache = peers
|
||||
cacheMutex.Unlock()
|
||||
|
||||
return peers
|
||||
}
|
||||
|
||||
// GetIPFSClusterURL returns the IPFS cluster API URL from config
|
||||
func GetIPFSClusterURL() string {
|
||||
cacheMutex.RLock()
|
||||
if ipfsClusterCache != "" {
|
||||
defer cacheMutex.RUnlock()
|
||||
return ipfsClusterCache
|
||||
}
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
// Try to load from node config
|
||||
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
|
||||
nodeCfg, err := loadNodeConfig(cfgFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok {
|
||||
if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok {
|
||||
if url, ok := ipfs["cluster_api_url"].(string); ok && url != "" {
|
||||
cacheMutex.Lock()
|
||||
ipfsClusterCache = url
|
||||
cacheMutex.Unlock()
|
||||
return url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return "http://localhost:9094"
|
||||
}
|
||||
|
||||
// GetIPFSAPIURL returns the IPFS API URL from config
|
||||
func GetIPFSAPIURL() string {
|
||||
cacheMutex.RLock()
|
||||
if ipfsAPICache != "" {
|
||||
defer cacheMutex.RUnlock()
|
||||
return ipfsAPICache
|
||||
}
|
||||
cacheMutex.RUnlock()
|
||||
|
||||
// Try to load from node config
|
||||
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
|
||||
nodeCfg, err := loadNodeConfig(cfgFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok {
|
||||
if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok {
|
||||
if url, ok := ipfs["api_url"].(string); ok && url != "" {
|
||||
cacheMutex.Lock()
|
||||
ipfsAPICache = url
|
||||
cacheMutex.Unlock()
|
||||
return url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default fallback
|
||||
return "http://localhost:5001"
|
||||
}
|
||||
|
||||
// GetClientNamespace returns the test client namespace from config
|
||||
func GetClientNamespace() string {
|
||||
// Try to load from node config
|
||||
for _, cfgFile := range []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"} {
|
||||
nodeCfg, err := loadNodeConfig(cfgFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{}); ok {
|
||||
if ns, ok := discovery["node_namespace"].(string); ok && ns != "" {
|
||||
return ns
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "default"
|
||||
}
|
||||
|
||||
// SkipIfMissingGateway skips the test if gateway is not accessible or API key not available
|
||||
func SkipIfMissingGateway(t *testing.T) {
|
||||
t.Helper()
|
||||
apiKey := GetAPIKey()
|
||||
if apiKey == "" {
|
||||
t.Skip("API key not available from rqlite; gateway tests skipped")
|
||||
}
|
||||
|
||||
// Verify gateway is accessible
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil)
|
||||
if err != nil {
|
||||
t.Skip("Gateway not accessible; tests skipped")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Skip("Gateway not accessible; tests skipped")
|
||||
return
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
// IsGatewayReady checks if the gateway is accessible and healthy
|
||||
func IsGatewayReady(ctx context.Context) bool {
|
||||
gatewayURL := GetGatewayURL()
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, gatewayURL+"/v1/health", nil)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.StatusCode == http.StatusOK
|
||||
}
|
||||
|
||||
// NewHTTPClient creates an authenticated HTTP client for gateway requests
|
||||
func NewHTTPClient(timeout time.Duration) *http.Client {
|
||||
if timeout == 0 {
|
||||
timeout = 30 * time.Second
|
||||
}
|
||||
return &http.Client{Timeout: timeout}
|
||||
}
|
||||
|
||||
// HTTPRequest is a helper for making authenticated HTTP requests
|
||||
type HTTPRequest struct {
|
||||
Method string
|
||||
URL string
|
||||
Body interface{}
|
||||
Headers map[string]string
|
||||
Timeout time.Duration
|
||||
SkipAuth bool
|
||||
}
|
||||
|
||||
// Do executes an HTTP request and returns the response body
|
||||
func (hr *HTTPRequest) Do(ctx context.Context) ([]byte, int, error) {
|
||||
if hr.Timeout == 0 {
|
||||
hr.Timeout = 30 * time.Second
|
||||
}
|
||||
|
||||
var reqBody io.Reader
|
||||
if hr.Body != nil {
|
||||
data, err := json.Marshal(hr.Body)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to marshal request body: %w", err)
|
||||
}
|
||||
reqBody = bytes.NewReader(data)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, hr.Method, hr.URL, reqBody)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
// Add headers
|
||||
if hr.Headers != nil {
|
||||
for k, v := range hr.Headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Add JSON content type if body is present
|
||||
if hr.Body != nil && req.Header.Get("Content-Type") == "" {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
// Add auth headers
|
||||
if !hr.SkipAuth {
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
req.Header.Set("X-API-Key", apiKey)
|
||||
}
|
||||
}
|
||||
|
||||
client := NewHTTPClient(hr.Timeout)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, resp.StatusCode, fmt.Errorf("failed to read response: %w", err)
|
||||
}
|
||||
|
||||
return respBody, resp.StatusCode, nil
|
||||
}
|
||||
|
||||
// DecodeJSON unmarshals response body into v
|
||||
func DecodeJSON(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// NewNetworkClient creates a network client configured for e2e tests
|
||||
func NewNetworkClient(t *testing.T) client.NetworkClient {
|
||||
t.Helper()
|
||||
|
||||
namespace := GetClientNamespace()
|
||||
cfg := client.DefaultClientConfig(namespace)
|
||||
cfg.APIKey = GetAPIKey()
|
||||
cfg.QuietMode = true // Suppress debug logs in tests
|
||||
|
||||
if jwt := GetJWT(); jwt != "" {
|
||||
cfg.JWT = jwt
|
||||
}
|
||||
|
||||
if peers := GetBootstrapPeers(); len(peers) > 0 {
|
||||
cfg.BootstrapPeers = peers
|
||||
}
|
||||
|
||||
if nodes := GetRQLiteNodes(); len(nodes) > 0 {
|
||||
cfg.DatabaseEndpoints = nodes
|
||||
}
|
||||
|
||||
c, err := client.NewClient(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create network client: %v", err)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// GenerateUniqueID generates a unique identifier for test resources
|
||||
func GenerateUniqueID(prefix string) string {
|
||||
return fmt.Sprintf("%s_%d_%d", prefix, time.Now().UnixNano(), rand.Intn(10000))
|
||||
}
|
||||
|
||||
// GenerateTableName generates a unique table name for database tests
|
||||
func GenerateTableName() string {
|
||||
return GenerateUniqueID("e2e_test")
|
||||
}
|
||||
|
||||
// GenerateDMapName generates a unique dmap name for cache tests
|
||||
func GenerateDMapName() string {
|
||||
return GenerateUniqueID("test_dmap")
|
||||
}
|
||||
|
||||
// GenerateTopic generates a unique topic name for pubsub tests
|
||||
func GenerateTopic() string {
|
||||
return GenerateUniqueID("e2e_topic")
|
||||
}
|
||||
|
||||
// Delay pauses execution for the specified duration
|
||||
func Delay(ms int) {
|
||||
time.Sleep(time.Duration(ms) * time.Millisecond)
|
||||
}
|
||||
|
||||
// WaitForCondition waits for a condition with exponential backoff
|
||||
func WaitForCondition(maxWait time.Duration, check func() bool) error {
|
||||
deadline := time.Now().Add(maxWait)
|
||||
backoff := 100 * time.Millisecond
|
||||
|
||||
for {
|
||||
if check() {
|
||||
return nil
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
return fmt.Errorf("condition not met within %v", maxWait)
|
||||
}
|
||||
time.Sleep(backoff)
|
||||
if backoff < 2*time.Second {
|
||||
backoff = backoff * 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewTestLogger creates a test logger for debugging
|
||||
func NewTestLogger(t *testing.T) *zap.Logger {
|
||||
t.Helper()
|
||||
config := zap.NewDevelopmentConfig()
|
||||
config.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
|
||||
logger, err := config.Build()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create logger: %v", err)
|
||||
}
|
||||
return logger
|
||||
}
|
||||
|
||||
// CleanupDatabaseTable drops a table from the database after tests
|
||||
func CleanupDatabaseTable(t *testing.T, tableName string) {
|
||||
t.Helper()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Query rqlite to drop the table
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
t.Logf("warning: failed to get home directory for cleanup: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(homeDir, ".orama", "data", "node-1", "rqlite", "db.sqlite")
    db, err := sql.Open("sqlite3", dbPath)
    if err != nil {
        t.Logf("warning: failed to open database for cleanup: %v", err)
        return
    }
    defer db.Close()

    dropSQL := fmt.Sprintf("DROP TABLE IF EXISTS %s", tableName)
    if _, err := db.ExecContext(ctx, dropSQL); err != nil {
        t.Logf("warning: failed to drop table %s: %v", tableName, err)
    }
}

// CleanupDMapCache deletes a dmap from the cache after tests
func CleanupDMapCache(t *testing.T, dmapName string) {
    t.Helper()
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    req := &HTTPRequest{
        Method:  http.MethodDelete,
        URL:     GetGatewayURL() + "/v1/cache/dmap/" + dmapName,
        Timeout: 10 * time.Second,
    }

    _, status, err := req.Do(ctx)
    if err != nil {
        t.Logf("warning: failed to delete dmap %s: %v", dmapName, err)
        return
    }

    if status != http.StatusOK && status != http.StatusNoContent && status != http.StatusNotFound {
        t.Logf("warning: delete dmap returned status %d", status)
    }
}

// CleanupIPFSFile unpins a file from IPFS after tests
func CleanupIPFSFile(t *testing.T, cid string) {
    t.Helper()
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    logger := NewTestLogger(t)
    cfg := &ipfs.Config{
        ClusterAPIURL: GetIPFSClusterURL(),
        Timeout:       30 * time.Second,
    }

    client, err := ipfs.NewClient(*cfg, logger)
    if err != nil {
        t.Logf("warning: failed to create IPFS client for cleanup: %v", err)
        return
    }

    if err := client.Unpin(ctx, cid); err != nil {
        t.Logf("warning: failed to unpin file %s: %v", cid, err)
    }
}

// CleanupCacheEntry deletes a cache entry after tests
func CleanupCacheEntry(t *testing.T, dmapName, key string) {
    t.Helper()
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    req := &HTTPRequest{
        Method:  http.MethodDelete,
        URL:     GetGatewayURL() + "/v1/cache/dmap/" + dmapName + "/key/" + key,
        Timeout: 10 * time.Second,
    }

    _, status, err := req.Do(ctx)
    if err != nil {
        t.Logf("warning: failed to delete cache entry: %v", err)
        return
    }

    if status != http.StatusOK && status != http.StatusNoContent && status != http.StatusNotFound {
        t.Logf("warning: delete cache entry returned status %d", status)
    }
}
@@ -1,625 +0,0 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
func getEnv(key, def string) string {
|
||||
if v := strings.TrimSpace(os.Getenv(key)); v != "" {
|
||||
return v
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
func requireAPIKey(t *testing.T) string {
|
||||
t.Helper()
|
||||
key := strings.TrimSpace(os.Getenv("GATEWAY_API_KEY"))
|
||||
if key == "" {
|
||||
t.Skip("GATEWAY_API_KEY not set; skipping gateway auth-required tests")
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func gatewayBaseURL() string {
|
||||
return getEnv("GATEWAY_BASE_URL", "http://localhost:6001")
|
||||
}
|
||||
|
||||
func httpClient() *http.Client {
|
||||
return &http.Client{Timeout: 10 * time.Second}
|
||||
}
|
||||
|
||||
func authHeader(key string) http.Header {
|
||||
h := http.Header{}
|
||||
h.Set("Authorization", "Bearer "+key)
|
||||
h.Set("Content-Type", "application/json")
|
||||
return h
|
||||
}
|
||||
|
||||
func TestGateway_Health(t *testing.T) {
|
||||
base := gatewayBaseURL()
|
||||
resp, err := httpClient().Get(base + "/v1/health")
|
||||
if err != nil {
|
||||
t.Fatalf("health request error: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("unexpected status: %d", resp.StatusCode)
|
||||
}
|
||||
var body map[string]any
|
||||
if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
|
||||
t.Fatalf("decode: %v", err)
|
||||
}
|
||||
if body["status"] != "ok" {
|
||||
t.Fatalf("status not ok: %+v", body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_PubSub_WS_Echo(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
topic := fmt.Sprintf("e2e-ws-%d", time.Now().UnixNano())
|
||||
wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{}
|
||||
hdr.Set("Authorization", "Bearer "+key)
|
||||
|
||||
c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr)
|
||||
if err != nil {
|
||||
t.Fatalf("ws dial: %v", err)
|
||||
}
|
||||
defer c.Close()
|
||||
defer c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
|
||||
|
||||
msg := []byte("hello-ws")
|
||||
if err := c.WriteMessage(websocket.TextMessage, msg); err != nil {
|
||||
t.Fatalf("ws write: %v", err)
|
||||
}
|
||||
|
||||
_, data, err := c.ReadMessage()
|
||||
if err != nil {
|
||||
t.Fatalf("ws read: %v", err)
|
||||
}
|
||||
if string(data) != string(msg) {
|
||||
t.Fatalf("ws echo mismatch: %q", string(data))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_PubSub_RestPublishToWS(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
topic := fmt.Sprintf("e2e-rest-%d", time.Now().UnixNano())
|
||||
wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{}
|
||||
hdr.Set("Authorization", "Bearer "+key)
|
||||
c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr)
|
||||
if err != nil {
|
||||
t.Fatalf("ws dial: %v", err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
// Publish via REST
|
||||
payload := randomBytes(24)
|
||||
b64 := base64.StdEncoding.EncodeToString(payload)
|
||||
body := fmt.Sprintf(`{"topic":"%s","data_base64":"%s"}`, topic, b64)
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/pubsub/publish", strings.NewReader(body))
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("publish do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("publish status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Expect the message via WS
|
||||
_ = c.SetReadDeadline(time.Now().Add(5 * time.Second))
|
||||
_, data, err := c.ReadMessage()
|
||||
if err != nil {
|
||||
t.Fatalf("ws read: %v", err)
|
||||
}
|
||||
if string(data) != string(payload) {
|
||||
t.Fatalf("payload mismatch: %q != %q", string(data), string(payload))
|
||||
}
|
||||
|
||||
// Topics list should include our topic (without namespace prefix)
|
||||
req2, _ := http.NewRequest(http.MethodGet, base+"/v1/pubsub/topics", nil)
|
||||
req2.Header = authHeader(key)
|
||||
resp2, err := httpClient().Do(req2)
|
||||
if err != nil {
|
||||
t.Fatalf("topics do: %v", err)
|
||||
}
|
||||
defer resp2.Body.Close()
|
||||
if resp2.StatusCode != http.StatusOK {
|
||||
t.Fatalf("topics status: %d", resp2.StatusCode)
|
||||
}
|
||||
var tlist struct {
|
||||
Topics []string `json:"topics"`
|
||||
}
|
||||
if err := json.NewDecoder(resp2.Body).Decode(&tlist); err != nil {
|
||||
t.Fatalf("topics decode: %v", err)
|
||||
}
|
||||
found := false
|
||||
for _, tt := range tlist.Topics {
|
||||
if tt == topic {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("topic %s not found in topics list", topic)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_Database_CreateQueryMigrate(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
// Create table
|
||||
schema := `CREATE TABLE IF NOT EXISTS e2e_items (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)`
|
||||
body := fmt.Sprintf(`{"schema":%q}`, schema)
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body))
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("create-table do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("create-table status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Insert via transaction (simulate migration/data seed)
|
||||
txBody := `{"statements":["INSERT INTO e2e_items(name) VALUES ('one')","INSERT INTO e2e_items(name) VALUES ('two')"]}`
|
||||
req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txBody))
|
||||
req.Header = authHeader(key)
|
||||
resp, err = httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("tx do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("tx status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Query rows
|
||||
qBody := `{"sql":"SELECT name FROM e2e_items ORDER BY id ASC"}`
|
||||
req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody))
|
||||
req.Header = authHeader(key)
|
||||
resp, err = httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("query do: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("query status: %d", resp.StatusCode)
|
||||
}
|
||||
var qr struct {
|
||||
Columns []string `json:"columns"`
|
||||
Rows [][]any `json:"rows"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
|
||||
t.Fatalf("query decode: %v", err)
|
||||
}
|
||||
if qr.Count < 2 {
|
||||
t.Fatalf("expected at least 2 rows, got %d", qr.Count)
|
||||
}
|
||||
|
||||
// Schema endpoint returns tables
|
||||
req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil)
|
||||
req.Header = authHeader(key)
|
||||
resp2, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("schema do: %v", err)
|
||||
}
|
||||
defer resp2.Body.Close()
|
||||
if resp2.StatusCode != http.StatusOK {
|
||||
t.Fatalf("schema status: %d", resp2.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_Database_DropTable(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
table := fmt.Sprintf("e2e_tmp_%d", time.Now().UnixNano())
|
||||
schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)", table)
|
||||
// create
|
||||
body := fmt.Sprintf(`{"schema":%q}`, schema)
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body))
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("create-table do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("create-table status: %d", resp.StatusCode)
|
||||
}
|
||||
// drop
|
||||
dbody := fmt.Sprintf(`{"table":%q}`, table)
|
||||
req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/drop-table", strings.NewReader(dbody))
|
||||
req.Header = authHeader(key)
|
||||
resp, err = httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("drop-table do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("drop-table status: %d", resp.StatusCode)
|
||||
}
|
||||
// verify not in schema
|
||||
req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil)
|
||||
req.Header = authHeader(key)
|
||||
resp2, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("schema do: %v", err)
|
||||
}
|
||||
defer resp2.Body.Close()
|
||||
if resp2.StatusCode != http.StatusOK {
|
||||
t.Fatalf("schema status: %d", resp2.StatusCode)
|
||||
}
|
||||
var schemaResp struct {
|
||||
Tables []struct {
|
||||
Name string `json:"name"`
|
||||
} `json:"tables"`
|
||||
}
|
||||
if err := json.NewDecoder(resp2.Body).Decode(&schemaResp); err != nil {
|
||||
t.Fatalf("schema decode: %v", err)
|
||||
}
|
||||
for _, tbl := range schemaResp.Tables {
|
||||
if tbl.Name == table {
|
||||
t.Fatalf("table %s still present after drop", table)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_Database_RecreateWithFK(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
// base tables
|
||||
orgs := fmt.Sprintf("e2e_orgs_%d", time.Now().UnixNano())
|
||||
users := fmt.Sprintf("e2e_users_%d", time.Now().UnixNano())
|
||||
createOrgs := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)", orgs))
|
||||
createUsers := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)", users))
|
||||
|
||||
for _, body := range []string{createOrgs, createUsers} {
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body))
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("create-table do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("create-table status: %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
// seed data
|
||||
txSeed := fmt.Sprintf(`{"statements":["INSERT INTO %s(id,name) VALUES (1,'org')","INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')"]}`, orgs, users)
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txSeed))
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("seed tx do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("seed tx status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// migrate: change users.age TEXT -> INTEGER and add FK to orgs(id)
|
||||
// Note: Some backends may not support connection-scoped BEGIN/COMMIT or PRAGMA via HTTP.
|
||||
// We apply the standard recreate pattern without explicit PRAGMAs/transaction.
|
||||
txMig := fmt.Sprintf(`{"statements":[
|
||||
"CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)",
|
||||
"INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s",
|
||||
"DROP TABLE %s",
|
||||
"ALTER TABLE %s_new RENAME TO %s"
|
||||
]}`, users, orgs, users, users, users, users, users)
|
||||
req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txMig))
|
||||
req.Header = authHeader(key)
|
||||
resp, err = httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("mig tx do: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("mig tx status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// verify schema type change
|
||||
qBody := fmt.Sprintf(`{"sql":"PRAGMA table_info(%s)"}`, users)
|
||||
req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody))
|
||||
req.Header = authHeader(key)
|
||||
resp, err = httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("pragma do: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("pragma status: %d", resp.StatusCode)
|
||||
}
|
||||
var qr struct {
|
||||
Columns []string `json:"columns"`
|
||||
Rows [][]any `json:"rows"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil {
|
||||
t.Fatalf("pragma decode: %v", err)
|
||||
}
|
||||
// column order: cid,name,type,notnull,dflt_value,pk
|
||||
ageIsInt := false
|
||||
for _, row := range qr.Rows {
|
||||
if len(row) >= 3 && fmt.Sprintf("%v", row[1]) == "age" {
|
||||
tstr := strings.ToUpper(fmt.Sprintf("%v", row[2]))
|
||||
if strings.Contains(tstr, "INT") {
|
||||
ageIsInt = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ageIsInt {
|
||||
// Fallback: inspect CREATE TABLE SQL from sqlite_master
|
||||
qBody2 := fmt.Sprintf(`{"sql":"SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'"}`, users)
|
||||
req2, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody2))
|
||||
req2.Header = authHeader(key)
|
||||
resp3, err := httpClient().Do(req2)
|
||||
if err != nil {
|
||||
t.Fatalf("sqlite_master do: %v", err)
|
||||
}
|
||||
defer resp3.Body.Close()
|
||||
if resp3.StatusCode != http.StatusOK {
|
||||
t.Fatalf("sqlite_master status: %d", resp3.StatusCode)
|
||||
}
|
||||
var qr2 struct {
|
||||
Rows [][]any `json:"rows"`
|
||||
}
|
||||
if err := json.NewDecoder(resp3.Body).Decode(&qr2); err != nil {
|
||||
t.Fatalf("sqlite_master decode: %v", err)
|
||||
}
|
||||
found := false
|
||||
for _, row := range qr2.Rows {
|
||||
if len(row) > 0 {
|
||||
sql := strings.ToUpper(fmt.Sprintf("%v", row[0]))
|
||||
if strings.Contains(sql, "AGE INT") || strings.Contains(sql, "AGE INTEGER") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("age column type not INTEGER after migration")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_Storage_UploadMultipart(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
// Create multipart form data using proper multipart writer
|
||||
content := []byte("test file content for IPFS upload")
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
part, err := writer.CreateFormFile("file", "test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("create form file: %v", err)
|
||||
}
|
||||
if _, err := part.Write(content); err != nil {
|
||||
t.Fatalf("write content: %v", err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("close writer: %v", err)
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", &buf)
|
||||
req.Header = authHeader(key)
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload do: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusServiceUnavailable {
|
||||
t.Skip("IPFS storage not available; skipping storage tests")
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("upload status: %d, body: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var uploadResp struct {
|
||||
Cid string `json:"cid"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
|
||||
t.Fatalf("upload decode: %v", err)
|
||||
}
|
||||
if uploadResp.Cid == "" {
|
||||
t.Fatalf("upload returned empty CID")
|
||||
}
|
||||
if uploadResp.Name != "test.txt" {
|
||||
t.Fatalf("upload name mismatch: got %s", uploadResp.Name)
|
||||
}
|
||||
if uploadResp.Size == 0 {
|
||||
t.Fatalf("upload size is zero")
|
||||
}
|
||||
|
||||
// Test pinning the uploaded content
|
||||
pinBody := fmt.Sprintf(`{"cid":"%s","name":"test-pinned"}`, uploadResp.Cid)
|
||||
req2, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/pin", strings.NewReader(pinBody))
|
||||
req2.Header = authHeader(key)
|
||||
resp2, err := httpClient().Do(req2)
|
||||
if err != nil {
|
||||
t.Fatalf("pin do: %v", err)
|
||||
}
|
||||
defer resp2.Body.Close()
|
||||
if resp2.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp2.Body)
|
||||
t.Fatalf("pin status: %d, body: %s", resp2.StatusCode, string(body))
|
||||
}
|
||||
|
||||
// Test getting pin status
|
||||
req3, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/"+uploadResp.Cid, nil)
|
||||
req3.Header = authHeader(key)
|
||||
resp3, err := httpClient().Do(req3)
|
||||
if err != nil {
|
||||
t.Fatalf("status do: %v", err)
|
||||
}
|
||||
defer resp3.Body.Close()
|
||||
if resp3.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp3.Body)
|
||||
t.Fatalf("status status: %d, body: %s", resp3.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var statusResp struct {
|
||||
Cid string `json:"cid"`
|
||||
Status string `json:"status"`
|
||||
ReplicationFactor int `json:"replication_factor"`
|
||||
Peers []string `json:"peers"`
|
||||
}
|
||||
if err := json.NewDecoder(resp3.Body).Decode(&statusResp); err != nil {
|
||||
t.Fatalf("status decode: %v", err)
|
||||
}
|
||||
if statusResp.Cid != uploadResp.Cid {
|
||||
t.Fatalf("status CID mismatch: got %s", statusResp.Cid)
|
||||
}
|
||||
|
||||
// Test retrieving content
|
||||
req4, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/get/"+uploadResp.Cid, nil)
|
||||
req4.Header = authHeader(key)
|
||||
resp4, err := httpClient().Do(req4)
|
||||
if err != nil {
|
||||
t.Fatalf("get do: %v", err)
|
||||
}
|
||||
defer resp4.Body.Close()
|
||||
if resp4.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp4.Body)
|
||||
t.Fatalf("get status: %d, body: %s", resp4.StatusCode, string(body))
|
||||
}
|
||||
|
||||
retrieved, err := io.ReadAll(resp4.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("get read: %v", err)
|
||||
}
|
||||
if string(retrieved) != string(content) {
|
||||
t.Fatalf("retrieved content mismatch: got %q", string(retrieved))
|
||||
}
|
||||
|
||||
// Test unpinning
|
||||
req5, _ := http.NewRequest(http.MethodDelete, base+"/v1/storage/unpin/"+uploadResp.Cid, nil)
|
||||
req5.Header = authHeader(key)
|
||||
resp5, err := httpClient().Do(req5)
|
||||
if err != nil {
|
||||
t.Fatalf("unpin do: %v", err)
|
||||
}
|
||||
defer resp5.Body.Close()
|
||||
if resp5.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp5.Body)
|
||||
t.Fatalf("unpin status: %d, body: %s", resp5.StatusCode, string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_Storage_UploadJSON(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
// Test JSON upload with base64 data
|
||||
content := []byte("test json upload content")
|
||||
b64 := base64.StdEncoding.EncodeToString(content)
|
||||
body := fmt.Sprintf(`{"name":"test.json","data":"%s"}`, b64)
|
||||
|
||||
req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", strings.NewReader(body))
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload json do: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusServiceUnavailable {
|
||||
t.Skip("IPFS storage not available; skipping storage tests")
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("upload json status: %d, body: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var uploadResp struct {
|
||||
Cid string `json:"cid"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
|
||||
t.Fatalf("upload json decode: %v", err)
|
||||
}
|
||||
if uploadResp.Cid == "" {
|
||||
t.Fatalf("upload json returned empty CID")
|
||||
}
|
||||
if uploadResp.Name != "test.json" {
|
||||
t.Fatalf("upload json name mismatch: got %s", uploadResp.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGateway_Storage_InvalidCID(t *testing.T) {
|
||||
key := requireAPIKey(t)
|
||||
base := gatewayBaseURL()
|
||||
|
||||
// Test status with invalid CID
|
||||
req, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/QmInvalidCID123", nil)
|
||||
req.Header = authHeader(key)
|
||||
resp, err := httpClient().Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("status invalid do: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusServiceUnavailable {
|
||||
t.Skip("IPFS storage not available; skipping storage tests")
|
||||
}
|
||||
|
||||
// Should return error but not crash
|
||||
if resp.StatusCode != http.StatusNotFound && resp.StatusCode != http.StatusInternalServerError {
|
||||
t.Fatalf("expected error status for invalid CID, got %d", resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func toWSURL(httpURL string) string {
|
||||
u, err := url.Parse(httpURL)
|
||||
if err != nil {
|
||||
return httpURL
|
||||
}
|
||||
if u.Scheme == "https" {
|
||||
u.Scheme = "wss"
|
||||
} else {
|
||||
u.Scheme = "ws"
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func randomBytes(n int) []byte {
|
||||
b := make([]byte, n)
|
||||
_, _ = rand.Read(b)
|
||||
return b
|
||||
}
|
||||
400
e2e/ipfs_cluster_test.go
Normal file
@@ -0,0 +1,400 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/ipfs"
|
||||
)
|
||||
|
||||
func TestIPFSCluster_Health(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
err = client.Health(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("health check failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIPFSCluster_GetPeerCount(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
peerCount, err := client.GetPeerCount(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("get peer count failed: %v", err)
|
||||
}
|
||||
|
||||
if peerCount < 0 {
|
||||
t.Fatalf("expected non-negative peer count, got %d", peerCount)
|
||||
}
|
||||
|
||||
t.Logf("IPFS cluster peers: %d", peerCount)
|
||||
}
|
||||
|
||||
func TestIPFSCluster_AddFile(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
content := []byte("IPFS cluster test content")
|
||||
result, err := client.Add(ctx, bytes.NewReader(content), "test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("add file failed: %v", err)
|
||||
}
|
||||
|
||||
if result.Cid == "" {
|
||||
t.Fatalf("expected non-empty CID")
|
||||
}
|
||||
|
||||
if result.Size != int64(len(content)) {
|
||||
t.Fatalf("expected size %d, got %d", len(content), result.Size)
|
||||
}
|
||||
|
||||
t.Logf("Added file with CID: %s", result.Cid)
|
||||
}
|
||||
|
||||
func TestIPFSCluster_PinFile(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Add file first
|
||||
content := []byte("IPFS pin test content")
|
||||
addResult, err := client.Add(ctx, bytes.NewReader(content), "pin-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("add file failed: %v", err)
|
||||
}
|
||||
|
||||
cid := addResult.Cid
|
||||
|
||||
// Pin the file
|
||||
pinResult, err := client.Pin(ctx, cid, "pinned-file", 1)
|
||||
if err != nil {
|
||||
t.Fatalf("pin file failed: %v", err)
|
||||
}
|
||||
|
||||
if pinResult.Cid != cid {
|
||||
t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid)
|
||||
}
|
||||
|
||||
t.Logf("Pinned file: %s", cid)
|
||||
}
|
||||
|
||||
func TestIPFSCluster_PinStatus(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Add and pin file
|
||||
content := []byte("IPFS status test content")
|
||||
addResult, err := client.Add(ctx, bytes.NewReader(content), "status-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("add file failed: %v", err)
|
||||
}
|
||||
|
||||
cid := addResult.Cid
|
||||
|
||||
pinResult, err := client.Pin(ctx, cid, "status-test", 1)
|
||||
if err != nil {
|
||||
t.Fatalf("pin file failed: %v", err)
|
||||
}
|
||||
|
||||
if pinResult.Cid != cid {
|
||||
t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid)
|
||||
}
|
||||
|
||||
// Give pin time to propagate
|
||||
Delay(1000)
|
||||
|
||||
// Get status
|
||||
status, err := client.PinStatus(ctx, cid)
|
||||
if err != nil {
|
||||
t.Fatalf("get pin status failed: %v", err)
|
||||
}
|
||||
|
||||
if status.Cid != cid {
|
||||
t.Fatalf("expected cid %s, got %s", cid, status.Cid)
|
||||
}
|
||||
|
||||
if status.Name != "status-test" {
|
||||
t.Fatalf("expected name 'status-test', got %s", status.Name)
|
||||
}
|
||||
|
||||
if status.ReplicationFactor < 1 {
|
||||
t.Logf("warning: replication factor is %d, expected >= 1", status.ReplicationFactor)
|
||||
}
|
||||
|
||||
t.Logf("Pin status: %s (replication: %d, peers: %d)", status.Status, status.ReplicationFactor, len(status.Peers))
|
||||
}
|
||||
|
||||
func TestIPFSCluster_UnpinFile(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Add and pin file
|
||||
content := []byte("IPFS unpin test content")
|
||||
addResult, err := client.Add(ctx, bytes.NewReader(content), "unpin-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("add file failed: %v", err)
|
||||
}
|
||||
|
||||
cid := addResult.Cid
|
||||
|
||||
_, err = client.Pin(ctx, cid, "unpin-test", 1)
|
||||
if err != nil {
|
||||
t.Fatalf("pin file failed: %v", err)
|
||||
}
|
||||
|
||||
// Unpin file
|
||||
err = client.Unpin(ctx, cid)
|
||||
if err != nil {
|
||||
t.Fatalf("unpin file failed: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Unpinned file: %s", cid)
|
||||
}
|
||||
|
||||
func TestIPFSCluster_GetFile(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Add file
|
||||
content := []byte("IPFS get test content")
|
||||
addResult, err := client.Add(ctx, bytes.NewReader(content), "get-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("add file failed: %v", err)
|
||||
}
|
||||
|
||||
cid := addResult.Cid
|
||||
|
||||
// Give time for propagation
|
||||
Delay(1000)
|
||||
|
||||
// Get file
|
||||
rc, err := client.Get(ctx, cid, GetIPFSAPIURL())
|
||||
if err != nil {
|
||||
t.Fatalf("get file failed: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
retrievedContent, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read content: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(retrievedContent, content) {
|
||||
t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent))
|
||||
}
|
||||
|
||||
t.Logf("Retrieved file: %s (%d bytes)", cid, len(retrievedContent))
|
||||
}
|
||||
|
||||
func TestIPFSCluster_LargeFile(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 60 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Create 5MB file
|
||||
content := bytes.Repeat([]byte("x"), 5*1024*1024)
|
||||
result, err := client.Add(ctx, bytes.NewReader(content), "large.bin")
|
||||
if err != nil {
|
||||
t.Fatalf("add large file failed: %v", err)
|
||||
}
|
||||
|
||||
if result.Cid == "" {
|
||||
t.Fatalf("expected non-empty CID")
|
||||
}
|
||||
|
||||
if result.Size != int64(len(content)) {
|
||||
t.Fatalf("expected size %d, got %d", len(content), result.Size)
|
||||
}
|
||||
|
||||
t.Logf("Added large file with CID: %s (%d bytes)", result.Cid, result.Size)
|
||||
}
|
||||
|
||||
func TestIPFSCluster_ReplicationFactor(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Add file
|
||||
content := []byte("IPFS replication test content")
|
||||
addResult, err := client.Add(ctx, bytes.NewReader(content), "replication-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("add file failed: %v", err)
|
||||
}
|
||||
|
||||
cid := addResult.Cid
|
||||
|
||||
// Pin with specific replication factor
|
||||
replicationFactor := 2
|
||||
pinResult, err := client.Pin(ctx, cid, "replication-test", replicationFactor)
|
||||
if err != nil {
|
||||
t.Fatalf("pin file failed: %v", err)
|
||||
}
|
||||
|
||||
if pinResult.Cid != cid {
|
||||
t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid)
|
||||
}
|
||||
|
||||
// Give time for replication
|
||||
Delay(2000)
|
||||
|
||||
// Check status
|
||||
status, err := client.PinStatus(ctx, cid)
|
||||
if err != nil {
|
||||
t.Fatalf("get pin status failed: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Replication factor: requested=%d, actual=%d, peers=%d", replicationFactor, status.ReplicationFactor, len(status.Peers))
|
||||
}
|
||||
|
||||
func TestIPFSCluster_MultipleFiles(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger := NewTestLogger(t)
|
||||
cfg := ipfs.Config{
|
||||
ClusterAPIURL: GetIPFSClusterURL(),
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
client, err := ipfs.NewClient(cfg, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create IPFS client: %v", err)
|
||||
}
|
||||
|
||||
// Add multiple files
|
||||
numFiles := 5
|
||||
var cids []string
|
||||
|
||||
for i := 0; i < numFiles; i++ {
|
||||
content := []byte(fmt.Sprintf("File %d", i))
|
||||
result, err := client.Add(ctx, bytes.NewReader(content), fmt.Sprintf("file%d.txt", i))
|
||||
if err != nil {
|
||||
t.Fatalf("add file %d failed: %v", i, err)
|
||||
}
|
||||
cids = append(cids, result.Cid)
|
||||
}
|
||||
|
||||
if len(cids) != numFiles {
|
||||
t.Fatalf("expected %d files added, got %d", numFiles, len(cids))
|
||||
}
|
||||
|
||||
// Verify all files exist
|
||||
for i, cid := range cids {
|
||||
status, err := client.PinStatus(ctx, cid)
|
||||
if err != nil {
|
||||
t.Logf("warning: failed to get status for file %d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if status.Cid != cid {
|
||||
t.Fatalf("expected cid %s, got %s", cid, status.Cid)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("Successfully added and verified %d files", numFiles)
|
||||
}
|
||||
294
e2e/libp2p_connectivity_test.go
Normal file
@@ -0,0 +1,294 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestLibP2P_PeerConnectivity(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create and connect client
|
||||
c := NewNetworkClient(t)
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("connect failed: %v", err)
|
||||
}
|
||||
defer c.Disconnect()
|
||||
|
||||
// Verify peer connectivity through the gateway
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/peers",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("peers request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
peers := resp["peers"].([]interface{})
|
||||
if len(peers) == 0 {
|
||||
t.Logf("warning: no peers connected (cluster may still be initializing)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLibP2P_BootstrapPeers(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
bootstrapPeers := GetBootstrapPeers()
|
||||
if len(bootstrapPeers) == 0 {
|
||||
t.Skipf("E2E_BOOTSTRAP_PEERS not set; skipping")
|
||||
}
|
||||
|
||||
// Create client with bootstrap peers explicitly set
|
||||
c := NewNetworkClient(t)
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("connect failed: %v", err)
|
||||
}
|
||||
defer c.Disconnect()
|
||||
|
||||
// Give peer discovery time
|
||||
Delay(2000)
|
||||
|
||||
// Verify we're connected (check via gateway status)
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/status",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("status request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if resp["connected"] != true {
|
||||
t.Logf("warning: client not connected to network (cluster may still be initializing)")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLibP2P_MultipleClientConnections(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create multiple clients
|
||||
c1 := NewNetworkClient(t)
|
||||
c2 := NewNetworkClient(t)
|
||||
c3 := NewNetworkClient(t)
|
||||
|
||||
if err := c1.Connect(); err != nil {
|
||||
t.Fatalf("c1 connect failed: %v", err)
|
||||
}
|
||||
defer c1.Disconnect()
|
||||
|
||||
if err := c2.Connect(); err != nil {
|
||||
t.Fatalf("c2 connect failed: %v", err)
|
||||
}
|
||||
defer c2.Disconnect()
|
||||
|
||||
if err := c3.Connect(); err != nil {
|
||||
t.Fatalf("c3 connect failed: %v", err)
|
||||
}
|
||||
defer c3.Disconnect()
|
||||
|
||||
// Give peer discovery time
|
||||
Delay(2000)
|
||||
|
||||
// Verify gateway sees multiple peers
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/peers",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("peers request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
peers := resp["peers"].([]interface{})
|
||||
if len(peers) < 1 {
|
||||
t.Logf("warning: expected at least 1 peer, got %d", len(peers))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
c := NewNetworkClient(t)
|
||||
|
||||
// Connect
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("connect failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify connected via gateway
|
||||
req1 := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/status",
|
||||
}
|
||||
|
||||
_, status1, err := req1.Do(ctx)
|
||||
if err != nil || status1 != http.StatusOK {
|
||||
t.Logf("warning: gateway check failed before disconnect: status %d, err %v", status1, err)
|
||||
}
|
||||
|
||||
// Disconnect
|
||||
if err := c.Disconnect(); err != nil {
|
||||
t.Logf("warning: disconnect failed: %v", err)
|
||||
}
|
||||
|
||||
// Give time for disconnect to propagate
|
||||
Delay(500)
|
||||
|
||||
// Reconnect
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("reconnect failed: %v", err)
|
||||
}
|
||||
defer c.Disconnect()
|
||||
|
||||
// Verify connected via gateway again
|
||||
req2 := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/status",
|
||||
}
|
||||
|
||||
_, status2, err := req2.Do(ctx)
|
||||
if err != nil || status2 != http.StatusOK {
|
||||
t.Logf("warning: gateway check failed after reconnect: status %d, err %v", status2, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLibP2P_PeerDiscovery(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create client
|
||||
c := NewNetworkClient(t)
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("connect failed: %v", err)
|
||||
}
|
||||
defer c.Disconnect()
|
||||
|
||||
// Give peer discovery time
|
||||
Delay(3000)
|
||||
|
||||
// Get peer list
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/peers",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("peers request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
peers := resp["peers"].([]interface{})
|
||||
if len(peers) == 0 {
|
||||
t.Logf("warning: no peers discovered (cluster may not have multiple nodes)")
|
||||
} else {
|
||||
// Verify peer format (should be multiaddr strings)
|
||||
for _, p := range peers {
|
||||
peerStr := p.(string)
|
||||
if !strings.Contains(peerStr, "/p2p/") && !strings.Contains(peerStr, "/ipfs/") {
|
||||
t.Logf("warning: unexpected peer format: %s", peerStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLibP2P_PeerAddressFormat(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create client
|
||||
c := NewNetworkClient(t)
|
||||
if err := c.Connect(); err != nil {
|
||||
t.Fatalf("connect failed: %v", err)
|
||||
}
|
||||
defer c.Disconnect()
|
||||
|
||||
// Get peer list
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/peers",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("peers request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
peers := resp["peers"].([]interface{})
|
||||
for _, p := range peers {
|
||||
peerStr := p.(string)
|
||||
// Multiaddrs should start with /
|
||||
if !strings.HasPrefix(peerStr, "/") {
|
||||
t.Fatalf("expected multiaddr format, got %s", peerStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
223
e2e/network_http_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNetwork_Health(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/health",
|
||||
SkipAuth: true,
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("health check failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if resp["status"] != "ok" {
|
||||
t.Fatalf("expected status 'ok', got %v", resp["status"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetwork_Status(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/status",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("status check failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if _, ok := resp["connected"]; !ok {
|
||||
t.Fatalf("expected 'connected' field in response")
|
||||
}
|
||||
|
||||
if _, ok := resp["peer_count"]; !ok {
|
||||
t.Fatalf("expected 'peer_count' field in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetwork_Peers(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/peers",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("peers check failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if _, ok := resp["peers"]; !ok {
|
||||
t.Fatalf("expected 'peers' field in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetwork_ProxyAnonSuccess(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/proxy/anon",
|
||||
Body: map[string]interface{}{
|
||||
"url": "https://httpbin.org/get",
|
||||
"method": "GET",
|
||||
"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"},
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("proxy anon request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", status, string(body))
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if resp["status_code"] != float64(200) {
|
||||
t.Fatalf("expected proxy status 200, got %v", resp["status_code"])
|
||||
}
|
||||
|
||||
if _, ok := resp["body"]; !ok {
|
||||
t.Fatalf("expected 'body' field in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetwork_ProxyAnonBadURL(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/proxy/anon",
|
||||
Body: map[string]interface{}{
|
||||
"url": "http://localhost:1/nonexistent",
|
||||
"method": "GET",
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := req.Do(ctx)
|
||||
if err == nil && status == http.StatusOK {
|
||||
t.Fatalf("expected error for bad URL, got status 200")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/proxy/anon",
|
||||
Body: map[string]interface{}{
|
||||
"url": "https://httpbin.org/post",
|
||||
"method": "POST",
|
||||
"headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"},
|
||||
"body": "test_data",
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("proxy anon POST failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", status, string(body))
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if resp["status_code"] != float64(200) {
|
||||
t.Fatalf("expected proxy status 200, got %v", resp["status_code"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestNetwork_Unauthorized(t *testing.T) {
|
||||
// Test without API key
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create request without auth
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/network/status",
|
||||
SkipAuth: true,
|
||||
}
|
||||
|
||||
_, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusUnauthorized && status != http.StatusForbidden {
|
||||
t.Logf("warning: expected 401/403, got %d (auth may not be enforced on this endpoint)", status)
|
||||
}
|
||||
}
|
||||
421
e2e/pubsub_client_test.go
Normal file
@@ -0,0 +1,421 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func newMessageCollector(ctx context.Context, buffer int) (chan []byte, func(string, []byte) error) {
|
||||
if buffer <= 0 {
|
||||
buffer = 1
|
||||
}
|
||||
|
||||
ch := make(chan []byte, buffer)
|
||||
handler := func(_ string, data []byte) error {
|
||||
copied := append([]byte(nil), data...)
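// Copy the payload before handing it off: the pubsub layer may reuse the underlying buffer after the handler returns.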
|
||||
select {
|
||||
case ch <- copied:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ch, handler
|
||||
}
|
||||
|
||||
func waitForMessage(ctx context.Context, ch <-chan []byte) ([]byte, error) {
|
||||
select {
|
||||
case msg := <-ch:
|
||||
return msg, nil
|
||||
case <-ctx.Done():
|
||||
return nil, fmt.Errorf("context finished while waiting for pubsub message: %w", ctx.Err())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubSub_SubscribePublish(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create two clients
|
||||
client1 := NewNetworkClient(t)
|
||||
client2 := NewNetworkClient(t)
|
||||
|
||||
if err := client1.Connect(); err != nil {
|
||||
t.Fatalf("client1 connect failed: %v", err)
|
||||
}
|
||||
defer client1.Disconnect()
|
||||
|
||||
if err := client2.Connect(); err != nil {
|
||||
t.Fatalf("client2 connect failed: %v", err)
|
||||
}
|
||||
defer client2.Disconnect()
|
||||
|
||||
topic := GenerateTopic()
|
||||
message := "test-message-from-client1"
|
||||
|
||||
// Subscribe on client2
|
||||
messageCh, handler := newMessageCollector(ctx, 1)
|
||||
if err := client2.PubSub().Subscribe(ctx, topic, handler); err != nil {
|
||||
t.Fatalf("subscribe failed: %v", err)
|
||||
}
|
||||
defer client2.PubSub().Unsubscribe(ctx, topic)
|
||||
|
||||
// Give subscription time to propagate and mesh to form
|
||||
Delay(2000)
|
||||
|
||||
// Publish from client1
|
||||
if err := client1.PubSub().Publish(ctx, topic, []byte(message)); err != nil {
|
||||
t.Fatalf("publish failed: %v", err)
|
||||
}
|
||||
|
||||
// Receive message on client2
|
||||
recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer recvCancel()
|
||||
|
||||
msg, err := waitForMessage(recvCtx, messageCh)
|
||||
if err != nil {
|
||||
t.Fatalf("receive failed: %v", err)
|
||||
}
|
||||
|
||||
if string(msg) != message {
|
||||
t.Fatalf("expected message %q, got %q", message, string(msg))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubSub_MultipleSubscribers(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create three clients
|
||||
clientPub := NewNetworkClient(t)
|
||||
clientSub1 := NewNetworkClient(t)
|
||||
clientSub2 := NewNetworkClient(t)
|
||||
|
||||
if err := clientPub.Connect(); err != nil {
|
||||
t.Fatalf("publisher connect failed: %v", err)
|
||||
}
|
||||
defer clientPub.Disconnect()
|
||||
|
||||
if err := clientSub1.Connect(); err != nil {
|
||||
t.Fatalf("subscriber1 connect failed: %v", err)
|
||||
}
|
||||
defer clientSub1.Disconnect()
|
||||
|
||||
if err := clientSub2.Connect(); err != nil {
|
||||
t.Fatalf("subscriber2 connect failed: %v", err)
|
||||
}
|
||||
defer clientSub2.Disconnect()
|
||||
|
||||
topic := GenerateTopic()
|
||||
message1 := "message-for-sub1"
|
||||
message2 := "message-for-sub2"
|
||||
|
||||
// Subscribe on both clients
|
||||
sub1Ch, sub1Handler := newMessageCollector(ctx, 4)
|
||||
if err := clientSub1.PubSub().Subscribe(ctx, topic, sub1Handler); err != nil {
|
||||
t.Fatalf("subscribe1 failed: %v", err)
|
||||
}
|
||||
defer clientSub1.PubSub().Unsubscribe(ctx, topic)
|
||||
|
||||
sub2Ch, sub2Handler := newMessageCollector(ctx, 4)
|
||||
if err := clientSub2.PubSub().Subscribe(ctx, topic, sub2Handler); err != nil {
|
||||
t.Fatalf("subscribe2 failed: %v", err)
|
||||
}
|
||||
defer clientSub2.PubSub().Unsubscribe(ctx, topic)
|
||||
|
||||
// Give subscriptions time to propagate
|
||||
Delay(500)
|
||||
|
||||
// Publish first message
|
||||
if err := clientPub.PubSub().Publish(ctx, topic, []byte(message1)); err != nil {
|
||||
t.Fatalf("publish1 failed: %v", err)
|
||||
}
|
||||
|
||||
// Both subscribers should receive first message
|
||||
recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer recvCancel()
|
||||
|
||||
msg1a, err := waitForMessage(recvCtx, sub1Ch)
|
||||
if err != nil {
|
||||
t.Fatalf("sub1 receive1 failed: %v", err)
|
||||
}
|
||||
|
||||
if string(msg1a) != message1 {
|
||||
t.Fatalf("sub1: expected %q, got %q", message1, string(msg1a))
|
||||
}
|
||||
|
||||
msg1b, err := waitForMessage(recvCtx, sub2Ch)
|
||||
if err != nil {
|
||||
t.Fatalf("sub2 receive1 failed: %v", err)
|
||||
}
|
||||
|
||||
if string(msg1b) != message1 {
|
||||
t.Fatalf("sub2: expected %q, got %q", message1, string(msg1b))
|
||||
}
|
||||
|
||||
// Publish second message
|
||||
if err := clientPub.PubSub().Publish(ctx, topic, []byte(message2)); err != nil {
|
||||
t.Fatalf("publish2 failed: %v", err)
|
||||
}
|
||||
|
||||
// Both subscribers should receive second message
|
||||
recvCtx2, recvCancel2 := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer recvCancel2()
|
||||
|
||||
msg2a, err := waitForMessage(recvCtx2, sub1Ch)
|
||||
if err != nil {
|
||||
t.Fatalf("sub1 receive2 failed: %v", err)
|
||||
}
|
||||
|
||||
if string(msg2a) != message2 {
|
||||
t.Fatalf("sub1: expected %q, got %q", message2, string(msg2a))
|
||||
}
|
||||
|
||||
msg2b, err := waitForMessage(recvCtx2, sub2Ch)
|
||||
if err != nil {
|
||||
t.Fatalf("sub2 receive2 failed: %v", err)
|
||||
}
|
||||
|
||||
if string(msg2b) != message2 {
|
||||
t.Fatalf("sub2: expected %q, got %q", message2, string(msg2b))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubSub_Deduplication(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create two clients
|
||||
clientPub := NewNetworkClient(t)
|
||||
clientSub := NewNetworkClient(t)
|
||||
|
||||
if err := clientPub.Connect(); err != nil {
|
||||
t.Fatalf("publisher connect failed: %v", err)
|
||||
}
|
||||
defer clientPub.Disconnect()
|
||||
|
||||
if err := clientSub.Connect(); err != nil {
|
||||
t.Fatalf("subscriber connect failed: %v", err)
|
||||
}
|
||||
defer clientSub.Disconnect()
|
||||
|
||||
topic := GenerateTopic()
|
||||
message := "duplicate-test-message"
|
||||
|
||||
// Subscribe on client
|
||||
messageCh, handler := newMessageCollector(ctx, 3)
|
||||
if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil {
|
||||
t.Fatalf("subscribe failed: %v", err)
|
||||
}
|
||||
defer clientSub.PubSub().Unsubscribe(ctx, topic)
|
||||
|
||||
// Give subscription time to propagate and mesh to form
|
||||
Delay(2000)
|
||||
|
||||
// Publish the same message multiple times
|
||||
for i := 0; i < 3; i++ {
|
||||
if err := clientPub.PubSub().Publish(ctx, topic, []byte(message)); err != nil {
|
||||
t.Fatalf("publish %d failed: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Receive messages - should get all (no dedup filter on subscribe)
|
||||
recvCtx, recvCancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer recvCancel()
|
||||
|
||||
receivedCount := 0
|
||||
for receivedCount < 3 {
|
||||
if _, err := waitForMessage(recvCtx, messageCh); err != nil {
|
||||
break
|
||||
}
|
||||
receivedCount++
|
||||
}
|
||||
|
||||
if receivedCount < 1 {
|
||||
t.Fatalf("expected to receive at least 1 message, got %d", receivedCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubSub_ConcurrentPublish(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create clients
|
||||
clientPub := NewNetworkClient(t)
|
||||
clientSub := NewNetworkClient(t)
|
||||
|
||||
if err := clientPub.Connect(); err != nil {
|
||||
t.Fatalf("publisher connect failed: %v", err)
|
||||
}
|
||||
defer clientPub.Disconnect()
|
||||
|
||||
if err := clientSub.Connect(); err != nil {
|
||||
t.Fatalf("subscriber connect failed: %v", err)
|
||||
}
|
||||
defer clientSub.Disconnect()
|
||||
|
||||
topic := GenerateTopic()
|
||||
numMessages := 10
|
||||
|
||||
// Subscribe
|
||||
messageCh, handler := newMessageCollector(ctx, numMessages)
|
||||
if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil {
|
||||
t.Fatalf("subscribe failed: %v", err)
|
||||
}
|
||||
defer clientSub.PubSub().Unsubscribe(ctx, topic)
|
||||
|
||||
// Give subscription time to propagate and mesh to form
|
||||
Delay(2000)
|
||||
|
||||
// Publish multiple messages concurrently
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < numMessages; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
msg := fmt.Sprintf("concurrent-msg-%d", idx)
|
||||
if err := clientPub.PubSub().Publish(ctx, topic, []byte(msg)); err != nil {
|
||||
t.Logf("publish %d failed: %v", idx, err)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Receive messages
|
||||
recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer recvCancel()
|
||||
|
||||
receivedCount := 0
|
||||
for receivedCount < numMessages {
|
||||
if _, err := waitForMessage(recvCtx, messageCh); err != nil {
|
||||
break
|
||||
}
|
||||
receivedCount++
|
||||
}
|
||||
|
||||
if receivedCount < numMessages {
|
||||
t.Logf("expected %d messages, got %d (some may have been dropped)", numMessages, receivedCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubSub_TopicIsolation(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create clients
|
||||
clientPub := NewNetworkClient(t)
|
||||
clientSub := NewNetworkClient(t)
|
||||
|
||||
if err := clientPub.Connect(); err != nil {
|
||||
t.Fatalf("publisher connect failed: %v", err)
|
||||
}
|
||||
defer clientPub.Disconnect()
|
||||
|
||||
if err := clientSub.Connect(); err != nil {
|
||||
t.Fatalf("subscriber connect failed: %v", err)
|
||||
}
|
||||
defer clientSub.Disconnect()
|
||||
|
||||
topic1 := GenerateTopic()
|
||||
topic2 := GenerateTopic()
|
||||
|
||||
// Subscribe to topic1
|
||||
messageCh, handler := newMessageCollector(ctx, 2)
|
||||
if err := clientSub.PubSub().Subscribe(ctx, topic1, handler); err != nil {
|
||||
t.Fatalf("subscribe1 failed: %v", err)
|
||||
}
|
||||
defer clientSub.PubSub().Unsubscribe(ctx, topic1)
|
||||
|
||||
// Give subscription time to propagate and mesh to form
|
||||
Delay(2000)
|
||||
|
||||
// Publish to topic2
|
||||
msg2 := "message-on-topic2"
|
||||
if err := clientPub.PubSub().Publish(ctx, topic2, []byte(msg2)); err != nil {
|
||||
t.Fatalf("publish2 failed: %v", err)
|
||||
}
|
||||
|
||||
// Publish to topic1
|
||||
msg1 := "message-on-topic1"
|
||||
if err := clientPub.PubSub().Publish(ctx, topic1, []byte(msg1)); err != nil {
|
||||
t.Fatalf("publish1 failed: %v", err)
|
||||
}
|
||||
|
||||
// Receive on sub1 - should get msg1 only
|
||||
recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer recvCancel()
|
||||
|
||||
msg, err := waitForMessage(recvCtx, messageCh)
|
||||
if err != nil {
|
||||
t.Fatalf("receive failed: %v", err)
|
||||
}
|
||||
|
||||
if string(msg) != msg1 {
|
||||
t.Fatalf("expected %q, got %q", msg1, string(msg))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubSub_EmptyMessage(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create clients
|
||||
clientPub := NewNetworkClient(t)
|
||||
clientSub := NewNetworkClient(t)
|
||||
|
||||
if err := clientPub.Connect(); err != nil {
|
||||
t.Fatalf("publisher connect failed: %v", err)
|
||||
}
|
||||
defer clientPub.Disconnect()
|
||||
|
||||
if err := clientSub.Connect(); err != nil {
|
||||
t.Fatalf("subscriber connect failed: %v", err)
|
||||
}
|
||||
defer clientSub.Disconnect()
|
||||
|
||||
topic := GenerateTopic()
|
||||
|
||||
// Subscribe
|
||||
messageCh, handler := newMessageCollector(ctx, 1)
|
||||
if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil {
|
||||
t.Fatalf("subscribe failed: %v", err)
|
||||
}
|
||||
defer clientSub.PubSub().Unsubscribe(ctx, topic)
|
||||
|
||||
// Give subscription time to propagate and mesh to form
|
||||
Delay(2000)
|
||||
|
||||
// Publish empty message
|
||||
if err := clientPub.PubSub().Publish(ctx, topic, []byte("")); err != nil {
|
||||
t.Fatalf("publish empty failed: %v", err)
|
||||
}
|
||||
|
||||
// Receive on sub - should get empty message
|
||||
recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer recvCancel()
|
||||
|
||||
msg, err := waitForMessage(recvCtx, messageCh)
|
||||
if err != nil {
|
||||
t.Fatalf("receive failed: %v", err)
|
||||
}
|
||||
|
||||
if len(msg) != 0 {
|
||||
t.Fatalf("expected empty message, got %q", string(msg))
|
||||
}
|
||||
}
|
||||
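Note: the pubsub tests above rely on shared e2e helpers (GenerateTopic, Delay, newMessageCollector, waitForMessage) that are defined elsewhere in the package. A minimal sketch of the two message helpers, with names and signatures inferred from their call sites rather than taken from this diff; the handler signature in particular is an assumption about what PubSub().Subscribe expects:

package e2e

import "context"

// newMessageCollector returns a buffered channel plus a handler that forwards
// each received payload onto it; capacity bounds how many messages are kept.
func newMessageCollector(ctx context.Context, capacity int) (<-chan []byte, func(topic string, data []byte)) {
	ch := make(chan []byte, capacity)
	handler := func(topic string, data []byte) {
		select {
		case ch <- data:
		case <-ctx.Done():
		}
	}
	return ch, handler
}

// waitForMessage blocks until a message arrives on ch or the context expires.
func waitForMessage(ctx context.Context, ch <-chan []byte) ([]byte, error) {
	select {
	case msg := <-ch:
		return msg, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}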
e2e/rqlite_http_test.go (new file, 446 lines)
@@ -0,0 +1,446 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRQLite_CreateTable(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
table := GenerateTableName()
|
||||
schema := fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)",
|
||||
table,
|
||||
)
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": schema,
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("create table request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusCreated && status != http.StatusOK {
|
||||
t.Fatalf("expected status 201 or 200, got %d: %s", status, string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_InsertQuery(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
table := GenerateTableName()
|
||||
schema := fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)",
|
||||
table,
|
||||
)
|
||||
|
||||
// Create table
|
||||
createReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": schema,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := createReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Insert rows
|
||||
insertReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/transaction",
|
||||
Body: map[string]interface{}{
|
||||
"statements": []string{
|
||||
fmt.Sprintf("INSERT INTO %s(name) VALUES ('alice')", table),
|
||||
fmt.Sprintf("INSERT INTO %s(name) VALUES ('bob')", table),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = insertReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("insert failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Query rows
|
||||
queryReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/query",
|
||||
Body: map[string]interface{}{
|
||||
"sql": fmt.Sprintf("SELECT name FROM %s ORDER BY id", table),
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := queryReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("query failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var queryResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &queryResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if queryResp["count"].(float64) < 2 {
|
||||
t.Fatalf("expected at least 2 rows, got %v", queryResp["count"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_DropTable(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
table := GenerateTableName()
|
||||
schema := fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)",
|
||||
table,
|
||||
)
|
||||
|
||||
// Create table
|
||||
createReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": schema,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := createReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Drop table
|
||||
dropReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||
Body: map[string]interface{}{
|
||||
"table": table,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = dropReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("drop table request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
// Verify table doesn't exist via schema
|
||||
schemaReq := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/schema",
|
||||
}
|
||||
|
||||
body, status, err := schemaReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Logf("warning: failed to verify schema after drop: status %d, err %v", status, err)
|
||||
return
|
||||
}
|
||||
|
||||
var schemaResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &schemaResp); err != nil {
|
||||
t.Logf("warning: failed to decode schema response: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if tables, ok := schemaResp["tables"].([]interface{}); ok {
|
||||
for _, tbl := range tables {
|
||||
tblMap := tbl.(map[string]interface{})
|
||||
if tblMap["name"] == table {
|
||||
t.Fatalf("table %s still present after drop", table)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_Schema(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/schema",
|
||||
}
|
||||
|
||||
body, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("schema request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var resp map[string]interface{}
|
||||
if err := DecodeJSON(body, &resp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if _, ok := resp["tables"]; !ok {
|
||||
t.Fatalf("expected 'tables' field in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_MalformedSQL(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/query",
|
||||
Body: map[string]interface{}{
|
||||
"sql": "SELECT * FROM nonexistent_table WHERE invalid syntax",
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := req.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("request failed: %v", err)
|
||||
}
|
||||
|
||||
// Should get an error response
|
||||
if status == http.StatusOK {
|
||||
t.Fatalf("expected error for malformed SQL, got status 200")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_LargeTransaction(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
table := GenerateTableName()
|
||||
schema := fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
|
||||
table,
|
||||
)
|
||||
|
||||
// Create table
|
||||
createReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": schema,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := createReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Generate large transaction (50 inserts)
|
||||
var statements []string
|
||||
for i := 0; i < 50; i++ {
|
||||
statements = append(statements, fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, i))
|
||||
}
|
||||
|
||||
txReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/transaction",
|
||||
Body: map[string]interface{}{
|
||||
"statements": statements,
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = txReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("large transaction failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Verify all rows were inserted
|
||||
queryReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/query",
|
||||
Body: map[string]interface{}{
|
||||
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := queryReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("count query failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
var countResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &countResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
// Extract count from result
|
||||
if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 {
|
||||
row := rows[0].([]interface{})
|
||||
if row[0].(float64) != 50 {
|
||||
t.Fatalf("expected 50 rows, got %v", row[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_ForeignKeyMigration(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
orgsTable := GenerateTableName()
|
||||
usersTable := GenerateTableName()
|
||||
|
||||
// Create base tables
|
||||
createOrgsReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)",
|
||||
orgsTable,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := createOrgsReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create orgs table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
createUsersReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/create-table",
|
||||
Body: map[string]interface{}{
|
||||
"schema": fmt.Sprintf(
|
||||
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)",
|
||||
usersTable,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = createUsersReq.Do(ctx)
|
||||
if err != nil || (status != http.StatusCreated && status != http.StatusOK) {
|
||||
t.Fatalf("create users table failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Seed data
|
||||
seedReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/transaction",
|
||||
Body: map[string]interface{}{
|
||||
"statements": []string{
|
||||
fmt.Sprintf("INSERT INTO %s(id,name) VALUES (1,'org')", orgsTable),
|
||||
fmt.Sprintf("INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')", usersTable),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = seedReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("seed transaction failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Migrate: change age type and add FK
|
||||
migrationReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/transaction",
|
||||
Body: map[string]interface{}{
|
||||
"statements": []string{
|
||||
fmt.Sprintf(
|
||||
"CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)",
|
||||
usersTable, orgsTable,
|
||||
),
|
||||
fmt.Sprintf(
|
||||
"INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s",
|
||||
usersTable, usersTable,
|
||||
),
|
||||
fmt.Sprintf("DROP TABLE %s", usersTable),
|
||||
fmt.Sprintf("ALTER TABLE %s_new RENAME TO %s", usersTable, usersTable),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err = migrationReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("migration transaction failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
// Verify data is intact
|
||||
queryReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/query",
|
||||
Body: map[string]interface{}{
|
||||
"sql": fmt.Sprintf("SELECT name, org_id, age FROM %s", usersTable),
|
||||
},
|
||||
}
|
||||
|
||||
body, status, err := queryReq.Do(ctx)
|
||||
if err != nil || status != http.StatusOK {
|
||||
t.Fatalf("query after migration failed: status %d, err %v", status, err)
|
||||
}
|
||||
|
||||
var queryResp map[string]interface{}
|
||||
if err := DecodeJSON(body, &queryResp); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if queryResp["count"].(float64) != 1 {
|
||||
t.Fatalf("expected 1 row after migration, got %v", queryResp["count"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestRQLite_DropNonexistentTable(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dropReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/rqlite/drop-table",
|
||||
Body: map[string]interface{}{
|
||||
"table": "nonexistent_table_xyz_" + fmt.Sprintf("%d", time.Now().UnixNano()),
|
||||
},
|
||||
}
|
||||
|
||||
_, status, err := dropReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Logf("warning: drop nonexistent table request failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Should get an error (400 or 404)
|
||||
if status == http.StatusOK {
|
||||
t.Logf("warning: expected error for dropping nonexistent table, got status 200")
|
||||
}
|
||||
}
|
||||
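Note: the rqlite tests go through a small HTTPRequest wrapper with a Do(ctx) method instead of calling net/http directly. A rough sketch of that helper, assuming it JSON-encodes the optional body, attaches the configured API key, and returns the raw response body plus status code; the real helper in the e2e package may handle auth and TLS differently:

package e2e

import (
	"bytes"
	"context"
	"encoding/json"
	"io"
	"net/http"
)

// HTTPRequest describes one gateway call made by the e2e tests.
type HTTPRequest struct {
	Method string
	URL    string
	Body   map[string]interface{}
}

// Do executes the request and returns the response body, the HTTP status code, and any transport error.
func (r *HTTPRequest) Do(ctx context.Context) ([]byte, int, error) {
	var reader io.Reader
	if r.Body != nil {
		payload, err := json.Marshal(r.Body)
		if err != nil {
			return nil, 0, err
		}
		reader = bytes.NewReader(payload)
	}
	req, err := http.NewRequestWithContext(ctx, r.Method, r.URL, reader)
	if err != nil {
		return nil, 0, err
	}
	req.Header.Set("Content-Type", "application/json")
	if apiKey := GetAPIKey(); apiKey != "" { // GetAPIKey is the same helper the storage tests use
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, 0, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return body, resp.StatusCode, err
}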
e2e/storage_http_test.go (new file, 550 lines)
@@ -0,0 +1,550 @@
|
||||
//go:build e2e
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// uploadFile is a helper to upload a file to storage
|
||||
func uploadFile(t *testing.T, ctx context.Context, content []byte, filename string) string {
|
||||
t.Helper()
|
||||
|
||||
// Create multipart form
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", filename)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
// Add auth headers
|
||||
if jwt := GetJWT(); jwt != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+jwt)
|
||||
} else if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
result, err := DecodeJSONFromReader(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to decode upload response: %v", err)
|
||||
}
|
||||
|
||||
return result["cid"].(string)
|
||||
}
|
||||
|
||||
// DecodeJSONFromReader reads the body from an io.ReadCloser and decodes it as JSON into a map
|
||||
func DecodeJSONFromReader(rc io.ReadCloser) (map[string]interface{}, error) {
|
||||
defer rc.Close()
|
||||
body, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var result map[string]interface{}
|
||||
err = DecodeJSON(body, &result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
func TestStorage_UploadText(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
content := []byte("Hello, IPFS!")
|
||||
filename := "test.txt"
|
||||
|
||||
// Create multipart form
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", filename)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := DecodeJSON(body, &result); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if result["cid"] == nil {
|
||||
t.Fatalf("expected cid in response")
|
||||
}
|
||||
|
||||
if result["name"] != filename {
|
||||
t.Fatalf("expected name %q, got %v", filename, result["name"])
|
||||
}
|
||||
|
||||
if result["size"] == nil || result["size"].(float64) <= 0 {
|
||||
t.Fatalf("expected positive size")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage_UploadBinary(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// PNG header
|
||||
content := []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a}
|
||||
filename := "test.png"
|
||||
|
||||
// Create multipart form
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", filename)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := DecodeJSON(body, &result); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if result["cid"] == nil {
|
||||
t.Fatalf("expected cid in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage_UploadLarge(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Create 1MB file
|
||||
content := bytes.Repeat([]byte("x"), 1024*1024)
|
||||
filename := "large.bin"
|
||||
|
||||
// Create multipart form
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", filename)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := DecodeJSON(body, &result); err != nil {
|
||||
t.Fatalf("failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
if result["size"] != float64(1024*1024) {
|
||||
t.Fatalf("expected size %d, got %v", 1024*1024, result["size"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage_PinUnpin(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
content := []byte("test content for pinning")
|
||||
|
||||
// Upload file first
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", "pin-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create upload request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var uploadResult map[string]interface{}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := DecodeJSON(body, &uploadResult); err != nil {
|
||||
t.Fatalf("failed to decode upload response: %v", err)
|
||||
}
|
||||
|
||||
cid := uploadResult["cid"].(string)
|
||||
|
||||
// Pin the file
|
||||
pinReq := &HTTPRequest{
|
||||
Method: http.MethodPost,
|
||||
URL: GetGatewayURL() + "/v1/storage/pin",
|
||||
Body: map[string]interface{}{
|
||||
"cid": cid,
|
||||
"name": "pinned-file",
|
||||
},
|
||||
}
|
||||
|
||||
body2, status, err := pinReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("pin failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", status, string(body2))
|
||||
}
|
||||
|
||||
var pinResult map[string]interface{}
|
||||
if err := DecodeJSON(body2, &pinResult); err != nil {
|
||||
t.Fatalf("failed to decode pin response: %v", err)
|
||||
}
|
||||
|
||||
if pinResult["cid"] != cid {
|
||||
t.Fatalf("expected cid %s, got %v", cid, pinResult["cid"])
|
||||
}
|
||||
|
||||
// Unpin the file
|
||||
unpinReq := &HTTPRequest{
|
||||
Method: http.MethodDelete,
|
||||
URL: GetGatewayURL() + "/v1/storage/unpin/" + cid,
|
||||
}
|
||||
|
||||
body3, status, err := unpinReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unpin failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", status, string(body3))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage_Status(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
content := []byte("test content for status")
|
||||
|
||||
// Upload file first
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", "status-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create upload request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var uploadResult map[string]interface{}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := DecodeJSON(body, &uploadResult); err != nil {
|
||||
t.Fatalf("failed to decode upload response: %v", err)
|
||||
}
|
||||
|
||||
cid := uploadResult["cid"].(string)
|
||||
|
||||
// Get status
|
||||
statusReq := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/storage/status/" + cid,
|
||||
}
|
||||
|
||||
statusBody, status, err := statusReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("status request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", status)
|
||||
}
|
||||
|
||||
var statusResult map[string]interface{}
|
||||
if err := DecodeJSON(statusBody, &statusResult); err != nil {
|
||||
t.Fatalf("failed to decode status response: %v", err)
|
||||
}
|
||||
|
||||
if statusResult["cid"] != cid {
|
||||
t.Fatalf("expected cid %s, got %v", cid, statusResult["cid"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage_InvalidCID(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
statusReq := &HTTPRequest{
|
||||
Method: http.MethodGet,
|
||||
URL: GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789",
|
||||
}
|
||||
|
||||
_, status, err := statusReq.Do(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("status request failed: %v", err)
|
||||
}
|
||||
|
||||
if status != http.StatusNotFound {
|
||||
t.Logf("warning: expected status 404 for invalid CID, got %d", status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorage_GetByteRange(t *testing.T) {
|
||||
SkipIfMissingGateway(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
content := []byte("0123456789abcdefghijklmnopqrstuvwxyz")
|
||||
|
||||
// Upload file first
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
|
||||
part, err := writer.CreateFormFile("file", "range-test.txt")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create form file: %v", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, bytes.NewReader(content)); err != nil {
|
||||
t.Fatalf("failed to copy data: %v", err)
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
// Create upload request
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
client := NewHTTPClient(5 * time.Minute)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("upload failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var uploadResult map[string]interface{}
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if err := DecodeJSON(body, &uploadResult); err != nil {
|
||||
t.Fatalf("failed to decode upload response: %v", err)
|
||||
}
|
||||
|
||||
cid := uploadResult["cid"].(string)
|
||||
|
||||
// Get full content
|
||||
getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/storage/get/"+cid, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create get request: %v", err)
|
||||
}
|
||||
|
||||
if apiKey := GetAPIKey(); apiKey != "" {
|
||||
getReq.Header.Set("Authorization", "Bearer "+apiKey)
|
||||
}
|
||||
|
||||
resp, err = client.Do(getReq)
|
||||
if err != nil {
|
||||
t.Fatalf("get request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
retrievedContent, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read response body: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(retrievedContent, content) {
|
||||
t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent))
|
||||
}
|
||||
}
|
||||
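Note: every storage test builds its multipart form by hand and sends it through NewHTTPClient with a generous timeout, since uploads to the IPFS-backed store can be slow. A sketch of what that helper is assumed to be, nothing more than a standard client with a caller-supplied timeout:

package e2e

import (
	"net/http"
	"time"
)

// NewHTTPClient returns a plain http.Client whose overall request timeout is set by the caller.
func NewHTTPClient(timeout time.Duration) *http.Client {
	return &http.Client{Timeout: timeout}
}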
go.mod (19 changed lines)
@@ -5,29 +5,39 @@ go 1.23.8
|
||||
toolchain go1.24.1
|
||||
|
||||
require (
|
||||
github.com/charmbracelet/bubbles v0.20.0
|
||||
github.com/charmbracelet/bubbletea v1.2.4
|
||||
github.com/charmbracelet/lipgloss v1.0.0
|
||||
github.com/ethereum/go-ethereum v1.13.14
|
||||
github.com/go-chi/chi/v5 v5.2.3
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/libp2p/go-libp2p v0.41.1
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2
|
||||
github.com/mackerelio/go-osstat v0.2.6
|
||||
github.com/mattn/go-sqlite3 v1.14.32
|
||||
github.com/multiformats/go-multiaddr v0.15.0
|
||||
github.com/olric-data/olric v0.7.0
|
||||
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/net v0.42.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v1.9.4 // indirect
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
github.com/atotto/clipboard v0.1.4 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.22.0 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
|
||||
github.com/buraksezer/consistent v0.10.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.4.5 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
@@ -35,6 +45,7 @@ require (
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/elastic/gosigar v0.14.3 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/flynn/noise v1.1.0 // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
@@ -70,14 +81,20 @@ require (
|
||||
github.com/libp2p/go-netroute v0.2.2 // indirect
|
||||
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/miekg/dns v1.1.66 // indirect
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/mschoch/smat v0.2.0 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/termenv v0.15.2 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
|
||||
@@ -120,6 +137,7 @@ require (
|
||||
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
|
||||
github.com/raulk/go-watchdog v1.3.0 // indirect
|
||||
github.com/redis/go-redis/v9 v9.8.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
@@ -140,6 +158,5 @@ require (
|
||||
golang.org/x/text v0.27.0 // indirect
|
||||
golang.org/x/tools v0.35.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
lukechampine.com/blake3 v1.4.1 // indirect
|
||||
)
|
||||
|
||||
go.sum (36 changed lines)
@@ -19,6 +19,10 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@@ -44,6 +48,16 @@ github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0ma
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
|
||||
github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
|
||||
github.com/charmbracelet/bubbletea v1.2.4 h1:KN8aCViA0eps9SCOThb2/XPIlea3ANJLUkv3KnQRNCE=
|
||||
github.com/charmbracelet/bubbletea v1.2.4/go.mod h1:Qr6fVQw+wX7JkWWkVyXYk/ZUQ92a6XNekLXa3rR18MM=
|
||||
github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
|
||||
github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
|
||||
github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSexTdRM=
|
||||
github.com/charmbracelet/x/ansi v0.4.5/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw=
|
||||
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
@@ -75,6 +89,8 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
|
||||
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
|
||||
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
|
||||
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
@@ -85,6 +101,8 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
|
||||
github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
@@ -238,6 +256,8 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
|
||||
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
|
||||
@@ -246,6 +266,12 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
|
||||
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
||||
@@ -269,6 +295,12 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
|
||||
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
|
||||
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
|
||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
@@ -397,6 +429,9 @@ github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtB
|
||||
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
|
||||
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
|
||||
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
|
||||
@@ -583,6 +618,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
||||
@@ -34,15 +34,15 @@ func GetCredentialsPath() (string, error) {
 		return "", fmt.Errorf("failed to get home directory: %w", err)
 	}
 
-	debrosDir := filepath.Join(homeDir, ".debros")
-	if err := os.MkdirAll(debrosDir, 0700); err != nil {
-		return "", fmt.Errorf("failed to create .debros directory: %w", err)
+	oramaDir := filepath.Join(homeDir, ".orama")
+	if err := os.MkdirAll(oramaDir, 0700); err != nil {
+		return "", fmt.Errorf("failed to create .orama directory: %w", err)
 	}
 
-	return filepath.Join(debrosDir, "credentials.json"), nil
+	return filepath.Join(oramaDir, "credentials.json"), nil
 }
 
-// LoadCredentials loads credentials from ~/.debros/credentials.json
+// LoadCredentials loads credentials from ~/.orama/credentials.json
 func LoadCredentials() (*CredentialStore, error) {
 	credPath, err := GetCredentialsPath()
 	if err != nil {
@@ -80,7 +80,7 @@ func LoadCredentials() (*CredentialStore, error) {
 	return &store, nil
 }
 
-// SaveCredentials saves credentials to ~/.debros/credentials.json
+// SaveCredentials saves credentials to ~/.orama/credentials.json
 func (store *CredentialStore) SaveCredentials() error {
 	credPath, err := GetCredentialsPath()
 	if err != nil {
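Note: the net effect of this hunk is that CLI credentials now live under ~/.orama instead of ~/.debros. A hedged usage sketch of the round trip, using only the signatures visible in this diff:

// Load the store from ~/.orama/credentials.json, then persist any changes back.
store, err := auth.LoadCredentials()
if err != nil {
	return fmt.Errorf("load credentials: %w", err)
}
if err := store.SaveCredentials(); err != nil {
	return fmt.Errorf("save credentials: %w", err)
}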
pkg/auth/simple_auth.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/tlsutil"
|
||||
)
|
||||
|
||||
// PerformSimpleAuthentication performs a simple authentication flow where the user
|
||||
// provides a wallet address and receives an API key without signature verification
|
||||
func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) {
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
fmt.Println("\n🔐 Simple Wallet Authentication")
|
||||
fmt.Println("================================")
|
||||
|
||||
// Read wallet address
|
||||
fmt.Print("Enter your wallet address (0x...): ")
|
||||
walletInput, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read wallet address: %w", err)
|
||||
}
|
||||
|
||||
wallet := strings.TrimSpace(walletInput)
|
||||
if wallet == "" {
|
||||
return nil, fmt.Errorf("wallet address cannot be empty")
|
||||
}
|
||||
|
||||
// Validate wallet format (basic check)
|
||||
if !strings.HasPrefix(wallet, "0x") && !strings.HasPrefix(wallet, "0X") {
|
||||
wallet = "0x" + wallet
|
||||
}
|
||||
|
||||
if !ValidateWalletAddress(wallet) {
|
||||
return nil, fmt.Errorf("invalid wallet address format")
|
||||
}
|
||||
|
||||
// Read namespace (optional)
|
||||
fmt.Print("Enter namespace (press Enter for 'default'): ")
|
||||
nsInput, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read namespace: %w", err)
|
||||
}
|
||||
|
||||
namespace := strings.TrimSpace(nsInput)
|
||||
if namespace == "" {
|
||||
namespace = "default"
|
||||
}
|
||||
|
||||
fmt.Printf("\n✅ Wallet: %s\n", wallet)
|
||||
fmt.Printf("✅ Namespace: %s\n", namespace)
|
||||
fmt.Println("⏳ Requesting API key from gateway...")
|
||||
|
||||
// Request API key from gateway
|
||||
apiKey, err := requestAPIKeyFromGateway(gatewayURL, wallet, namespace)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to request API key: %w", err)
|
||||
}
|
||||
|
||||
// Create credentials
|
||||
creds := &Credentials{
|
||||
APIKey: apiKey,
|
||||
Namespace: namespace,
|
||||
UserID: wallet,
|
||||
Wallet: wallet,
|
||||
IssuedAt: time.Now(),
|
||||
}
|
||||
|
||||
fmt.Printf("\n🎉 Authentication successful!\n")
|
||||
fmt.Printf("📝 API Key: %s\n", creds.APIKey)
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// requestAPIKeyFromGateway calls the gateway's simple-key endpoint to generate an API key
|
||||
func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, error) {
|
||||
reqBody := map[string]string{
|
||||
"wallet": wallet,
|
||||
"namespace": namespace,
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to marshal request: %w", err)
|
||||
}
|
||||
|
||||
endpoint := gatewayURL + "/v1/auth/simple-key"
|
||||
|
||||
// Extract domain from URL for TLS configuration
|
||||
// This uses tlsutil which handles Let's Encrypt staging certificates for *.debros.network
|
||||
domain := extractDomainFromURL(gatewayURL)
|
||||
client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
|
||||
|
||||
resp, err := client.Post(endpoint, "application/json", bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to call gateway: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return "", fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var respBody map[string]interface{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&respBody); err != nil {
|
||||
return "", fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
apiKey, ok := respBody["api_key"].(string)
|
||||
if !ok || apiKey == "" {
|
||||
return "", fmt.Errorf("no api_key in response")
|
||||
}
|
||||
|
||||
return apiKey, nil
|
||||
}
|
||||
|
||||
// extractDomainFromURL extracts the domain from a URL
|
||||
// Removes protocol (https://, http://), path, and port components
|
||||
func extractDomainFromURL(url string) string {
|
||||
// Remove protocol prefixes
|
||||
url = strings.TrimPrefix(url, "https://")
|
||||
url = strings.TrimPrefix(url, "http://")
|
||||
|
||||
// Remove path component
|
||||
if idx := strings.Index(url, "/"); idx != -1 {
|
||||
url = url[:idx]
|
||||
}
|
||||
|
||||
// Remove port component
|
||||
if idx := strings.Index(url, ":"); idx != -1 {
|
||||
url = url[:idx]
|
||||
}
|
||||
|
||||
return url
|
||||
}
|
||||
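Because extractDomainFromURL does plain string trimming rather than full URL parsing, a small table-driven test makes its intended behaviour explicit. This sketch is illustrative and not part of the diff; it assumes it lives in the same auth package so it can reach the unexported function.

package auth

import "testing"

func TestExtractDomainFromURL(t *testing.T) {
	cases := map[string]string{
		"https://node-hk19de.debros.network":     "node-hk19de.debros.network",
		"http://localhost:6001":                  "localhost",
		"https://gateway.debros.network/v1/auth": "gateway.debros.network",
		"devnet.debros.network":                  "devnet.debros.network",
	}
	for in, want := range cases {
		if got := extractDomainFromURL(in); got != want {
			t.Errorf("extractDomainFromURL(%q) = %q, want %q", in, got, want)
		}
	}
}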
@@ -199,7 +199,7 @@ func (as *AuthServer) handleCallback(w http.ResponseWriter, r *http.Request) {
 			%s
 		</div>
 
-		<p>Your credentials have been saved securely to <code>~/.debros/credentials.json</code></p>
+		<p>Your credentials have been saved securely to <code>~/.orama/credentials.json</code></p>
 		<p><strong>You can now close this browser window and return to your terminal.</strong></p>
 		</div>
 	</body>
pkg/certutil/cert_manager.go (new file, 257 lines)
@@ -0,0 +1,257 @@
|
||||
// Package certutil provides utilities for managing self-signed certificates
|
||||
package certutil
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CertificateManager manages self-signed certificates for the network
|
||||
type CertificateManager struct {
|
||||
baseDir string
|
||||
}
|
||||
|
||||
// NewCertificateManager creates a new certificate manager
|
||||
func NewCertificateManager(baseDir string) *CertificateManager {
|
||||
return &CertificateManager{
|
||||
baseDir: baseDir,
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureCACertificate creates or loads the CA certificate
|
||||
func (cm *CertificateManager) EnsureCACertificate() ([]byte, []byte, error) {
|
||||
caCertPath := filepath.Join(cm.baseDir, "ca.crt")
|
||||
caKeyPath := filepath.Join(cm.baseDir, "ca.key")
|
||||
|
||||
// Check if CA already exists
|
||||
if _, err := os.Stat(caCertPath); err == nil {
|
||||
certPEM, err := os.ReadFile(caCertPath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to read CA certificate: %w", err)
|
||||
}
|
||||
keyPEM, err := os.ReadFile(caKeyPath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to read CA key: %w", err)
|
||||
}
|
||||
return certPEM, keyPEM, nil
|
||||
}
|
||||
|
||||
// Create new CA certificate
|
||||
certPEM, keyPEM, err := cm.generateCACertificate()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Ensure directory exists
|
||||
if err := os.MkdirAll(cm.baseDir, 0700); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create cert directory: %w", err)
|
||||
}
|
||||
|
||||
// Write to files
|
||||
if err := os.WriteFile(caCertPath, certPEM, 0644); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to write CA certificate: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(caKeyPath, keyPEM, 0600); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to write CA key: %w", err)
|
||||
}
|
||||
|
||||
return certPEM, keyPEM, nil
|
||||
}
|
||||
|
||||
// EnsureNodeCertificate creates or loads a node certificate signed by the CA
|
||||
func (cm *CertificateManager) EnsureNodeCertificate(hostname string, caCertPEM, caKeyPEM []byte) ([]byte, []byte, error) {
|
||||
certPath := filepath.Join(cm.baseDir, fmt.Sprintf("%s.crt", hostname))
|
||||
keyPath := filepath.Join(cm.baseDir, fmt.Sprintf("%s.key", hostname))
|
||||
|
||||
// Check if certificate already exists
|
||||
if _, err := os.Stat(certPath); err == nil {
|
||||
certData, err := os.ReadFile(certPath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to read certificate: %w", err)
|
||||
}
|
||||
keyData, err := os.ReadFile(keyPath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to read key: %w", err)
|
||||
}
|
||||
return certData, keyData, nil
|
||||
}
|
||||
|
||||
// Create new certificate
|
||||
certPEM, keyPEM, err := cm.generateNodeCertificate(hostname, caCertPEM, caKeyPEM)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Write to files
|
||||
if err := os.WriteFile(certPath, certPEM, 0644); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to write certificate: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to write key: %w", err)
|
||||
}
|
||||
|
||||
return certPEM, keyPEM, nil
|
||||
}
|
||||
|
||||
// generateCACertificate generates a self-signed CA certificate
|
||||
func (cm *CertificateManager) generateCACertificate() ([]byte, []byte, error) {
|
||||
// Generate private key
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
|
||||
}
|
||||
|
||||
// Create certificate template
|
||||
template := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{
|
||||
CommonName: "DeBros Network Root CA",
|
||||
Organization: []string{"DeBros"},
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().AddDate(10, 0, 0), // 10 year validity
|
||||
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{},
|
||||
BasicConstraintsValid: true,
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
// Self-sign the certificate
|
||||
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
|
||||
}
|
||||
|
||||
// Encode certificate to PEM
|
||||
certPEM := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certDER,
|
||||
})
|
||||
|
||||
// Encode private key to PEM
|
||||
keyDER, err := x509.MarshalPKCS8PrivateKey(privateKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
|
||||
keyPEM := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "PRIVATE KEY",
|
||||
Bytes: keyDER,
|
||||
})
|
||||
|
||||
return certPEM, keyPEM, nil
|
||||
}
|
||||
|
||||
// generateNodeCertificate generates a certificate signed by the CA
|
||||
func (cm *CertificateManager) generateNodeCertificate(hostname string, caCertPEM, caKeyPEM []byte) ([]byte, []byte, error) {
|
||||
// Parse CA certificate and key
|
||||
caCert, caKey, err := cm.parseCACertificate(caCertPEM, caKeyPEM)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Generate node private key
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate private key: %w", err)
|
||||
}
|
||||
|
||||
// Create certificate template
|
||||
template := x509.Certificate{
|
||||
SerialNumber: big.NewInt(time.Now().UnixNano()),
|
||||
Subject: pkix.Name{
|
||||
CommonName: hostname,
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().AddDate(5, 0, 0), // 5 year validity
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
DNSNames: []string{hostname},
|
||||
}
|
||||
|
||||
// Add wildcard support if hostname contains *.debros.network
|
||||
if hostname == "*.debros.network" {
|
||||
template.DNSNames = []string{"*.debros.network", "debros.network"}
|
||||
} else if hostname == "debros.network" {
|
||||
template.DNSNames = []string{"*.debros.network", "debros.network"}
|
||||
}
|
||||
|
||||
// Try to parse as IP address for IP-based certificates
|
||||
if ip := net.ParseIP(hostname); ip != nil {
|
||||
template.IPAddresses = []net.IP{ip}
|
||||
template.DNSNames = nil
|
||||
}
|
||||
|
||||
// Sign certificate with CA
|
||||
certDER, err := x509.CreateCertificate(rand.Reader, &template, caCert, &privateKey.PublicKey, caKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create certificate: %w", err)
|
||||
}
|
||||
|
||||
// Encode certificate to PEM
|
||||
certPEM := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certDER,
|
||||
})
|
||||
|
||||
// Encode private key to PEM
|
||||
keyDER, err := x509.MarshalPKCS8PrivateKey(privateKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
|
||||
keyPEM := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "PRIVATE KEY",
|
||||
Bytes: keyDER,
|
||||
})
|
||||
|
||||
return certPEM, keyPEM, nil
|
||||
}
|
||||
|
||||
// parseCACertificate parses CA certificate and key from PEM
|
||||
func (cm *CertificateManager) parseCACertificate(caCertPEM, caKeyPEM []byte) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
// Parse CA certificate
|
||||
certBlock, _ := pem.Decode(caCertPEM)
|
||||
if certBlock == nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse CA certificate PEM")
|
||||
}
|
||||
|
||||
caCert, err := x509.ParseCertificate(certBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse CA certificate: %w", err)
|
||||
}
|
||||
|
||||
// Parse CA private key
|
||||
keyBlock, _ := pem.Decode(caKeyPEM)
|
||||
if keyBlock == nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse CA key PEM")
|
||||
}
|
||||
|
||||
caKey, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to parse CA key: %w", err)
|
||||
}
|
||||
|
||||
rsaKey, ok := caKey.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("CA key is not RSA")
|
||||
}
|
||||
|
||||
return caCert, rsaKey, nil
|
||||
}
|
||||
|
||||
// LoadTLSCertificate loads a TLS certificate from PEM files
|
||||
func LoadTLSCertificate(certPEM, keyPEM []byte) (tls.Certificate, error) {
|
||||
return tls.X509KeyPair(certPEM, keyPEM)
|
||||
}
|
||||
|
||||
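One way the pieces above compose: create the CA once, mint a node certificate for the host, and hand the pair to a TLS listener. A rough sketch under those assumptions; the import path is inferred from the module name, and the base directory, hostname, and port are placeholders.

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/DeBrosOfficial/network/pkg/certutil"
)

func main() {
	cm := certutil.NewCertificateManager("/home/debros/.orama/certs") // illustrative path

	caCert, caKey, err := cm.EnsureCACertificate()
	if err != nil {
		log.Fatalf("ensure CA: %v", err)
	}

	certPEM, keyPEM, err := cm.EnsureNodeCertificate("node-1.debros.network", caCert, caKey)
	if err != nil {
		log.Fatalf("ensure node cert: %v", err)
	}

	cert, err := certutil.LoadTLSCertificate(certPEM, keyPEM)
	if err != nil {
		log.Fatalf("load keypair: %v", err)
	}

	srv := &http.Server{
		Addr:      ":8443",
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
	}
	log.Fatal(srv.ListenAndServeTLS("", "")) // certificates come from TLSConfig
}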
@ -1,8 +1,10 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/auth"
|
||||
)
|
||||
@ -33,29 +35,35 @@ func HandleAuthCommand(args []string) {
|
||||
|
||||
func showAuthHelp() {
|
||||
fmt.Printf("🔐 Authentication Commands\n\n")
|
||||
fmt.Printf("Usage: network-cli auth <subcommand>\n\n")
|
||||
fmt.Printf("Usage: dbn auth <subcommand>\n\n")
|
||||
fmt.Printf("Subcommands:\n")
|
||||
fmt.Printf(" login - Authenticate with wallet\n")
|
||||
fmt.Printf(" login - Authenticate by providing your wallet address\n")
|
||||
fmt.Printf(" logout - Clear stored credentials\n")
|
||||
fmt.Printf(" whoami - Show current authentication status\n")
|
||||
fmt.Printf(" status - Show detailed authentication info\n\n")
|
||||
fmt.Printf("Examples:\n")
|
||||
fmt.Printf(" network-cli auth login\n")
|
||||
fmt.Printf(" network-cli auth whoami\n")
|
||||
fmt.Printf(" network-cli auth status\n")
|
||||
fmt.Printf(" network-cli auth logout\n\n")
|
||||
fmt.Printf(" dbn auth login # Enter wallet address interactively\n")
|
||||
fmt.Printf(" dbn auth whoami # Check who you're logged in as\n")
|
||||
fmt.Printf(" dbn auth status # View detailed authentication info\n")
|
||||
fmt.Printf(" dbn auth logout # Clear all stored credentials\n\n")
|
||||
fmt.Printf("Environment Variables:\n")
|
||||
fmt.Printf(" DEBROS_GATEWAY_URL - Gateway URL (overrides environment config)\n\n")
|
||||
fmt.Printf("Authentication Flow:\n")
|
||||
fmt.Printf(" 1. Run 'dbn auth login'\n")
|
||||
fmt.Printf(" 2. Enter your wallet address when prompted\n")
|
||||
fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n")
|
||||
fmt.Printf(" 4. An API key will be generated and saved to ~/.orama/credentials.json\n\n")
|
||||
fmt.Printf("Note: Authentication uses the currently active environment.\n")
|
||||
fmt.Printf(" Use 'network-cli env current' to see your active environment.\n")
|
||||
fmt.Printf(" Use 'dbn env current' to see your active environment.\n")
|
||||
}
|
||||
|
||||
func handleAuthLogin() {
|
||||
gatewayURL := getGatewayURL()
|
||||
// Prompt for node selection
|
||||
gatewayURL := promptForGatewayURL()
|
||||
fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL)
|
||||
|
||||
// Use the wallet authentication flow
|
||||
creds, err := auth.PerformWalletAuthentication(gatewayURL)
|
||||
// Use the simple authentication flow
|
||||
creds, err := auth.PerformSimpleAuthentication(gatewayURL)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "❌ Authentication failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
@ -72,6 +80,7 @@ func handleAuthLogin() {
|
||||
fmt.Printf("📁 Credentials saved to: %s\n", credsPath)
|
||||
fmt.Printf("🎯 Wallet: %s\n", creds.Wallet)
|
||||
fmt.Printf("🏢 Namespace: %s\n", creds.Namespace)
|
||||
fmt.Printf("🔑 API Key: %s\n", creds.APIKey)
|
||||
}
|
||||
|
||||
func handleAuthLogout() {
|
||||
@ -93,7 +102,7 @@ func handleAuthWhoami() {
|
||||
creds, exists := store.GetCredentialsForGateway(gatewayURL)
|
||||
|
||||
if !exists || !creds.IsValid() {
|
||||
fmt.Println("❌ Not authenticated - run 'network-cli auth login' to authenticate")
|
||||
fmt.Println("❌ Not authenticated - run 'dbn auth login' to authenticate")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -155,7 +164,55 @@ func handleAuthStatus() {
|
||||
}
|
||||
}
|
||||
|
||||
// promptForGatewayURL interactively prompts for the gateway URL
|
||||
// Allows user to choose between local node or remote node by domain
|
||||
func promptForGatewayURL() string {
|
||||
// Check environment variable first (allows override without prompting)
|
||||
if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
|
||||
return url
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
fmt.Println("\n🌐 Node Connection")
|
||||
fmt.Println("==================")
|
||||
fmt.Println("1. Local node (localhost:6001)")
|
||||
fmt.Println("2. Remote node (enter domain)")
|
||||
fmt.Print("\nSelect option [1/2]: ")
|
||||
|
||||
choice, _ := reader.ReadString('\n')
|
||||
choice = strings.TrimSpace(choice)
|
||||
|
||||
if choice == "1" || choice == "" {
|
||||
return "http://localhost:6001"
|
||||
}
|
||||
|
||||
if choice != "2" {
|
||||
fmt.Println("⚠️ Invalid option, using localhost")
|
||||
return "http://localhost:6001"
|
||||
}
|
||||
|
||||
fmt.Print("Enter node domain (e.g., node-hk19de.debros.network): ")
|
||||
domain, _ := reader.ReadString('\n')
|
||||
domain = strings.TrimSpace(domain)
|
||||
|
||||
if domain == "" {
|
||||
fmt.Println("⚠️ No domain entered, using localhost")
|
||||
return "http://localhost:6001"
|
||||
}
|
||||
|
||||
// Remove any protocol prefix if user included it
|
||||
domain = strings.TrimPrefix(domain, "https://")
|
||||
domain = strings.TrimPrefix(domain, "http://")
|
||||
// Remove trailing slash
|
||||
domain = strings.TrimSuffix(domain, "/")
|
||||
|
||||
// Use HTTPS for remote domains
|
||||
return fmt.Sprintf("https://%s", domain)
|
||||
}
|
||||
|
||||
// getGatewayURL returns the gateway URL based on environment or env var
|
||||
// Used by other commands that don't need interactive node selection
|
||||
func getGatewayURL() string {
|
||||
// Check environment variable first (for backwards compatibility)
|
||||
if url := os.Getenv("DEBROS_GATEWAY_URL"); url != "" {
|
||||
@ -168,6 +225,6 @@ func getGatewayURL() string {
|
||||
return env.GatewayURL
|
||||
}
|
||||
|
||||
// Fallback to default
|
||||
// Fallback to default (node-1)
|
||||
return "http://localhost:6001"
|
||||
}
|
||||
|
||||
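The prompt above normalizes whatever the user types (dropping any protocol prefix and trailing slash) before forcing https. A standalone sketch of that cleanup, useful if other commands ever need it; the helper name is hypothetical and not part of the diff.

package cli

import "strings"

// normalizeNodeDomain mirrors the cleanup applied in promptForGatewayURL:
// strip an optional protocol prefix and trailing slash, leaving a bare domain.
func normalizeNodeDomain(input string) string {
	d := strings.TrimSpace(input)
	d = strings.TrimPrefix(d, "https://")
	d = strings.TrimPrefix(d, "http://")
	d = strings.TrimSuffix(d, "/")
	return d
}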
@ -158,7 +158,7 @@ func HandlePeerIDCommand(format string, timeout time.Duration) {
|
||||
// HandlePubSubCommand handles pubsub commands
|
||||
func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
|
||||
if len(args) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub <publish|subscribe|topics> [args...]\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub <publish|subscribe|topics> [args...]\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -179,7 +179,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
|
||||
switch subcommand {
|
||||
case "publish":
|
||||
if len(args) < 3 {
|
||||
fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub publish <topic> <message>\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub publish <topic> <message>\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
err := cli.PubSub().Publish(ctx, args[1], []byte(args[2]))
|
||||
@ -191,7 +191,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
|
||||
|
||||
case "subscribe":
|
||||
if len(args) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "Usage: network-cli pubsub subscribe <topic> [duration]\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub subscribe <topic> [duration]\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
duration := 30 * time.Second
|
||||
@ -243,14 +243,23 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
|
||||
// Helper functions
|
||||
|
||||
func createClient() (client.NetworkClient, error) {
|
||||
config := client.DefaultClientConfig("network-cli")
|
||||
config := client.DefaultClientConfig("dbn")
|
||||
|
||||
// Use active environment's gateway URL
|
||||
gatewayURL := getGatewayURL()
|
||||
config.GatewayURL = gatewayURL
|
||||
|
||||
// Try to get peer configuration from active environment
|
||||
env, err := GetActiveEnvironment()
|
||||
if err == nil && env != nil {
|
||||
// Environment loaded successfully - gateway URL already set above
|
||||
_ = env // Reserve for future peer configuration
|
||||
}
|
||||
|
||||
// Check for existing credentials using enhanced authentication
|
||||
creds, err := auth.GetValidEnhancedCredentials()
|
||||
if err != nil {
|
||||
// No valid credentials found, use the enhanced authentication flow
|
||||
gatewayURL := getGatewayURL()
|
||||
|
||||
newCreds, authErr := auth.GetOrPromptForCredentials(gatewayURL)
|
||||
if authErr != nil {
|
||||
return nil, fmt.Errorf("authentication failed: %w", authErr)
|
||||
|
||||
@ -40,30 +40,30 @@ func HandleDevCommand(args []string) {
|
||||
|
||||
func showDevHelp() {
|
||||
fmt.Printf("🚀 Development Environment Commands\n\n")
|
||||
fmt.Printf("Usage: network-cli dev <subcommand> [options]\n\n")
|
||||
fmt.Printf("Usage: orama dev <subcommand> [options]\n\n")
|
||||
fmt.Printf("Subcommands:\n")
|
||||
fmt.Printf(" up - Start development environment (bootstrap + 2 nodes + gateway)\n")
|
||||
fmt.Printf(" up - Start development environment (5 nodes + gateway)\n")
|
||||
fmt.Printf(" down - Stop all development services\n")
|
||||
fmt.Printf(" status - Show status of running services\n")
|
||||
fmt.Printf(" logs <component> - Tail logs for a component\n")
|
||||
fmt.Printf(" help - Show this help\n\n")
|
||||
fmt.Printf("Examples:\n")
|
||||
fmt.Printf(" network-cli dev up\n")
|
||||
fmt.Printf(" network-cli dev down\n")
|
||||
fmt.Printf(" network-cli dev status\n")
|
||||
fmt.Printf(" network-cli dev logs bootstrap --follow\n")
|
||||
fmt.Printf(" orama dev up\n")
|
||||
fmt.Printf(" orama dev down\n")
|
||||
fmt.Printf(" orama dev status\n")
|
||||
fmt.Printf(" orama dev logs node-1 --follow\n")
|
||||
}
|
||||
|
||||
func handleDevUp(args []string) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Get home directory and .debros path
|
||||
// Get home directory and .orama path
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
debrosDir := filepath.Join(homeDir, ".debros")
|
||||
oramaDir := filepath.Join(homeDir, ".orama")
|
||||
|
||||
// Step 1: Check dependencies
|
||||
fmt.Printf("📋 Checking dependencies...\n\n")
|
||||
@ -90,7 +90,7 @@ func handleDevUp(args []string) {
|
||||
|
||||
// Step 3: Ensure configs
|
||||
fmt.Printf("⚙️ Preparing configuration files...\n\n")
|
||||
ensurer := development.NewConfigEnsurer(debrosDir)
|
||||
ensurer := development.NewConfigEnsurer(oramaDir)
|
||||
if err := ensurer.EnsureAll(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "❌ Failed to prepare configs: %v\n", err)
|
||||
os.Exit(1)
|
||||
@ -98,7 +98,7 @@ func handleDevUp(args []string) {
|
||||
fmt.Printf("\n")
|
||||
|
||||
// Step 4: Start services
|
||||
pm := development.NewProcessManager(debrosDir, os.Stdout)
|
||||
pm := development.NewProcessManager(oramaDir, os.Stdout)
|
||||
if err := pm.StartAll(ctx); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "❌ Error starting services: %v\n", err)
|
||||
os.Exit(1)
|
||||
@ -107,17 +107,20 @@ func handleDevUp(args []string) {
|
||||
// Step 5: Show summary
|
||||
fmt.Printf("🎉 Development environment is running!\n\n")
|
||||
fmt.Printf("Key endpoints:\n")
|
||||
fmt.Printf(" Gateway: http://localhost:6001\n")
|
||||
fmt.Printf(" Bootstrap IPFS: http://localhost:4501\n")
|
||||
fmt.Printf(" Node2 IPFS: http://localhost:4502\n")
|
||||
fmt.Printf(" Node3 IPFS: http://localhost:4503\n")
|
||||
fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
|
||||
fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
|
||||
fmt.Printf(" Gateway: http://localhost:6001\n")
|
||||
fmt.Printf(" Node-1 IPFS: http://localhost:4501\n")
|
||||
fmt.Printf(" Node-2 IPFS: http://localhost:4502\n")
|
||||
fmt.Printf(" Node-3 IPFS: http://localhost:4503\n")
|
||||
fmt.Printf(" Node-4 IPFS: http://localhost:4504\n")
|
||||
fmt.Printf(" Node-5 IPFS: http://localhost:4505\n")
|
||||
fmt.Printf(" Anon SOCKS: 127.0.0.1:9050\n")
|
||||
fmt.Printf(" Olric Cache: http://localhost:3320\n\n")
|
||||
fmt.Printf("Useful commands:\n")
|
||||
fmt.Printf(" network-cli dev status - Show status\n")
|
||||
fmt.Printf(" network-cli dev logs bootstrap - Bootstrap logs\n")
|
||||
fmt.Printf(" network-cli dev down - Stop all services\n\n")
|
||||
fmt.Printf("Logs directory: %s/logs\n\n", debrosDir)
|
||||
fmt.Printf(" orama dev status - Show status\n")
|
||||
fmt.Printf(" orama dev logs node-1 - Node-1 logs\n")
|
||||
fmt.Printf(" orama dev logs node-2 - Node-2 logs\n")
|
||||
fmt.Printf(" orama dev down - Stop all services\n\n")
|
||||
fmt.Printf("Logs directory: %s/logs\n\n", oramaDir)
|
||||
}
|
||||
|
||||
func handleDevDown(args []string) {
|
||||
@ -126,14 +129,17 @@ func handleDevDown(args []string) {
|
||||
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
debrosDir := filepath.Join(homeDir, ".debros")
|
||||
oramaDir := filepath.Join(homeDir, ".orama")
|
||||
|
||||
pm := development.NewProcessManager(debrosDir, os.Stdout)
|
||||
pm := development.NewProcessManager(oramaDir, os.Stdout)
|
||||
ctx := context.Background()
|
||||
|
||||
if err := pm.StopAll(ctx); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "⚠️ Error stopping services: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("✅ All services have been stopped\n\n")
|
||||
}
|
||||
|
||||
func handleDevStatus(args []string) {
|
||||
@ -142,9 +148,9 @@ func handleDevStatus(args []string) {
|
||||
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
debrosDir := filepath.Join(homeDir, ".debros")
|
||||
oramaDir := filepath.Join(homeDir, ".orama")
|
||||
|
||||
pm := development.NewProcessManager(debrosDir, os.Stdout)
|
||||
pm := development.NewProcessManager(oramaDir, os.Stdout)
|
||||
ctx := context.Background()
|
||||
|
||||
pm.Status(ctx)
|
||||
@ -152,8 +158,8 @@ func handleDevStatus(args []string) {
|
||||
|
||||
func handleDevLogs(args []string) {
|
||||
if len(args) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "Usage: network-cli dev logs <component> [--follow]\n")
|
||||
fmt.Fprintf(os.Stderr, "\nComponents: bootstrap, node2, node3, gateway, ipfs-bootstrap, ipfs-node2, ipfs-node3, olric, anon\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage: dbn dev logs <component> [--follow]\n")
|
||||
fmt.Fprintf(os.Stderr, "\nComponents: node-1, node-2, node-3, node-4, node-5, gateway, ipfs-node-1, ipfs-node-2, ipfs-node-3, ipfs-node-4, ipfs-node-5, olric, anon\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -165,9 +171,9 @@ func handleDevLogs(args []string) {
|
||||
fmt.Fprintf(os.Stderr, "❌ Failed to get home directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
debrosDir := filepath.Join(homeDir, ".debros")
|
||||
oramaDir := filepath.Join(homeDir, ".orama")
|
||||
|
||||
logPath := filepath.Join(debrosDir, "logs", fmt.Sprintf("%s.log", component))
|
||||
logPath := filepath.Join(oramaDir, "logs", fmt.Sprintf("%s.log", component))
|
||||
if _, err := os.Stat(logPath); os.IsNotExist(err) {
|
||||
fmt.Fprintf(os.Stderr, "❌ Log file not found: %s\n", logPath)
|
||||
os.Exit(1)
|
||||
|
||||
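The logs subcommand resolves each component to ~/.orama/logs/<component>.log, as the path construction above shows. A minimal sketch of following one of those files; shelling out to tail is only one convenient way to stream it, and the real --follow implementation may differ.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

func main() {
	component := "node-1" // any component listed in the usage line above
	home, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintf(os.Stderr, "home dir: %v\n", err)
		os.Exit(1)
	}
	logPath := filepath.Join(home, ".orama", "logs", component+".log")

	// Stream the log the same way `dbn dev logs node-1 --follow` would surface it.
	cmd := exec.Command("tail", "-f", logPath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "tail: %v\n", err)
		os.Exit(1)
	}
}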
@ -35,7 +35,7 @@ func HandleEnvCommand(args []string) {
|
||||
|
||||
func showEnvHelp() {
|
||||
fmt.Printf("🌍 Environment Management Commands\n\n")
|
||||
fmt.Printf("Usage: network-cli env <subcommand>\n\n")
|
||||
fmt.Printf("Usage: dbn env <subcommand>\n\n")
|
||||
fmt.Printf("Subcommands:\n")
|
||||
fmt.Printf(" list - List all available environments\n")
|
||||
fmt.Printf(" current - Show current active environment\n")
|
||||
@ -43,15 +43,15 @@ func showEnvHelp() {
|
||||
fmt.Printf(" enable - Alias for 'switch' (e.g., 'devnet enable')\n\n")
|
||||
fmt.Printf("Available Environments:\n")
|
||||
fmt.Printf(" local - Local development (http://localhost:6001)\n")
|
||||
fmt.Printf(" devnet - Development network (https://devnet.debros.network)\n")
|
||||
fmt.Printf(" testnet - Test network (https://testnet.debros.network)\n\n")
|
||||
fmt.Printf(" devnet - Development network (https://devnet.orama.network)\n")
|
||||
fmt.Printf(" testnet - Test network (https://testnet.orama.network)\n\n")
|
||||
fmt.Printf("Examples:\n")
|
||||
fmt.Printf(" network-cli env list\n")
|
||||
fmt.Printf(" network-cli env current\n")
|
||||
fmt.Printf(" network-cli env switch devnet\n")
|
||||
fmt.Printf(" network-cli env enable testnet\n")
|
||||
fmt.Printf(" network-cli devnet enable # Shorthand for switch to devnet\n")
|
||||
fmt.Printf(" network-cli testnet enable # Shorthand for switch to testnet\n")
|
||||
fmt.Printf(" dbn env list\n")
|
||||
fmt.Printf(" dbn env current\n")
|
||||
fmt.Printf(" dbn env switch devnet\n")
|
||||
fmt.Printf(" dbn env enable testnet\n")
|
||||
fmt.Printf(" dbn devnet enable # Shorthand for switch to devnet\n")
|
||||
fmt.Printf(" dbn testnet enable # Shorthand for switch to testnet\n")
|
||||
}
|
||||
|
||||
func handleEnvList() {
|
||||
@ -99,7 +99,7 @@ func handleEnvCurrent() {
|
||||
|
||||
func handleEnvSwitch(args []string) {
|
||||
if len(args) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "Usage: network-cli env switch <environment>\n")
|
||||
fmt.Fprintf(os.Stderr, "Usage: dbn env switch <environment>\n")
|
||||
fmt.Fprintf(os.Stderr, "Available: local, devnet, testnet\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
@ -28,18 +28,18 @@ var DefaultEnvironments = []Environment{
|
||||
{
|
||||
Name: "local",
|
||||
GatewayURL: "http://localhost:6001",
|
||||
Description: "Local development environment",
|
||||
Description: "Local development environment (node-1)",
|
||||
IsActive: true,
|
||||
},
|
||||
{
|
||||
Name: "devnet",
|
||||
GatewayURL: "https://devnet.debros.network",
|
||||
GatewayURL: "https://devnet.orama.network",
|
||||
Description: "Development network (testnet)",
|
||||
IsActive: false,
|
||||
},
|
||||
{
|
||||
Name: "testnet",
|
||||
GatewayURL: "https://testnet.debros.network",
|
||||
GatewayURL: "https://testnet.orama.network",
|
||||
Description: "Test network (staging)",
|
||||
IsActive: false,
|
||||
},
|
||||
|
||||
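Switching environments amounts to flipping the IsActive flag on exactly one of the entries above. The sketch below shows only that selection logic and assumes it sits alongside the Environment type in the CLI package; the real command also persists the choice so later invocations pick it up.

package cli

import "fmt"

// switchEnvironment marks the named environment active and all others inactive.
func switchEnvironment(envs []Environment, name string) (*Environment, error) {
	var selected *Environment
	for i := range envs {
		if envs[i].Name == name {
			envs[i].IsActive = true
			selected = &envs[i]
		} else {
			envs[i].IsActive = false
		}
	}
	if selected == nil {
		return nil, fmt.Errorf("unknown environment %q (expected local, devnet, or testnet)", name)
	}
	return selected, nil
}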
File diff suppressed because it is too large

pkg/cli/prod_commands_test.go (new file, 172 lines)
@@ -0,0 +1,172 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestProdCommandFlagParsing verifies that prod command flags are parsed correctly.
// Note: the installer no longer uses a --bootstrap flag. A node is treated as the
// first node when it has --vps-ip but neither --peers nor --join; a joining node
// supplies --vps-ip together with --peers (or --join) and --cluster-secret.
func TestProdCommandFlagParsing(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
expectVPSIP string
|
||||
expectDomain string
|
||||
expectPeers string
|
||||
expectJoin string
|
||||
expectSecret string
|
||||
expectBranch string
|
||||
isFirstNode bool // first node = no peers and no join address
|
||||
}{
|
||||
{
|
||||
name: "first node (creates new cluster)",
|
||||
args: []string{"install", "--vps-ip", "10.0.0.1", "--domain", "node-1.example.com"},
|
||||
expectVPSIP: "10.0.0.1",
|
||||
expectDomain: "node-1.example.com",
|
||||
isFirstNode: true,
|
||||
},
|
||||
{
|
||||
name: "joining node with peers",
|
||||
args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "/ip4/10.0.0.1/tcp/4001/p2p/Qm123", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
|
||||
expectVPSIP: "10.0.0.2",
|
||||
expectPeers: "/ip4/10.0.0.1/tcp/4001/p2p/Qm123",
|
||||
expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
isFirstNode: false,
|
||||
},
|
||||
{
|
||||
name: "joining node with join address",
|
||||
args: []string{"install", "--vps-ip", "10.0.0.3", "--join", "10.0.0.1:7001", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
|
||||
expectVPSIP: "10.0.0.3",
|
||||
expectJoin: "10.0.0.1:7001",
|
||||
expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
isFirstNode: false,
|
||||
},
|
||||
{
|
||||
name: "with nightly branch",
|
||||
args: []string{"install", "--vps-ip", "10.0.0.4", "--branch", "nightly"},
|
||||
expectVPSIP: "10.0.0.4",
|
||||
expectBranch: "nightly",
|
||||
isFirstNode: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Extract flags manually to verify parsing logic
|
||||
var vpsIP, domain, peersStr, joinAddr, clusterSecret, branch string
|
||||
|
||||
for i, arg := range tt.args {
|
||||
switch arg {
|
||||
case "--vps-ip":
|
||||
if i+1 < len(tt.args) {
|
||||
vpsIP = tt.args[i+1]
|
||||
}
|
||||
case "--domain":
|
||||
if i+1 < len(tt.args) {
|
||||
domain = tt.args[i+1]
|
||||
}
|
||||
case "--peers":
|
||||
if i+1 < len(tt.args) {
|
||||
peersStr = tt.args[i+1]
|
||||
}
|
||||
case "--join":
|
||||
if i+1 < len(tt.args) {
|
||||
joinAddr = tt.args[i+1]
|
||||
}
|
||||
case "--cluster-secret":
|
||||
if i+1 < len(tt.args) {
|
||||
clusterSecret = tt.args[i+1]
|
||||
}
|
||||
case "--branch":
|
||||
if i+1 < len(tt.args) {
|
||||
branch = tt.args[i+1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// First node detection: no peers and no join address
|
||||
isFirstNode := peersStr == "" && joinAddr == ""
|
||||
|
||||
if vpsIP != tt.expectVPSIP {
|
||||
t.Errorf("expected vpsIP=%q, got %q", tt.expectVPSIP, vpsIP)
|
||||
}
|
||||
if domain != tt.expectDomain {
|
||||
t.Errorf("expected domain=%q, got %q", tt.expectDomain, domain)
|
||||
}
|
||||
if peersStr != tt.expectPeers {
|
||||
t.Errorf("expected peers=%q, got %q", tt.expectPeers, peersStr)
|
||||
}
|
||||
if joinAddr != tt.expectJoin {
|
||||
t.Errorf("expected join=%q, got %q", tt.expectJoin, joinAddr)
|
||||
}
|
||||
if clusterSecret != tt.expectSecret {
|
||||
t.Errorf("expected clusterSecret=%q, got %q", tt.expectSecret, clusterSecret)
|
||||
}
|
||||
if branch != tt.expectBranch {
|
||||
t.Errorf("expected branch=%q, got %q", tt.expectBranch, branch)
|
||||
}
|
||||
if isFirstNode != tt.isFirstNode {
|
||||
t.Errorf("expected isFirstNode=%v, got %v", tt.isFirstNode, isFirstNode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNormalizePeers tests the peer multiaddr normalization
|
||||
func TestNormalizePeers(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expectCount int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
expectCount: 0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "single peer",
|
||||
input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj",
|
||||
expectCount: 1,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "multiple peers",
|
||||
input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj,/ip4/10.0.0.2/tcp/4001/p2p/12D3KooWJzL4SHW3o7sZpzjfEPJzC6Ky7gKvJxY8vQVDR2jHc8F1",
|
||||
expectCount: 2,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "duplicate peers deduplicated",
|
||||
input: "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj,/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj",
|
||||
expectCount: 1,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid multiaddr",
|
||||
input: "not-a-multiaddr",
|
||||
expectCount: 0,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
peers, err := normalizePeers(tt.input)
|
||||
|
||||
if tt.expectError && err == nil {
|
||||
t.Errorf("expected error but got none")
|
||||
}
|
||||
if !tt.expectError && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if len(peers) != tt.expectCount {
|
||||
t.Errorf("expected %d peers, got %d", tt.expectCount, len(peers))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
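The tests above pin down normalizePeers' contract: split on commas, validate each multiaddr, deduplicate, and treat empty input as empty output. The implementation lives in the suppressed prod_commands.go diff, so the version below is only a sketch consistent with those expectations, not the actual code.

package cli

import (
	"fmt"
	"strings"

	"github.com/multiformats/go-multiaddr"
)

// normalizePeers splits a comma-separated list of peer multiaddrs, validates
// each entry, and drops duplicates while preserving order.
func normalizePeers(input string) ([]string, error) {
	input = strings.TrimSpace(input)
	if input == "" {
		return nil, nil
	}

	seen := make(map[string]struct{})
	var peers []string
	for _, raw := range strings.Split(input, ",") {
		addr := strings.TrimSpace(raw)
		if addr == "" {
			continue
		}
		if _, err := multiaddr.NewMultiaddr(addr); err != nil {
			return nil, fmt.Errorf("invalid peer multiaddr %q: %w", addr, err)
		}
		if _, dup := seen[addr]; dup {
			continue
		}
		seen[addr] = struct{}{}
		peers = append(peers, addr)
	}
	return peers, nil
}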
@ -195,49 +195,49 @@ func (c *Client) Connect() error {
|
||||
c.pubsub = &pubSubBridge{client: c, adapter: adapter}
|
||||
c.logger.Info("Pubsub bridge created successfully")
|
||||
|
||||
c.logger.Info("Starting bootstrap peer connections...")
|
||||
c.logger.Info("Starting peer connections...")
|
||||
|
||||
// Connect to bootstrap peers FIRST
|
||||
// Connect to peers FIRST
|
||||
ctx, cancel := context.WithTimeout(context.Background(), c.config.ConnectTimeout)
|
||||
defer cancel()
|
||||
|
||||
bootstrapPeersConnected := 0
|
||||
for _, bootstrapAddr := range c.config.BootstrapPeers {
|
||||
c.logger.Info("Attempting to connect to bootstrap peer", zap.String("addr", bootstrapAddr))
|
||||
if err := c.connectToBootstrap(ctx, bootstrapAddr); err != nil {
|
||||
c.logger.Warn("Failed to connect to bootstrap peer",
|
||||
zap.String("addr", bootstrapAddr),
|
||||
peersConnected := 0
|
||||
for _, peerAddr := range c.config.BootstrapPeers {
|
||||
c.logger.Info("Attempting to connect to peer", zap.String("addr", peerAddr))
|
||||
if err := c.connectToPeer(ctx, peerAddr); err != nil {
|
||||
c.logger.Warn("Failed to connect to peer",
|
||||
zap.String("addr", peerAddr),
|
||||
zap.Error(err))
|
||||
continue
|
||||
}
|
||||
bootstrapPeersConnected++
|
||||
c.logger.Info("Successfully connected to bootstrap peer", zap.String("addr", bootstrapAddr))
|
||||
peersConnected++
|
||||
c.logger.Info("Successfully connected to peer", zap.String("addr", peerAddr))
|
||||
}
|
||||
|
||||
if bootstrapPeersConnected == 0 {
|
||||
c.logger.Warn("No bootstrap peers connected, continuing anyway")
|
||||
if peersConnected == 0 {
|
||||
c.logger.Warn("No peers connected, continuing anyway")
|
||||
} else {
|
||||
c.logger.Info("Bootstrap peer connections completed", zap.Int("connected_count", bootstrapPeersConnected))
|
||||
c.logger.Info("Peer connections completed", zap.Int("connected_count", peersConnected))
|
||||
}
|
||||
|
||||
c.logger.Info("Adding bootstrap peers to peerstore...")
|
||||
c.logger.Info("Adding peers to peerstore...")
|
||||
|
||||
// Add bootstrap peers to peerstore so we can connect to them later
|
||||
for _, bootstrapAddr := range c.config.BootstrapPeers {
|
||||
if ma, err := multiaddr.NewMultiaddr(bootstrapAddr); err == nil {
|
||||
// Add peers to peerstore so we can connect to them later
|
||||
for _, peerAddr := range c.config.BootstrapPeers {
|
||||
if ma, err := multiaddr.NewMultiaddr(peerAddr); err == nil {
|
||||
if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
|
||||
c.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
|
||||
c.logger.Debug("Added bootstrap peer to peerstore",
|
||||
c.logger.Debug("Added peer to peerstore",
|
||||
zap.String("peer", peerInfo.ID.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
c.logger.Info("Bootstrap peers added to peerstore")
|
||||
c.logger.Info("Peers added to peerstore")
|
||||
|
||||
c.logger.Info("Starting connection monitoring...")
|
||||
|
||||
// Client is a lightweight P2P participant - no discovery needed
|
||||
// We only connect to known bootstrap peers and let nodes handle discovery
|
||||
// We only connect to known peers and let nodes handle discovery
|
||||
c.logger.Debug("Client configured as lightweight P2P participant (no discovery)")
|
||||
|
||||
// Start minimal connection monitoring
|
||||
|
||||
@ -9,8 +9,8 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// connectToBootstrap connects to a bootstrap peer
|
||||
func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
|
||||
// connectToPeer connects to a peer address
|
||||
func (c *Client) connectToPeer(ctx context.Context, addr string) error {
|
||||
ma, err := multiaddr.NewMultiaddr(addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid multiaddr: %w", err)
|
||||
@ -20,14 +20,14 @@ func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
|
||||
peerInfo, err := peer.AddrInfoFromP2pAddr(ma)
|
||||
if err != nil {
|
||||
// If there's no peer ID, we can't connect
|
||||
c.logger.Warn("Bootstrap address missing peer ID, skipping",
|
||||
c.logger.Warn("Peer address missing peer ID, skipping",
|
||||
zap.String("addr", addr))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Avoid dialing ourselves: if the bootstrap address resolves to our own peer ID, skip.
|
||||
// Avoid dialing ourselves: if the peer address resolves to our own peer ID, skip.
|
||||
if c.host != nil && peerInfo.ID == c.host.ID() {
|
||||
c.logger.Debug("Skipping bootstrap address because it resolves to self",
|
||||
c.logger.Debug("Skipping peer address because it resolves to self",
|
||||
zap.String("addr", addr),
|
||||
zap.String("peer_id", peerInfo.ID.String()))
|
||||
return nil
|
||||
@ -38,7 +38,7 @@ func (c *Client) connectToBootstrap(ctx context.Context, addr string) error {
|
||||
return fmt.Errorf("failed to connect to peer: %w", err)
|
||||
}
|
||||
|
||||
c.logger.Debug("Connected to bootstrap peer",
|
||||
c.logger.Debug("Connected to peer",
|
||||
zap.String("peer_id", peerInfo.ID.String()),
|
||||
zap.String("addr", addr))
|
||||
|
||||
|
||||
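Outside the client, the same dial pattern in connectToPeer works against any libp2p host: parse the multiaddr, extract the AddrInfo, skip self, then Connect. A condensed, self-contained sketch; the peer address is illustrative and the connect attempt is expected to fail unless that peer is actually reachable.

package main

import (
	"context"
	"log"
	"time"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

func main() {
	h, err := libp2p.New()
	if err != nil {
		log.Fatalf("new host: %v", err)
	}
	defer h.Close()

	// Same steps as connectToPeer: multiaddr -> AddrInfo -> Connect.
	addr := "/ip4/10.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
	ma, err := multiaddr.NewMultiaddr(addr)
	if err != nil {
		log.Fatalf("invalid multiaddr: %v", err)
	}
	info, err := peer.AddrInfoFromP2pAddr(ma)
	if err != nil {
		log.Fatalf("missing peer ID: %v", err)
	}
	if info.ID == h.ID() {
		log.Println("address resolves to self, skipping")
		return
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := h.Connect(ctx, *info); err != nil {
		log.Printf("connect failed (expected unless the peer is reachable): %v", err)
	}
}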
@ -9,7 +9,7 @@ import (
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// DefaultBootstrapPeers returns the library's default bootstrap peer multiaddrs.
|
||||
// DefaultBootstrapPeers returns the default peer multiaddrs.
|
||||
// These can be overridden by environment variables or config.
|
||||
func DefaultBootstrapPeers() []string {
|
||||
// Check environment variable first
|
||||
@ -48,7 +48,7 @@ func DefaultDatabaseEndpoints() []string {
|
||||
}
|
||||
}
|
||||
|
||||
// Try to derive from bootstrap peers if available
|
||||
// Try to derive from configured peers if available
|
||||
peers := DefaultBootstrapPeers()
|
||||
if len(peers) > 0 {
|
||||
endpoints := make([]string, 0, len(peers))
|
||||
|
||||
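DefaultDatabaseEndpoints' fallback of deriving HTTP endpoints from configured peers can be sketched as below. The exact URL shape the real code produces is not visible in this hunk, so the http scheme and the 5001 port are assumptions taken from the RQLite defaults elsewhere in this diff.

package client

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

// endpointsFromPeers pulls the IPv4 component out of each peer multiaddr and
// points at that host's RQLite HTTP port.
func endpointsFromPeers(peers []string, rqlitePort int) []string {
	var endpoints []string
	for _, p := range peers {
		ma, err := multiaddr.NewMultiaddr(p)
		if err != nil {
			continue // skip malformed entries
		}
		ip, err := ma.ValueForProtocol(multiaddr.P_IP4)
		if err != nil {
			continue // no IPv4 component to derive an endpoint from
		}
		endpoints = append(endpoints, fmt.Sprintf("http://%s:%d", ip, rqlitePort))
	}
	return endpoints
}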
@ -10,15 +10,15 @@ import (
|
||||
func TestDefaultBootstrapPeersNonEmpty(t *testing.T) {
|
||||
old := os.Getenv("DEBROS_BOOTSTRAP_PEERS")
|
||||
t.Cleanup(func() { os.Setenv("DEBROS_BOOTSTRAP_PEERS", old) })
|
||||
// Set a valid bootstrap peer
|
||||
// Set a valid peer
|
||||
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
||||
_ = os.Setenv("DEBROS_BOOTSTRAP_PEERS", validPeer)
|
||||
peers := DefaultBootstrapPeers()
|
||||
if len(peers) == 0 {
|
||||
t.Fatalf("expected non-empty default bootstrap peers")
|
||||
t.Fatalf("expected non-empty default peers")
|
||||
}
|
||||
if peers[0] != validPeer {
|
||||
t.Fatalf("expected bootstrap peer %s, got %s", validPeer, peers[0])
|
||||
t.Fatalf("expected peer %s, got %s", validPeer, peers[0])
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -2,7 +2,9 @@ package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@ -160,17 +162,31 @@ func (d *DatabaseClientImpl) isWriteOperation(sql string) bool {
|
||||
func (d *DatabaseClientImpl) clearConnection() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.connection = nil
|
||||
if d.connection != nil {
|
||||
d.connection.Close()
|
||||
d.connection = nil
|
||||
}
|
||||
}
|
||||
|
||||
// getRQLiteConnection returns a connection to RQLite, creating one if needed
|
||||
func (d *DatabaseClientImpl) getRQLiteConnection() (*gorqlite.Connection, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.mu.RLock()
|
||||
conn := d.connection
|
||||
d.mu.RUnlock()
|
||||
|
||||
// Always try to get a fresh connection to handle leadership changes
|
||||
// and node failures gracefully
|
||||
return d.connectToAvailableNode()
|
||||
if conn != nil {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
newConn, err := d.connectToAvailableNode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
d.connection = newConn
|
||||
d.mu.Unlock()
|
||||
return newConn, nil
|
||||
}
|
||||
|
||||
// getRQLiteNodes returns a list of RQLite node URLs with precedence:
|
||||
@ -227,7 +243,6 @@ func (d *DatabaseClientImpl) connectToAvailableNode() (*gorqlite.Connection, err
|
||||
continue
|
||||
}
|
||||
|
||||
d.connection = conn
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
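The revised getRQLiteConnection is a standard read-mostly cache: check under a read lock, and only take the write lock to install a freshly dialed connection. The self-contained sketch below shows just that locking shape with a placeholder dial function; it is not the gorqlite-specific code.

package main

import (
	"fmt"
	"sync"
)

type conn struct{ addr string }

type pool struct {
	mu   sync.RWMutex
	conn *conn
	dial func() (*conn, error) // stands in for connectToAvailableNode
}

// get returns the cached connection if present, otherwise dials and caches one.
func (p *pool) get() (*conn, error) {
	p.mu.RLock()
	c := p.conn
	p.mu.RUnlock()
	if c != nil {
		return c, nil
	}

	newConn, err := p.dial()
	if err != nil {
		return nil, err
	}

	p.mu.Lock()
	p.conn = newConn
	p.mu.Unlock()
	return newConn, nil
}

func main() {
	p := &pool{dial: func() (*conn, error) { return &conn{addr: "http://localhost:5001"}, nil }}
	c, err := p.get()
	fmt.Println(c, err)
}

Like the diff, this accepts that two goroutines racing past the read lock may both dial; the later write simply wins, which is harmless when connections are idempotent to create.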
@ -491,15 +506,100 @@ func (n *NetworkInfoImpl) GetStatus(ctx context.Context) (*NetworkStatus, error)
|
||||
}
|
||||
}
|
||||
|
||||
// Try to get IPFS peer info (optional - don't fail if unavailable)
|
||||
ipfsInfo := queryIPFSPeerInfo()
|
||||
|
||||
// Try to get IPFS Cluster peer info (optional - don't fail if unavailable)
|
||||
ipfsClusterInfo := queryIPFSClusterPeerInfo()
|
||||
|
||||
return &NetworkStatus{
|
||||
NodeID: host.ID().String(),
|
||||
PeerID: host.ID().String(),
|
||||
Connected: true,
|
||||
PeerCount: len(connectedPeers),
|
||||
DatabaseSize: dbSize,
|
||||
Uptime: time.Since(n.client.startTime),
|
||||
IPFS: ipfsInfo,
|
||||
IPFSCluster: ipfsClusterInfo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// queryIPFSPeerInfo queries the local IPFS API for peer information
|
||||
// Returns nil if IPFS is not running or unavailable
|
||||
func queryIPFSPeerInfo() *IPFSPeerInfo {
|
||||
// IPFS API typically runs on port 4501 in our setup
|
||||
client := &http.Client{Timeout: 2 * time.Second}
|
||||
resp, err := client.Post("http://localhost:4501/api/v0/id", "", nil)
|
||||
if err != nil {
|
||||
return nil // IPFS not available
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"ID"`
|
||||
Addresses []string `json:"Addresses"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter addresses to only include public/routable ones
|
||||
var swarmAddrs []string
|
||||
for _, addr := range result.Addresses {
|
||||
// Skip loopback addresses - only keep routable addresses for external discovery
|
||||
if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
|
||||
swarmAddrs = append(swarmAddrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
return &IPFSPeerInfo{
|
||||
PeerID: result.ID,
|
||||
SwarmAddresses: swarmAddrs,
|
||||
}
|
||||
}
|
||||
|
||||
// queryIPFSClusterPeerInfo queries the local IPFS Cluster API for peer information
|
||||
// Returns nil if IPFS Cluster is not running or unavailable
|
||||
func queryIPFSClusterPeerInfo() *IPFSClusterPeerInfo {
|
||||
// IPFS Cluster API typically runs on port 9094 in our setup
|
||||
client := &http.Client{Timeout: 2 * time.Second}
|
||||
resp, err := client.Get("http://localhost:9094/id")
|
||||
if err != nil {
|
||||
return nil // IPFS Cluster not available
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
|
||||
var result struct {
|
||||
ID string `json:"id"`
|
||||
Addresses []string `json:"addresses"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter addresses to only include public/routable ones for cluster discovery
|
||||
var clusterAddrs []string
|
||||
for _, addr := range result.Addresses {
|
||||
// Skip loopback addresses - only keep routable addresses
|
||||
if !strings.Contains(addr, "127.0.0.1") && !strings.Contains(addr, "/ip6/::1") {
|
||||
clusterAddrs = append(clusterAddrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
return &IPFSClusterPeerInfo{
|
||||
PeerID: result.ID,
|
||||
Addresses: clusterAddrs,
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectToPeer connects to a specific peer
|
||||
func (n *NetworkInfoImpl) ConnectToPeer(ctx context.Context, peerAddr string) error {
|
||||
if !n.client.isConnected() {
|
||||
|
||||
@ -114,11 +114,26 @@ type PeerInfo struct {
|
||||
|
||||
// NetworkStatus contains overall network status
|
||||
type NetworkStatus struct {
|
||||
NodeID string `json:"node_id"`
|
||||
Connected bool `json:"connected"`
|
||||
PeerCount int `json:"peer_count"`
|
||||
DatabaseSize int64 `json:"database_size"`
|
||||
Uptime time.Duration `json:"uptime"`
|
||||
NodeID string `json:"node_id"`
|
||||
PeerID string `json:"peer_id"`
|
||||
Connected bool `json:"connected"`
|
||||
PeerCount int `json:"peer_count"`
|
||||
DatabaseSize int64 `json:"database_size"`
|
||||
Uptime time.Duration `json:"uptime"`
|
||||
IPFS *IPFSPeerInfo `json:"ipfs,omitempty"`
|
||||
IPFSCluster *IPFSClusterPeerInfo `json:"ipfs_cluster,omitempty"`
|
||||
}
|
||||
|
||||
// IPFSPeerInfo contains IPFS peer information for discovery
|
||||
type IPFSPeerInfo struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
SwarmAddresses []string `json:"swarm_addresses"`
|
||||
}
|
||||
|
||||
// IPFSClusterPeerInfo contains IPFS Cluster peer information for cluster discovery
|
||||
type IPFSClusterPeerInfo struct {
|
||||
PeerID string `json:"peer_id"` // Cluster peer ID (different from IPFS peer ID)
|
||||
Addresses []string `json:"addresses"` // Cluster multiaddresses (e.g., /ip4/x.x.x.x/tcp/9098)
|
||||
}
|
||||
|
||||
// HealthStatus contains health check information
|
||||
@ -158,7 +173,7 @@ type StorageStatus struct {
|
||||
type ClientConfig struct {
|
||||
AppName string `json:"app_name"`
|
||||
DatabaseName string `json:"database_name"`
|
||||
BootstrapPeers []string `json:"bootstrap_peers"`
|
||||
BootstrapPeers []string `json:"peers"`
|
||||
DatabaseEndpoints []string `json:"database_endpoints"`
|
||||
GatewayURL string `json:"gateway_url"` // Gateway URL for HTTP API access (e.g., "http://localhost:6001")
|
||||
ConnectTimeout time.Duration `json:"connect_timeout"`
|
||||
|
||||
@ -8,20 +8,21 @@ import (
|
||||
|
||||
// Config represents the main configuration for a network node
|
||||
type Config struct {
|
||||
Node NodeConfig `yaml:"node"`
|
||||
Database DatabaseConfig `yaml:"database"`
|
||||
Discovery DiscoveryConfig `yaml:"discovery"`
|
||||
Security SecurityConfig `yaml:"security"`
|
||||
Logging LoggingConfig `yaml:"logging"`
|
||||
Node NodeConfig `yaml:"node"`
|
||||
Database DatabaseConfig `yaml:"database"`
|
||||
Discovery DiscoveryConfig `yaml:"discovery"`
|
||||
Security SecurityConfig `yaml:"security"`
|
||||
Logging LoggingConfig `yaml:"logging"`
|
||||
HTTPGateway HTTPGatewayConfig `yaml:"http_gateway"`
|
||||
}
|
||||
|
||||
// NodeConfig contains node-specific configuration
|
||||
type NodeConfig struct {
|
||||
ID string `yaml:"id"` // Auto-generated if empty
|
||||
Type string `yaml:"type"` // "bootstrap" or "node"
|
||||
ListenAddresses []string `yaml:"listen_addresses"` // LibP2P listen addresses
|
||||
DataDir string `yaml:"data_dir"` // Data directory
|
||||
MaxConnections int `yaml:"max_connections"` // Maximum peer connections
|
||||
Domain string `yaml:"domain"` // Domain for this node (e.g., node-1.orama.network)
|
||||
}
|
||||
|
||||
// DatabaseConfig contains database-related configuration
|
||||
@ -37,6 +38,13 @@ type DatabaseConfig struct {
|
||||
RQLiteRaftPort int `yaml:"rqlite_raft_port"` // RQLite Raft consensus port
|
||||
RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster
|
||||
|
||||
// RQLite node-to-node TLS encryption (for inter-node Raft communication)
|
||||
// See: https://rqlite.io/docs/guides/security/#encrypting-node-to-node-communication
|
||||
NodeCert string `yaml:"node_cert"` // Path to X.509 certificate for node-to-node communication
|
||||
NodeKey string `yaml:"node_key"` // Path to X.509 private key for node-to-node communication
|
||||
NodeCACert string `yaml:"node_ca_cert"` // Path to CA certificate (optional, uses system CA if not set)
|
||||
NodeNoVerify bool `yaml:"node_no_verify"` // Skip certificate verification (for testing/self-signed certs)
|
||||
|
||||
// Dynamic discovery configuration (always enabled)
|
||||
ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s
|
||||
PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h
|
||||
@ -75,9 +83,9 @@ type IPFSConfig struct {
|
||||
|
||||
// DiscoveryConfig contains peer discovery configuration
|
||||
type DiscoveryConfig struct {
|
||||
BootstrapPeers []string `yaml:"bootstrap_peers"` // Bootstrap peer addresses
|
||||
BootstrapPeers []string `yaml:"bootstrap_peers"` // Peer addresses to connect to
|
||||
DiscoveryInterval time.Duration `yaml:"discovery_interval"` // Discovery announcement interval
|
||||
BootstrapPort int `yaml:"bootstrap_port"` // Default port for bootstrap nodes
|
||||
BootstrapPort int `yaml:"bootstrap_port"` // Default port for peer discovery
|
||||
HttpAdvAddress string `yaml:"http_adv_address"` // HTTP advertisement address
|
||||
RaftAdvAddress string `yaml:"raft_adv_address"` // Raft advertisement
|
||||
NodeNamespace string `yaml:"node_namespace"` // Namespace for node identifiers
|
||||
@ -97,6 +105,56 @@ type LoggingConfig struct {
|
||||
OutputFile string `yaml:"output_file"` // Empty for stdout
|
||||
}
|
||||
|
||||
// HTTPGatewayConfig contains HTTP reverse proxy gateway configuration
|
||||
type HTTPGatewayConfig struct {
|
||||
Enabled bool `yaml:"enabled"` // Enable HTTP gateway
|
||||
ListenAddr string `yaml:"listen_addr"` // Address to listen on (e.g., ":8080")
|
||||
NodeName string `yaml:"node_name"` // Node name for routing
|
||||
Routes map[string]RouteConfig `yaml:"routes"` // Service routes
|
||||
HTTPS HTTPSConfig `yaml:"https"` // HTTPS/TLS configuration
|
||||
SNI SNIConfig `yaml:"sni"` // SNI-based TCP routing configuration
|
||||
|
||||
// Full gateway configuration (for API, auth, pubsub)
|
||||
ClientNamespace string `yaml:"client_namespace"` // Namespace for network client
|
||||
RQLiteDSN string `yaml:"rqlite_dsn"` // RQLite database DSN
|
||||
OlricServers []string `yaml:"olric_servers"` // List of Olric server addresses
|
||||
OlricTimeout time.Duration `yaml:"olric_timeout"` // Timeout for Olric operations
|
||||
IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"` // IPFS Cluster API URL
|
||||
IPFSAPIURL string `yaml:"ipfs_api_url"` // IPFS API URL
|
||||
IPFSTimeout time.Duration `yaml:"ipfs_timeout"` // Timeout for IPFS operations
|
||||
}
|
||||
|
||||
// HTTPSConfig contains HTTPS/TLS configuration for the gateway
|
||||
type HTTPSConfig struct {
|
||||
Enabled bool `yaml:"enabled"` // Enable HTTPS (port 443)
|
||||
Domain string `yaml:"domain"` // Primary domain (e.g., node-123.orama.network)
|
||||
AutoCert bool `yaml:"auto_cert"` // Use Let's Encrypt for automatic certificate
|
||||
UseSelfSigned bool `yaml:"use_self_signed"` // Use self-signed certificates (pre-generated)
|
||||
CertFile string `yaml:"cert_file"` // Path to certificate file (if not using auto_cert)
|
||||
KeyFile string `yaml:"key_file"` // Path to key file (if not using auto_cert)
|
||||
CacheDir string `yaml:"cache_dir"` // Directory for Let's Encrypt certificate cache
|
||||
HTTPPort int `yaml:"http_port"` // HTTP port for ACME challenge (default: 80)
|
||||
HTTPSPort int `yaml:"https_port"` // HTTPS port (default: 443)
|
||||
Email string `yaml:"email"` // Email for Let's Encrypt account
|
||||
}
|
||||
|
||||
// SNIConfig contains SNI-based TCP routing configuration for port 7001
|
||||
type SNIConfig struct {
|
||||
Enabled bool `yaml:"enabled"` // Enable SNI-based TCP routing
|
||||
ListenAddr string `yaml:"listen_addr"` // Address to listen on (e.g., ":7001")
|
||||
Routes map[string]string `yaml:"routes"` // SNI hostname -> backend address mapping
|
||||
CertFile string `yaml:"cert_file"` // Path to certificate file
|
||||
KeyFile string `yaml:"key_file"` // Path to key file
|
||||
}
|
||||
|
||||
// RouteConfig defines a single reverse proxy route
|
||||
type RouteConfig struct {
|
||||
PathPrefix string `yaml:"path_prefix"` // URL path prefix (e.g., "/rqlite/http")
|
||||
BackendURL string `yaml:"backend_url"` // Backend service URL
|
||||
Timeout time.Duration `yaml:"timeout"` // Request timeout
|
||||
WebSocket bool `yaml:"websocket"` // Support WebSocket upgrades
|
||||
}
|
||||
|
||||
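Putting the new gateway types together, a node-local configuration might be populated like this in Go. Every address, domain, and path below is a placeholder rather than a shipped default, and the YAML tags on the structs mean the same shape can equally come from a config file.

package config

import "time"

// ExampleHTTPGatewayConfig is an illustrative value, not a shipped default.
var ExampleHTTPGatewayConfig = HTTPGatewayConfig{
	Enabled:    true,
	ListenAddr: ":8080",
	NodeName:   "node-1",
	Routes: map[string]RouteConfig{
		"rqlite-http": {
			PathPrefix: "/rqlite/http",
			BackendURL: "http://localhost:5001",
			Timeout:    30 * time.Second,
		},
	},
	HTTPS: HTTPSConfig{
		Enabled:   true,
		Domain:    "node-1.orama.network",
		AutoCert:  true,
		CacheDir:  "/home/debros/.orama/certs",
		HTTPPort:  80,
		HTTPSPort: 443,
		Email:     "ops@example.com",
	},
	SNI: SNIConfig{
		Enabled:    true,
		ListenAddr: ":7001",
		Routes: map[string]string{
			// Backend address is a placeholder for wherever the TCP service lives.
			"node-1.orama.network": "127.0.0.1:7002",
		},
	},
}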
// ClientConfig represents configuration for network clients
|
||||
type ClientConfig struct {
|
||||
AppName string `yaml:"app_name"`
|
||||
@ -123,7 +181,6 @@ func (c *Config) ParseMultiaddrs() ([]multiaddr.Multiaddr, error) {
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Node: NodeConfig{
|
||||
Type: "node",
|
||||
ListenAddresses: []string{
|
||||
"/ip4/0.0.0.0/tcp/4001", // TCP only - compatible with Anyone proxy/SOCKS5
|
||||
},
|
||||
@ -140,7 +197,7 @@ func DefaultConfig() *Config {
|
||||
// RQLite-specific configuration
|
||||
RQLitePort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
RQLiteJoinAddress: "", // Empty for bootstrap node
|
||||
RQLiteJoinAddress: "", // Empty for first node (creates cluster)
|
||||
|
||||
// Dynamic discovery (always enabled)
|
||||
ClusterSyncInterval: 30 * time.Second,
|
||||
@ -175,5 +232,18 @@ func DefaultConfig() *Config {
|
||||
Level: "info",
|
||||
Format: "console",
|
||||
},
|
||||
HTTPGateway: HTTPGatewayConfig{
|
||||
Enabled: true,
|
||||
ListenAddr: ":8080",
|
||||
NodeName: "default",
|
||||
Routes: make(map[string]RouteConfig),
|
||||
ClientNamespace: "default",
|
||||
RQLiteDSN: "http://localhost:5001",
|
||||
OlricServers: []string{"localhost:3320"},
|
||||
OlricTimeout: 10 * time.Second,
|
||||
IPFSClusterAPIURL: "http://localhost:9094",
|
||||
IPFSAPIURL: "http://localhost:5001",
|
||||
IPFSTimeout: 60 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
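To make the new gateway section concrete, here is a minimal sketch of a populated HTTPGatewayConfig. The field names come from the structs above; the route name, backend URL, and node name are illustrative assumptions, not values shipped by this change.

// Illustrative wiring of the gateway config; backend addresses are placeholders.
func exampleGatewayConfig() HTTPGatewayConfig {
	return HTTPGatewayConfig{
		Enabled:    true,
		ListenAddr: ":8080",
		NodeName:   "node-1",
		Routes: map[string]RouteConfig{
			"rqlite-http": {
				PathPrefix: "/rqlite/http",
				BackendURL: "http://localhost:5001",
				Timeout:    10 * time.Second,
				WebSocket:  false,
			},
		},
		HTTPS:           HTTPSConfig{Enabled: false},
		SNI:             SNIConfig{Enabled: false},
		ClientNamespace: "default",
		RQLiteDSN:       "http://localhost:5001",
		OlricServers:    []string{"localhost:3320"},
		OlricTimeout:    10 * time.Second,
	}
}
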
@ -6,13 +6,13 @@ import (
"path/filepath"
)

// ConfigDir returns the path to the DeBros config directory (~/.debros).
// ConfigDir returns the path to the DeBros config directory (~/.orama).
func ConfigDir() (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", fmt.Errorf("failed to determine home directory: %w", err)
}
return filepath.Join(home, ".debros"), nil
return filepath.Join(home, ".orama"), nil
}

// EnsureConfigDir creates the config directory if it does not exist.
@ -28,11 +28,50 @@ func EnsureConfigDir() (string, error) {
}

// DefaultPath returns the path to the config file for the given component name.
// component should be e.g., "node.yaml", "bootstrap.yaml", "gateway.yaml"
// component should be e.g., "node.yaml", "gateway.yaml"
// It checks ~/.orama/data/, ~/.orama/configs/, and ~/.orama/ for backward compatibility.
// If component is already an absolute path, it returns it as-is.
func DefaultPath(component string) (string, error) {
// If component is already an absolute path, return it directly
if filepath.IsAbs(component) {
return component, nil
}

dir, err := ConfigDir()
if err != nil {
return "", err
}
return filepath.Join(dir, component), nil

var gatewayDefault string
// For gateway.yaml, check data/ directory first (production location)
if component == "gateway.yaml" {
dataPath := filepath.Join(dir, "data", component)
if _, err := os.Stat(dataPath); err == nil {
return dataPath, nil
}
// Remember the preferred default so we can still fall back to legacy paths
gatewayDefault = dataPath
}

// First check in ~/.orama/configs/ (production installer location)
configsPath := filepath.Join(dir, "configs", component)
if _, err := os.Stat(configsPath); err == nil {
return configsPath, nil
}

// Fallback to ~/.orama/ (legacy/development location)
legacyPath := filepath.Join(dir, component)
if _, err := os.Stat(legacyPath); err == nil {
return legacyPath, nil
}

if gatewayDefault != "" {
// If we preferred the data path (gateway.yaml) but didn't find it anywhere else,
// return the data path so error messages point to the production location.
return gatewayDefault, nil
}

// Return configs path as default (even if it doesn't exist yet)
// This allows the error message to show the expected production location
return configsPath, nil
}

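A short usage sketch of the lookup order implemented above. Only DefaultPath and ConfigDir come from this diff; the call sites, paths, and the fmt/log imports are assumptions for illustration.

// Resolves gateway.yaml by trying ~/.orama/data/, then ~/.orama/configs/,
// then ~/.orama/, and otherwise reporting the production location as the default.
path, err := DefaultPath("gateway.yaml")
if err != nil {
	log.Fatalf("cannot resolve config path: %v", err)
}
fmt.Println("loading gateway config from", path)

// Absolute paths bypass the lookup entirely.
abs, _ := DefaultPath("/etc/orama/node.yaml")
fmt.Println(abs) // prints /etc/orama/node.yaml unchanged
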
@ -15,7 +15,7 @@ import (
|
||||
|
||||
// ValidationError represents a single validation error with context.
|
||||
type ValidationError struct {
|
||||
Path string // e.g., "discovery.bootstrap_peers[0]"
|
||||
Path string // e.g., "discovery.bootstrap_peers[0]" or "discovery.peers[0]"
|
||||
Message string // e.g., "invalid multiaddr"
|
||||
Hint string // e.g., "expected /ip{4,6}/.../tcp/<port>/p2p/<peerID>"
|
||||
}
|
||||
@ -61,14 +61,6 @@ func (c *Config) validateNode() []error {
|
||||
})
|
||||
}
|
||||
|
||||
// Validate type
|
||||
if nc.Type != "bootstrap" && nc.Type != "node" {
|
||||
errs = append(errs, ValidationError{
|
||||
Path: "node.type",
|
||||
Message: fmt.Sprintf("must be one of [bootstrap node]; got %q", nc.Type),
|
||||
})
|
||||
}
|
||||
|
||||
// Validate listen_addresses
|
||||
if len(nc.ListenAddresses) == 0 {
|
||||
errs = append(errs, ValidationError{
|
||||
@ -218,27 +210,14 @@ func (c *Config) validateDatabase() []error {
})
}

// Validate rqlite_join_address context-dependently
if c.Node.Type == "node" {
if dc.RQLiteJoinAddress == "" {
// Validate rqlite_join_address format if provided (optional for all nodes)
// The first node in a cluster won't have a join address; subsequent nodes will
if dc.RQLiteJoinAddress != "" {
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "required for node type (non-bootstrap)",
})
} else {
if err := validateHostPort(dc.RQLiteJoinAddress); err != nil {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: err.Error(),
Hint: "expected format: host:port",
})
}
}
} else if c.Node.Type == "bootstrap" {
if dc.RQLiteJoinAddress != "" {
errs = append(errs, ValidationError{
Path: "database.rqlite_join_address",
Message: "must be empty for bootstrap type",
Message: err.Error(),
Hint: "expected format: host:port",
})
}
}
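validateHostPort itself is not part of this diff. As a rough sketch of the kind of check it performs (an assumption, since the real rules live elsewhere in the repository), a host:port validator can be built on net.SplitHostPort:

import (
	"fmt"
	"net"
	"strconv"
)

// checkHostPort is an illustrative stand-in for validateHostPort: it accepts
// "localhost:5001" but rejects "localhost" and out-of-range ports like 99999,
// matching the test cases further down in this change.
func checkHostPort(addr string) error {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("expected host:port, got %q: %w", addr, err)
	}
	if host == "" {
		return fmt.Errorf("missing host in %q", addr)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil || port < 1 || port > 65535 {
		return fmt.Errorf("invalid port %q in %q", portStr, addr)
	}
	return nil
}
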
@ -292,7 +271,7 @@ func (c *Config) validateDiscovery() []error {
|
||||
})
|
||||
}
|
||||
|
||||
// Validate bootstrap_port
|
||||
// Validate peer discovery port
|
||||
if disc.BootstrapPort < 1 || disc.BootstrapPort > 65535 {
|
||||
errs = append(errs, ValidationError{
|
||||
Path: "discovery.bootstrap_port",
|
||||
@ -300,17 +279,8 @@ func (c *Config) validateDiscovery() []error {
|
||||
})
|
||||
}
|
||||
|
||||
// Validate bootstrap_peers context-dependently
|
||||
if c.Node.Type == "node" {
|
||||
if len(disc.BootstrapPeers) == 0 {
|
||||
errs = append(errs, ValidationError{
|
||||
Path: "discovery.bootstrap_peers",
|
||||
Message: "required for node type (must not be empty)",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Validate each bootstrap peer multiaddr
|
||||
// Validate peer addresses (optional - all nodes are unified peers now)
|
||||
// Validate each peer multiaddr
|
||||
seenPeers := make(map[string]bool)
|
||||
for i, peer := range disc.BootstrapPeers {
|
||||
path := fmt.Sprintf("discovery.bootstrap_peers[%d]", i)
|
||||
@ -358,7 +328,7 @@ func (c *Config) validateDiscovery() []error {
|
||||
if seenPeers[peer] {
|
||||
errs = append(errs, ValidationError{
|
||||
Path: path,
|
||||
Message: "duplicate bootstrap peer",
|
||||
Message: "duplicate peer",
|
||||
})
|
||||
}
|
||||
seenPeers[peer] = true
|
||||
@ -481,27 +451,6 @@ func (c *Config) validateLogging() []error {
|
||||
|
||||
func (c *Config) validateCrossFields() []error {
|
||||
var errs []error
|
||||
|
||||
// If node.type is invalid, don't run cross-checks
|
||||
if c.Node.Type != "bootstrap" && c.Node.Type != "node" {
|
||||
return errs
|
||||
}
|
||||
|
||||
// Cross-check rqlite_join_address vs node type
|
||||
if c.Node.Type == "bootstrap" && c.Database.RQLiteJoinAddress != "" {
|
||||
errs = append(errs, ValidationError{
|
||||
Path: "database.rqlite_join_address",
|
||||
Message: "must be empty for bootstrap node type",
|
||||
})
|
||||
}
|
||||
|
||||
if c.Node.Type == "node" && c.Database.RQLiteJoinAddress == "" {
|
||||
errs = append(errs, ValidationError{
|
||||
Path: "database.rqlite_join_address",
|
||||
Message: "required for non-bootstrap node type",
|
||||
})
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
|
||||
@ -5,12 +5,11 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// validConfigForType returns a valid config for the given node type
|
||||
func validConfigForType(nodeType string) *Config {
|
||||
// validConfigForNode returns a valid config
|
||||
func validConfigForNode() *Config {
|
||||
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
||||
cfg := &Config{
|
||||
Node: NodeConfig{
|
||||
Type: nodeType,
|
||||
ID: "test-node-id",
|
||||
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4001"},
|
||||
DataDir: ".",
|
||||
@ -25,6 +24,7 @@ func validConfigForType(nodeType string) *Config {
|
||||
RQLitePort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
MinClusterSize: 1,
|
||||
RQLiteJoinAddress: "", // Optional - first node creates cluster, others join
|
||||
},
|
||||
Discovery: DiscoveryConfig{
|
||||
BootstrapPeers: []string{validPeer},
|
||||
@ -40,51 +40,9 @@ func validConfigForType(nodeType string) *Config {
|
||||
},
|
||||
}
|
||||
|
||||
// Set rqlite_join_address based on node type
|
||||
if nodeType == "node" {
|
||||
cfg.Database.RQLiteJoinAddress = "localhost:5001"
|
||||
// Node type requires bootstrap peers
|
||||
cfg.Discovery.BootstrapPeers = []string{validPeer}
|
||||
} else {
|
||||
// Bootstrap type: empty join address and peers optional
|
||||
cfg.Database.RQLiteJoinAddress = ""
|
||||
cfg.Discovery.BootstrapPeers = []string{}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func TestValidateNodeType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeType string
|
||||
shouldError bool
|
||||
}{
|
||||
{"bootstrap", "bootstrap", false},
|
||||
{"node", "node", false},
|
||||
{"invalid", "invalid-type", true},
|
||||
{"empty", "", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("bootstrap") // Start with valid bootstrap
|
||||
if tt.nodeType == "node" {
|
||||
cfg = validConfigForType("node")
|
||||
} else {
|
||||
cfg.Node.Type = tt.nodeType
|
||||
}
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
t.Errorf("expected error, got none")
|
||||
}
|
||||
if !tt.shouldError && len(errs) > 0 {
|
||||
t.Errorf("unexpected errors: %v", errs)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateListenAddresses(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@ -102,7 +60,7 @@ func TestValidateListenAddresses(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Node.ListenAddresses = tt.addresses
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -130,7 +88,7 @@ func TestValidateReplicationFactor(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Database.ReplicationFactor = tt.replication
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -160,7 +118,7 @@ func TestValidateRQLitePorts(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Database.RQLitePort = tt.httpPort
|
||||
cfg.Database.RQLiteRaftPort = tt.raftPort
|
||||
errs := cfg.Validate()
|
||||
@ -177,21 +135,18 @@ func TestValidateRQLitePorts(t *testing.T) {
|
||||
func TestValidateRQLiteJoinAddress(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeType string
|
||||
joinAddr string
|
||||
shouldError bool
|
||||
}{
|
||||
{"node with join", "node", "localhost:5001", false},
|
||||
{"node without join", "node", "", true},
|
||||
{"bootstrap with join", "bootstrap", "localhost:5001", true},
|
||||
{"bootstrap without join", "bootstrap", "", false},
|
||||
{"invalid join format", "node", "localhost", true},
|
||||
{"invalid join port", "node", "localhost:99999", true},
|
||||
{"node with join", "localhost:5001", false},
|
||||
{"node without join", "", false}, // Join address is optional (first node creates cluster)
|
||||
{"invalid join format", "localhost", true},
|
||||
{"invalid join port", "localhost:99999", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType(tt.nodeType)
|
||||
cfg := validConfigForNode()
|
||||
cfg.Database.RQLiteJoinAddress = tt.joinAddr
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -204,27 +159,24 @@ func TestValidateRQLiteJoinAddress(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateBootstrapPeers(t *testing.T) {
|
||||
func TestValidatePeerAddresses(t *testing.T) {
|
||||
validPeer := "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeType string
|
||||
peers []string
|
||||
shouldError bool
|
||||
}{
|
||||
{"node with peer", "node", []string{validPeer}, false},
|
||||
{"node without peer", "node", []string{}, true},
|
||||
{"bootstrap with peer", "bootstrap", []string{validPeer}, false},
|
||||
{"bootstrap without peer", "bootstrap", []string{}, false},
|
||||
{"invalid multiaddr", "node", []string{"invalid"}, true},
|
||||
{"missing p2p", "node", []string{"/ip4/127.0.0.1/tcp/4001"}, true},
|
||||
{"duplicate peer", "node", []string{validPeer, validPeer}, true},
|
||||
{"invalid port", "node", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true},
|
||||
{"node with peer", []string{validPeer}, false},
|
||||
{"node without peer", []string{}, false}, // All nodes are unified peers - bootstrap peers optional
|
||||
{"invalid multiaddr", []string{"invalid"}, true},
|
||||
{"missing p2p", []string{"/ip4/127.0.0.1/tcp/4001"}, true},
|
||||
{"duplicate peer", []string{validPeer, validPeer}, true},
|
||||
{"invalid port", []string{"/ip4/127.0.0.1/tcp/99999/p2p/12D3KooWHbcFcrGPXKUrHcxvd8MXEeUzRYyvY8fQcpEBxncSUwhj"}, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType(tt.nodeType)
|
||||
cfg := validConfigForNode()
|
||||
cfg.Discovery.BootstrapPeers = tt.peers
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -253,7 +205,7 @@ func TestValidateLoggingLevel(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Logging.Level = tt.level
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -280,7 +232,7 @@ func TestValidateLoggingFormat(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Logging.Format = tt.format
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -307,7 +259,7 @@ func TestValidateMaxConnections(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Node.MaxConnections = tt.maxConn
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -334,7 +286,7 @@ func TestValidateDiscoveryInterval(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Discovery.DiscoveryInterval = tt.interval
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -347,7 +299,7 @@ func TestValidateDiscoveryInterval(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateBootstrapPort(t *testing.T) {
|
||||
func TestValidatePeerDiscoveryPort(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
port int
|
||||
@ -361,7 +313,7 @@ func TestValidateBootstrapPort(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := validConfigForType("node")
|
||||
cfg := validConfigForNode()
|
||||
cfg.Discovery.BootstrapPort = tt.port
|
||||
errs := cfg.Validate()
|
||||
if tt.shouldError && len(errs) == 0 {
|
||||
@ -378,7 +330,6 @@ func TestValidateCompleteConfig(t *testing.T) {
|
||||
// Test a complete valid config
|
||||
validCfg := &Config{
|
||||
Node: NodeConfig{
|
||||
Type: "node",
|
||||
ID: "node1",
|
||||
ListenAddresses: []string{"/ip4/0.0.0.0/tcp/4002"},
|
||||
DataDir: ".",
|
||||
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
@ -115,35 +116,34 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) {
continue
}

// Filter addresses to only include configured listen addresses, not ephemeral ports
// Ephemeral ports are typically > 32768, so we filter those out
// Filter addresses to only include port 4001 (standard libp2p port)
// This prevents including non-libp2p service ports (like RQLite ports) in peer exchange
const libp2pPort = 4001
filteredAddrs := make([]multiaddr.Multiaddr, 0)
filteredCount := 0
for _, addr := range addrs {
// Extract TCP port from multiaddr
port, err := addr.ValueForProtocol(multiaddr.P_TCP)
if err == nil {
portNum, err := strconv.Atoi(port)
if err == nil {
// Only include ports that are reasonable (not ephemeral ports > 32768)
// Common LibP2P ports are typically < 10000
if portNum > 0 && portNum <= 32767 {
// Only include addresses with port 4001
if portNum == libp2pPort {
filteredAddrs = append(filteredAddrs, addr)
} else {
filteredCount++
}
} else {
// If we can't parse port, include it anyway (might be non-TCP)
filteredAddrs = append(filteredAddrs, addr)
}
// Skip addresses with unparseable ports
} else {
// If no TCP port found, include it anyway (might be non-TCP)
filteredAddrs = append(filteredAddrs, addr)
// Skip non-TCP addresses (libp2p uses TCP)
filteredCount++
}
}

// If no addresses remain after filtering, skip this peer
// (Filtering is routine - no need to log every occurrence)
if len(filteredAddrs) == 0 {
d.logger.Debug("No valid addresses after filtering ephemeral ports",
zap.String("peer_id", pid.String()[:8]+"..."),
zap.Int("original_count", len(addrs)))
continue
}

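The filtering logic above stays inline in the stream handler; for clarity, the same idea expressed as a standalone helper (a sketch only, using the go-multiaddr calls this file already imports):

// filterByTCPPort keeps only multiaddrs whose TCP port equals want
// (4001 for the standard libp2p listener); non-TCP addresses are dropped.
func filterByTCPPort(addrs []multiaddr.Multiaddr, want int) []multiaddr.Multiaddr {
	out := make([]multiaddr.Multiaddr, 0, len(addrs))
	for _, addr := range addrs {
		portStr, err := addr.ValueForProtocol(multiaddr.P_TCP)
		if err != nil {
			continue // no TCP component
		}
		if port, err := strconv.Atoi(portStr); err == nil && port == want {
			out = append(out, addr)
		}
	}
	return out
}
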
@ -177,9 +177,7 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) {
|
||||
return
|
||||
}
|
||||
|
||||
d.logger.Debug("Sent peer exchange response",
|
||||
zap.Int("peer_count", len(resp.Peers)),
|
||||
zap.Bool("has_rqlite_metadata", resp.RQLiteMetadata != nil))
|
||||
// Response sent - routine operation, no need to log
|
||||
}
|
||||
|
||||
// Start begins periodic peer discovery
|
||||
@ -216,15 +214,12 @@ func (d *Manager) Stop() {
|
||||
}
|
||||
|
||||
// discoverPeers discovers and connects to new peers using non-DHT strategies:
|
||||
// - Peerstore entries (bootstrap peers added to peerstore by the caller)
|
||||
// - Peerstore entries (peers added to peerstore by the caller)
|
||||
// - Peer exchange: query currently connected peers' peerstore entries
|
||||
func (d *Manager) discoverPeers(ctx context.Context, config Config) {
|
||||
connectedPeers := d.host.Network().Peers()
|
||||
initialCount := len(connectedPeers)
|
||||
|
||||
d.logger.Debug("Starting peer discovery",
|
||||
zap.Int("current_peers", initialCount))
|
||||
|
||||
newConnections := 0
|
||||
|
||||
// Strategy 1: Try to connect to peers learned from the host's peerstore
|
||||
@ -237,16 +232,17 @@ func (d *Manager) discoverPeers(ctx context.Context, config Config) {
|
||||
|
||||
finalPeerCount := len(d.host.Network().Peers())
|
||||
|
||||
// Summary log: only log if there were changes or new connections
|
||||
if newConnections > 0 || finalPeerCount != initialCount {
|
||||
d.logger.Debug("Peer discovery completed",
|
||||
zap.Int("new_connections", newConnections),
|
||||
zap.Int("initial_peers", initialCount),
|
||||
zap.Int("final_peers", finalPeerCount))
|
||||
d.logger.Debug("Discovery summary",
|
||||
zap.Int("connected", finalPeerCount),
|
||||
zap.Int("new", newConnections),
|
||||
zap.Int("was", initialCount))
|
||||
}
|
||||
}
|
||||
|
||||
// discoverViaPeerstore attempts to connect to peers found in the host's peerstore.
|
||||
// This is useful for bootstrap peers that have been pre-populated into the peerstore.
|
||||
// This is useful for peers that have been pre-populated into the peerstore.
|
||||
func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int) int {
|
||||
if maxConnections <= 0 {
|
||||
return 0
|
||||
@ -256,7 +252,10 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
|
||||
|
||||
// Iterate over peerstore known peers
|
||||
peers := d.host.Peerstore().Peers()
|
||||
d.logger.Debug("Peerstore contains peers", zap.Int("count", len(peers)))
|
||||
|
||||
// Only connect to peers on our standard LibP2P port to avoid cross-connecting
|
||||
// with IPFS/IPFS Cluster instances that use different ports
|
||||
const libp2pPort = 4001
|
||||
|
||||
for _, pid := range peers {
|
||||
if connected >= maxConnections {
|
||||
@ -271,6 +270,24 @@ func (d *Manager) discoverViaPeerstore(ctx context.Context, maxConnections int)
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter peers to only include those with addresses on our port (4001)
|
||||
// This prevents attempting to connect to IPFS (port 4101) or IPFS Cluster (port 9096/9098)
|
||||
peerInfo := d.host.Peerstore().PeerInfo(pid)
|
||||
hasValidPort := false
|
||||
for _, addr := range peerInfo.Addrs {
|
||||
if port, err := addr.ValueForProtocol(multiaddr.P_TCP); err == nil {
|
||||
if portNum, err := strconv.Atoi(port); err == nil && portNum == libp2pPort {
|
||||
hasValidPort = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip peers without valid port 4001 addresses
|
||||
if !hasValidPort {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to connect
|
||||
if err := d.connectToPeer(ctx, pid); err == nil {
|
||||
connected++
|
||||
@ -293,8 +310,8 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
||||
return 0
|
||||
}
|
||||
|
||||
d.logger.Debug("Starting peer exchange with connected peers",
|
||||
zap.Int("num_peers", len(connectedPeers)))
|
||||
exchangedPeers := 0
|
||||
metadataCollected := 0
|
||||
|
||||
for _, peerID := range connectedPeers {
|
||||
if connected >= maxConnections {
|
||||
@ -307,9 +324,13 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
||||
continue
|
||||
}
|
||||
|
||||
d.logger.Debug("Received peer list from peer",
|
||||
zap.String("from_peer", peerID.String()[:8]+"..."),
|
||||
zap.Int("peer_count", len(peers)))
|
||||
exchangedPeers++
|
||||
// Check if we got RQLite metadata
|
||||
if val, err := d.host.Peerstore().Get(peerID, "rqlite_metadata"); err == nil {
|
||||
if _, ok := val.([]byte); ok {
|
||||
metadataCollected++
|
||||
}
|
||||
}
|
||||
|
||||
// Try to connect to discovered peers
|
||||
for _, peerInfo := range peers {
|
||||
@ -334,7 +355,8 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse addresses
|
||||
// Parse and filter addresses to only include port 4001 (standard libp2p port)
|
||||
const libp2pPort = 4001
|
||||
addrs := make([]multiaddr.Multiaddr, 0, len(peerInfo.Addrs))
|
||||
for _, addrStr := range peerInfo.Addrs {
|
||||
ma, err := multiaddr.NewMultiaddr(addrStr)
|
||||
@ -342,14 +364,24 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
||||
d.logger.Debug("Failed to parse multiaddr", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
addrs = append(addrs, ma)
|
||||
// Only include addresses with port 4001
|
||||
port, err := ma.ValueForProtocol(multiaddr.P_TCP)
|
||||
if err == nil {
|
||||
portNum, err := strconv.Atoi(port)
|
||||
if err == nil && portNum == libp2pPort {
|
||||
addrs = append(addrs, ma)
|
||||
}
|
||||
// Skip addresses with wrong ports
|
||||
}
|
||||
// Skip non-TCP addresses
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
// Skip peers without valid addresses - no need to log every occurrence
|
||||
continue
|
||||
}
|
||||
|
||||
// Add to peerstore
|
||||
// Add to peerstore (only valid addresses with port 4001)
|
||||
d.host.Peerstore().AddAddrs(parsedID, addrs, time.Hour*24)
|
||||
|
||||
// Try to connect
|
||||
@ -358,20 +390,29 @@ func (d *Manager) discoverViaPeerExchange(ctx context.Context, maxConnections in
|
||||
|
||||
if err := d.host.Connect(connectCtx, peerAddrInfo); err != nil {
|
||||
cancel()
|
||||
d.logger.Debug("Failed to connect to discovered peer",
|
||||
zap.String("peer_id", parsedID.String()[:8]+"..."),
|
||||
// Only log connection failures for debugging - errors are still useful
|
||||
d.logger.Debug("Connect failed",
|
||||
zap.String("peer", parsedID.String()[:8]+"..."),
|
||||
zap.Error(err))
|
||||
continue
|
||||
}
|
||||
cancel()
|
||||
|
||||
d.logger.Info("Successfully connected to discovered peer",
|
||||
zap.String("peer_id", parsedID.String()[:8]+"..."),
|
||||
zap.String("discovered_from", peerID.String()[:8]+"..."))
|
||||
d.logger.Info("Connected",
|
||||
zap.String("peer", parsedID.String()[:8]+"..."),
|
||||
zap.String("from", peerID.String()[:8]+"..."))
|
||||
connected++
|
||||
}
|
||||
}
|
||||
|
||||
// Summary log for peer exchange
|
||||
if exchangedPeers > 0 {
|
||||
d.logger.Debug("Exchange summary",
|
||||
zap.Int("exchanged_with", exchangedPeers),
|
||||
zap.Int("metadata_collected", metadataCollected),
|
||||
zap.Int("new_connections", connected))
|
||||
}
|
||||
|
||||
return connected
|
||||
}
|
||||
|
||||
@ -380,11 +421,20 @@ func (d *Manager) requestPeersFromPeer(ctx context.Context, peerID peer.ID, limi
|
||||
// Open a stream to the peer
|
||||
stream, err := d.host.NewStream(ctx, peerID, PeerExchangeProtocol)
|
||||
if err != nil {
|
||||
// Suppress repeated warnings for the same peer (log once per minute max)
|
||||
// Check if this is a "protocols not supported" error (expected for lightweight clients like gateway)
|
||||
if strings.Contains(err.Error(), "protocols not supported") {
|
||||
// This is a lightweight client (gateway, etc.) that doesn't support peer exchange - expected behavior
|
||||
// Track it to avoid repeated attempts, but don't log as it's not an error
|
||||
d.failedPeerExchanges[peerID] = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
// For actual connection errors, log but suppress repeated warnings for the same peer
|
||||
lastFailure, seen := d.failedPeerExchanges[peerID]
|
||||
if !seen || time.Since(lastFailure) > time.Minute {
|
||||
d.logger.Debug("Failed to open peer exchange stream",
|
||||
d.logger.Debug("Failed to open peer exchange stream with node",
|
||||
zap.String("peer_id", peerID.String()[:8]+"..."),
|
||||
zap.String("reason", "peer does not support peer exchange protocol or connection failed"),
|
||||
zap.Error(err))
|
||||
d.failedPeerExchanges[peerID] = time.Now()
|
||||
}
|
||||
@ -424,9 +474,10 @@ func (d *Manager) requestPeersFromPeer(ctx context.Context, peerID peer.ID, limi
|
||||
metadataJSON, err := json.Marshal(resp.RQLiteMetadata)
|
||||
if err == nil {
|
||||
_ = d.host.Peerstore().Put(peerID, "rqlite_metadata", metadataJSON)
|
||||
d.logger.Debug("Stored RQLite metadata from peer",
|
||||
zap.String("peer_id", peerID.String()[:8]+"..."),
|
||||
zap.String("node_id", resp.RQLiteMetadata.NodeID))
|
||||
// Only log when new metadata is stored (useful for debugging)
|
||||
d.logger.Debug("Metadata stored",
|
||||
zap.String("peer", peerID.String()[:8]+"..."),
|
||||
zap.String("node", resp.RQLiteMetadata.NodeID))
|
||||
}
|
||||
}
|
||||
|
||||
@ -442,9 +493,6 @@ func (d *Manager) TriggerPeerExchange(ctx context.Context) int {
|
||||
return 0
|
||||
}
|
||||
|
||||
d.logger.Info("Manually triggering peer exchange",
|
||||
zap.Int("connected_peers", len(connectedPeers)))
|
||||
|
||||
metadataCollected := 0
|
||||
for _, peerID := range connectedPeers {
|
||||
// Request peer list from this peer (which includes their RQLite metadata)
|
||||
@ -458,9 +506,9 @@ func (d *Manager) TriggerPeerExchange(ctx context.Context) int {
|
||||
}
|
||||
}
|
||||
|
||||
d.logger.Info("Peer exchange completed",
|
||||
zap.Int("peers_with_metadata", metadataCollected),
|
||||
zap.Int("total_peers", len(connectedPeers)))
|
||||
d.logger.Info("Exchange completed",
|
||||
zap.Int("peers", len(connectedPeers)),
|
||||
zap.Int("with_metadata", metadataCollected))
|
||||
|
||||
return metadataCollected
|
||||
}
|
||||
@ -480,8 +528,7 @@ func (d *Manager) connectToPeer(ctx context.Context, peerID peer.ID) error {
|
||||
return err
|
||||
}
|
||||
|
||||
d.logger.Debug("Successfully connected to peer",
|
||||
zap.String("peer_id", peerID.String()[:8]+"..."))
|
||||
// Connection success logged at higher level - no need for duplicate DEBUG log
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -9,7 +9,7 @@ type RQLiteNodeMetadata struct {
NodeID string `json:"node_id"` // RQLite node ID (from config)
RaftAddress string `json:"raft_address"` // Raft port address (e.g., "51.83.128.181:7001")
HTTPAddress string `json:"http_address"` // HTTP API address (e.g., "51.83.128.181:5001")
NodeType string `json:"node_type"` // "bootstrap" or "node"
NodeType string `json:"node_type"` // Node type identifier
RaftLogIndex uint64 `json:"raft_log_index"` // Current Raft log index (for data comparison)
LastSeen time.Time `json:"last_seen"` // Updated on every announcement
ClusterVersion string `json:"cluster_version"` // For compatibility checking

@ -87,26 +87,8 @@ type PortChecker struct {
}

// RequiredPorts defines all ports needed for dev environment
var RequiredPorts = []int{
// LibP2P
4001, 4002, 4003,
// IPFS API
4501, 4502, 4503,
// RQLite HTTP
5001, 5002, 5003,
// RQLite Raft
7001, 7002, 7003,
// Gateway
6001,
// Olric
3320, 3322,
// Anon SOCKS
9050,
// IPFS Cluster
9094, 9104, 9114,
// IPFS Gateway
8080, 8081, 8082,
}
// Computed from DefaultTopology
var RequiredPorts = DefaultTopology().AllPorts()

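DefaultTopology and AllPorts are defined elsewhere in the repository, so their exact shape is not visible in this diff. Purely as an assumption-labeled sketch, a topology that derives the required port set from the per-node fields referenced later in this change (P2PPort, RQLiteHTTPPort, RQLiteRaftPort, IPFSAPIPort, ClusterAPIPort, UnifiedGatewayPort) might look like:

// Hypothetical shape only; the real Topology tracks nine ports per node
// (per the updated test below), so this field list is abbreviated.
type exampleNodeSpec struct {
	Name               string
	P2PPort            int
	RQLiteHTTPPort     int
	RQLiteRaftPort     int
	IPFSAPIPort        int
	ClusterAPIPort     int
	UnifiedGatewayPort int
}

type exampleTopology struct {
	Nodes           []exampleNodeSpec
	GatewayPort     int
	OlricHTTPPort   int
	OlricMemberPort int
	AnonSOCKSPort   int
}

func (t *exampleTopology) allPorts() []int {
	ports := []int{t.GatewayPort, t.OlricHTTPPort, t.OlricMemberPort, t.AnonSOCKSPort}
	for _, n := range t.Nodes {
		ports = append(ports,
			n.P2PPort, n.RQLiteHTTPPort, n.RQLiteRaftPort,
			n.IPFSAPIPort, n.ClusterAPIPort, n.UnifiedGatewayPort)
	}
	return ports
}
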
// NewPortChecker creates a new port checker with required ports
|
||||
func NewPortChecker() *PortChecker {
|
||||
@ -150,28 +132,5 @@ func isPortAvailable(port int) bool {
|
||||
|
||||
// PortMap provides a human-readable mapping of ports to services
|
||||
func PortMap() map[int]string {
|
||||
return map[int]string{
|
||||
4001: "Bootstrap P2P",
|
||||
4002: "Node2 P2P",
|
||||
4003: "Node3 P2P",
|
||||
4501: "Bootstrap IPFS API",
|
||||
4502: "Node2 IPFS API",
|
||||
4503: "Node3 IPFS API",
|
||||
5001: "Bootstrap RQLite HTTP",
|
||||
5002: "Node2 RQLite HTTP",
|
||||
5003: "Node3 RQLite HTTP",
|
||||
7001: "Bootstrap RQLite Raft",
|
||||
7002: "Node2 RQLite Raft",
|
||||
7003: "Node3 RQLite Raft",
|
||||
6001: "Gateway",
|
||||
3320: "Olric HTTP API",
|
||||
3322: "Olric Memberlist",
|
||||
9050: "Anon SOCKS Proxy",
|
||||
9094: "Bootstrap IPFS Cluster",
|
||||
9104: "Node2 IPFS Cluster",
|
||||
9114: "Node3 IPFS Cluster",
|
||||
8080: "Bootstrap IPFS Gateway",
|
||||
8081: "Node2 IPFS Gateway",
|
||||
8082: "Node3 IPFS Gateway",
|
||||
}
|
||||
return DefaultTopology().PortMap()
|
||||
}
|
||||
|
||||
@ -17,7 +17,8 @@ func TestPortChecker(t *testing.T) {
|
||||
}
|
||||
|
||||
// Check that required port counts match expectations
|
||||
expectedPortCount := 22 // Based on RequiredPorts
|
||||
// 5 nodes × 9 ports per node + 4 shared ports = 49
|
||||
expectedPortCount := 49 // Based on RequiredPorts
|
||||
if len(checker.ports) != expectedPortCount {
|
||||
t.Errorf("Expected %d ports, got %d", expectedPortCount, len(checker.ports))
|
||||
}
|
||||
|
||||
@ -14,24 +14,24 @@ import (
|
||||
|
||||
// ConfigEnsurer handles all config file creation and validation
|
||||
type ConfigEnsurer struct {
|
||||
debrosDir string
|
||||
oramaDir string
|
||||
}
|
||||
|
||||
// NewConfigEnsurer creates a new config ensurer
|
||||
func NewConfigEnsurer(debrosDir string) *ConfigEnsurer {
|
||||
func NewConfigEnsurer(oramaDir string) *ConfigEnsurer {
|
||||
return &ConfigEnsurer{
|
||||
debrosDir: debrosDir,
|
||||
oramaDir: oramaDir,
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureAll ensures all necessary config files and secrets exist
|
||||
func (ce *ConfigEnsurer) EnsureAll() error {
|
||||
// Create directories
|
||||
if err := os.MkdirAll(ce.debrosDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create .debros directory: %w", err)
|
||||
if err := os.MkdirAll(ce.oramaDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create .orama directory: %w", err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Join(ce.debrosDir, "logs"), 0755); err != nil {
|
||||
if err := os.MkdirAll(filepath.Join(ce.oramaDir, "logs"), 0755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
@ -40,20 +40,30 @@ func (ce *ConfigEnsurer) EnsureAll() error {
|
||||
return fmt.Errorf("failed to ensure shared secrets: %w", err)
|
||||
}
|
||||
|
||||
// Ensure bootstrap config and identity
|
||||
if err := ce.ensureBootstrap(); err != nil {
|
||||
return fmt.Errorf("failed to ensure bootstrap: %w", err)
|
||||
// Load topology
|
||||
topology := DefaultTopology()
|
||||
|
||||
// Generate identities for first two nodes and collect their multiaddrs as peer addresses
|
||||
// All nodes use these addresses for initial peer discovery
|
||||
peerAddrs := []string{}
|
||||
for i := 0; i < 2 && i < len(topology.Nodes); i++ {
|
||||
nodeSpec := topology.Nodes[i]
|
||||
addr, err := ce.ensureNodeIdentity(nodeSpec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure identity for %s: %w", nodeSpec.Name, err)
|
||||
}
|
||||
peerAddrs = append(peerAddrs, addr)
|
||||
}
|
||||
|
||||
// Ensure node2 and node3 configs
|
||||
if err := ce.ensureNode2And3(); err != nil {
|
||||
return fmt.Errorf("failed to ensure nodes: %w", err)
|
||||
// Ensure configs for all nodes
|
||||
for _, nodeSpec := range topology.Nodes {
|
||||
if err := ce.ensureNodeConfig(nodeSpec, peerAddrs); err != nil {
|
||||
return fmt.Errorf("failed to ensure config for %s: %w", nodeSpec.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure gateway config
|
||||
if err := ce.ensureGateway(); err != nil {
|
||||
return fmt.Errorf("failed to ensure gateway: %w", err)
|
||||
}
|
||||
// Gateway configuration is now embedded in each node's config
|
||||
// No separate gateway.yaml needed anymore
|
||||
|
||||
// Ensure Olric config
|
||||
if err := ce.ensureOlric(); err != nil {
|
||||
@ -65,7 +75,7 @@ func (ce *ConfigEnsurer) EnsureAll() error {
|
||||
|
||||
// ensureSharedSecrets creates cluster secret and swarm key if they don't exist
|
||||
func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
||||
secretPath := filepath.Join(ce.debrosDir, "cluster-secret")
|
||||
secretPath := filepath.Join(ce.oramaDir, "cluster-secret")
|
||||
if _, err := os.Stat(secretPath); os.IsNotExist(err) {
|
||||
secret := generateRandomHex(64) // 64 hex chars = 32 bytes
|
||||
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
|
||||
@ -74,7 +84,7 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
||||
fmt.Printf("✓ Generated cluster secret\n")
|
||||
}
|
||||
|
||||
swarmKeyPath := filepath.Join(ce.debrosDir, "swarm.key")
|
||||
swarmKeyPath := filepath.Join(ce.oramaDir, "swarm.key")
|
||||
if _, err := os.Stat(swarmKeyPath); os.IsNotExist(err) {
|
||||
keyHex := strings.ToUpper(generateRandomHex(64))
|
||||
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", keyHex)
|
||||
@ -87,176 +97,104 @@ func (ce *ConfigEnsurer) ensureSharedSecrets() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureBootstrap creates bootstrap identity and config
|
||||
func (ce *ConfigEnsurer) ensureBootstrap() error {
|
||||
bootstrapDir := filepath.Join(ce.debrosDir, "bootstrap")
|
||||
identityPath := filepath.Join(bootstrapDir, "identity.key")
|
||||
// ensureNodeIdentity creates or loads a node identity and returns its multiaddr
|
||||
func (ce *ConfigEnsurer) ensureNodeIdentity(nodeSpec NodeSpec) (string, error) {
|
||||
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
|
||||
identityPath := filepath.Join(nodeDir, "identity.key")
|
||||
|
||||
// Create identity if missing
|
||||
var bootstrapPeerID string
|
||||
var peerID string
|
||||
if _, err := os.Stat(identityPath); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(bootstrapDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create bootstrap directory: %w", err)
|
||||
if err := os.MkdirAll(nodeDir, 0755); err != nil {
|
||||
return "", fmt.Errorf("failed to create node directory: %w", err)
|
||||
}
|
||||
|
||||
info, err := encryption.GenerateIdentity()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate bootstrap identity: %w", err)
|
||||
return "", fmt.Errorf("failed to generate identity: %w", err)
|
||||
}
|
||||
|
||||
if err := encryption.SaveIdentity(info, identityPath); err != nil {
|
||||
return fmt.Errorf("failed to save bootstrap identity: %w", err)
|
||||
return "", fmt.Errorf("failed to save identity: %w", err)
|
||||
}
|
||||
|
||||
bootstrapPeerID = info.PeerID.String()
|
||||
fmt.Printf("✓ Generated bootstrap identity (Peer ID: %s)\n", bootstrapPeerID)
|
||||
peerID = info.PeerID.String()
|
||||
fmt.Printf("✓ Generated %s identity (Peer ID: %s)\n", nodeSpec.Name, peerID)
|
||||
} else {
|
||||
info, err := encryption.LoadIdentity(identityPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load bootstrap identity: %w", err)
|
||||
return "", fmt.Errorf("failed to load identity: %w", err)
|
||||
}
|
||||
bootstrapPeerID = info.PeerID.String()
|
||||
peerID = info.PeerID.String()
|
||||
}
|
||||
|
||||
// Ensure bootstrap config - always regenerate to ensure template fixes are applied
|
||||
bootstrapConfigPath := filepath.Join(ce.debrosDir, "bootstrap.yaml")
|
||||
data := templates.BootstrapConfigData{
|
||||
NodeID: "bootstrap",
|
||||
P2PPort: 4001,
|
||||
DataDir: bootstrapDir,
|
||||
RQLiteHTTPPort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 4501,
|
||||
}
|
||||
|
||||
config, err := templates.RenderBootstrapConfig(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to render bootstrap config: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(bootstrapConfigPath, []byte(config), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write bootstrap config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("✓ Generated bootstrap.yaml\n")
|
||||
|
||||
return nil
|
||||
// Return multiaddr
return fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", nodeSpec.P2PPort, peerID), nil
}

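The multiaddr string returned here is what other nodes later use as a peer address. As a small illustration (not code from this repository), go-libp2p can turn such a string back into a dialable peer:

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

// addrInfoFromString parses "/ip4/127.0.0.1/tcp/4001/p2p/<peerID>" into the
// transport address plus peer ID that host.Connect expects.
func addrInfoFromString(s string) (*peer.AddrInfo, error) {
	ma, err := multiaddr.NewMultiaddr(s)
	if err != nil {
		return nil, fmt.Errorf("bad multiaddr %q: %w", s, err)
	}
	return peer.AddrInfoFromP2pAddr(ma)
}
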
// ensureNode2And3 creates node2 and node3 configs
|
||||
func (ce *ConfigEnsurer) ensureNode2And3() error {
|
||||
// Get bootstrap multiaddr for join
|
||||
bootstrapInfo, err := encryption.LoadIdentity(filepath.Join(ce.debrosDir, "bootstrap", "identity.key"))
|
||||
// ensureNodeConfig creates or updates a node configuration
|
||||
func (ce *ConfigEnsurer) ensureNodeConfig(nodeSpec NodeSpec, peerAddrs []string) error {
|
||||
nodeDir := filepath.Join(ce.oramaDir, nodeSpec.DataDir)
|
||||
configPath := filepath.Join(ce.oramaDir, nodeSpec.ConfigFilename)
|
||||
|
||||
if err := os.MkdirAll(nodeDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create node directory: %w", err)
|
||||
}
|
||||
|
||||
// Generate node config (all nodes are unified)
|
||||
data := templates.NodeConfigData{
|
||||
NodeID: nodeSpec.Name,
|
||||
P2PPort: nodeSpec.P2PPort,
|
||||
DataDir: nodeDir,
|
||||
RQLiteHTTPPort: nodeSpec.RQLiteHTTPPort,
|
||||
RQLiteRaftPort: nodeSpec.RQLiteRaftPort,
|
||||
RQLiteJoinAddress: nodeSpec.RQLiteJoinTarget,
|
||||
BootstrapPeers: peerAddrs,
|
||||
ClusterAPIPort: nodeSpec.ClusterAPIPort,
|
||||
IPFSAPIPort: nodeSpec.IPFSAPIPort,
|
||||
UnifiedGatewayPort: nodeSpec.UnifiedGatewayPort,
|
||||
}
|
||||
|
||||
config, err := templates.RenderNodeConfig(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load bootstrap identity: %w", err)
|
||||
return fmt.Errorf("failed to render node config: %w", err)
|
||||
}
|
||||
|
||||
bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
|
||||
|
||||
nodes := []struct {
|
||||
name string
|
||||
p2pPort int
|
||||
rqliteHTTPPort int
|
||||
rqliteRaftPort int
|
||||
clusterAPIPort int
|
||||
ipfsAPIPort int
|
||||
}{
|
||||
{"node2", 4002, 5002, 7002, 9104, 4502},
|
||||
{"node3", 4003, 5003, 7003, 9114, 4503},
|
||||
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write node config: %w", err)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeDir := filepath.Join(ce.debrosDir, node.name)
|
||||
configPath := filepath.Join(ce.debrosDir, fmt.Sprintf("%s.yaml", node.name))
|
||||
|
||||
// Always regenerate to ensure template fixes are applied
|
||||
if err := os.MkdirAll(nodeDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create %s directory: %w", node.name, err)
|
||||
}
|
||||
|
||||
data := templates.NodeConfigData{
|
||||
NodeID: node.name,
|
||||
P2PPort: node.p2pPort,
|
||||
DataDir: nodeDir,
|
||||
RQLiteHTTPPort: node.rqliteHTTPPort,
|
||||
RQLiteRaftPort: node.rqliteRaftPort,
|
||||
RQLiteJoinAddress: "localhost:7001",
|
||||
BootstrapPeers: []string{bootstrapMultiaddr},
|
||||
ClusterAPIPort: node.clusterAPIPort,
|
||||
IPFSAPIPort: node.ipfsAPIPort,
|
||||
}
|
||||
|
||||
config, err := templates.RenderNodeConfig(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to render %s config: %w", node.name, err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write %s config: %w", node.name, err)
|
||||
}
|
||||
|
||||
fmt.Printf("✓ Generated %s.yaml\n", node.name)
|
||||
}
|
||||
fmt.Printf("✓ Generated %s.yaml\n", nodeSpec.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureGateway creates gateway config
|
||||
func (ce *ConfigEnsurer) ensureGateway() error {
|
||||
configPath := filepath.Join(ce.debrosDir, "gateway.yaml")
|
||||
|
||||
// Always regenerate to ensure template fixes are applied
|
||||
// Get bootstrap multiaddr
|
||||
bootstrapInfo, err := encryption.LoadIdentity(filepath.Join(ce.debrosDir, "bootstrap", "identity.key"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load bootstrap identity: %w", err)
|
||||
}
|
||||
|
||||
bootstrapMultiaddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/4001/p2p/%s", bootstrapInfo.PeerID.String())
|
||||
|
||||
data := templates.GatewayConfigData{
|
||||
ListenPort: 6001,
|
||||
BootstrapPeers: []string{bootstrapMultiaddr},
|
||||
OlricServers: []string{"127.0.0.1:3320"},
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 4501,
|
||||
}
|
||||
|
||||
config, err := templates.RenderGatewayConfig(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to render gateway config: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write gateway config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("✓ Generated gateway.yaml\n")
|
||||
return nil
|
||||
}
|
||||
// Gateway configuration is now embedded in each node's config
|
||||
// ensureGateway is no longer needed - each node runs its own embedded gateway
|
||||
|
||||
// ensureOlric creates Olric config
|
||||
func (ce *ConfigEnsurer) ensureOlric() error {
|
||||
configPath := filepath.Join(ce.debrosDir, "olric-config.yaml")
|
||||
configPath := filepath.Join(ce.oramaDir, "olric-config.yaml")
|
||||
|
||||
// Always regenerate to ensure template fixes are applied
|
||||
data := templates.OlricConfigData{
|
||||
BindAddr: "127.0.0.1",
|
||||
HTTPPort: 3320,
|
||||
MemberlistPort: 3322,
|
||||
}
|
||||
topology := DefaultTopology()
|
||||
data := templates.OlricConfigData{
|
||||
ServerBindAddr: "127.0.0.1",
|
||||
HTTPPort: topology.OlricHTTPPort,
|
||||
MemberlistBindAddr: "127.0.0.1", // localhost for development
|
||||
MemberlistPort: topology.OlricMemberPort,
|
||||
MemberlistEnvironment: "local", // development environment
|
||||
}
|
||||
|
||||
config, err := templates.RenderOlricConfig(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to render olric config: %w", err)
|
||||
}
|
||||
config, err := templates.RenderOlricConfig(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to render olric config: %w", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write olric config: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(configPath, []byte(config), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write olric config: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("✓ Generated olric-config.yaml\n")
|
||||
fmt.Printf("✓ Generated olric-config.yaml\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -9,6 +9,8 @@ import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/tlsutil"
|
||||
)
|
||||
|
||||
// HealthCheckResult represents the result of a health check
|
||||
@ -37,17 +39,19 @@ func (pm *ProcessManager) IPFSHealthCheck(ctx context.Context, nodes []ipfsNodeI
|
||||
for _, line := range peerLines {
|
||||
if strings.TrimSpace(line) != "" {
|
||||
peerCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if peerCount < 2 {
|
||||
result.Details += fmt.Sprintf("%s: only %d peers (want 2+); ", node.name, peerCount)
|
||||
// With 5 nodes, expect each node to see at least 3 other peers
|
||||
if peerCount < 3 {
|
||||
result.Details += fmt.Sprintf("%s: only %d peers (want 3+); ", node.name, peerCount)
|
||||
} else {
|
||||
result.Details += fmt.Sprintf("%s: %d peers; ", node.name, peerCount)
|
||||
healthyCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Require all 5 nodes to have healthy peer counts
|
||||
result.Healthy = healthyCount == len(nodes)
|
||||
return result
|
||||
}
|
||||
@ -56,24 +60,19 @@ func (pm *ProcessManager) IPFSHealthCheck(ctx context.Context, nodes []ipfsNodeI
|
||||
func (pm *ProcessManager) RQLiteHealthCheck(ctx context.Context) HealthCheckResult {
|
||||
result := HealthCheckResult{Name: "RQLite Cluster"}
|
||||
|
||||
// Check bootstrap node
|
||||
bootstrapStatus := pm.checkRQLiteNode(ctx, "bootstrap", 5001)
|
||||
if !bootstrapStatus.Healthy {
|
||||
result.Details += fmt.Sprintf("bootstrap: %s; ", bootstrapStatus.Details)
|
||||
return result
|
||||
}
|
||||
|
||||
// Check node2 and node3
|
||||
node2Status := pm.checkRQLiteNode(ctx, "node2", 5002)
|
||||
node3Status := pm.checkRQLiteNode(ctx, "node3", 5003)
|
||||
|
||||
if node2Status.Healthy && node3Status.Healthy {
|
||||
result.Healthy = true
|
||||
result.Details = fmt.Sprintf("bootstrap: leader ok; node2: %s; node3: %s", node2Status.Details, node3Status.Details)
|
||||
} else {
|
||||
result.Details = fmt.Sprintf("bootstrap: ok; node2: %s; node3: %s", node2Status.Details, node3Status.Details)
|
||||
topology := DefaultTopology()
|
||||
healthyCount := 0
|
||||
|
||||
for _, nodeSpec := range topology.Nodes {
|
||||
status := pm.checkRQLiteNode(ctx, nodeSpec.Name, nodeSpec.RQLiteHTTPPort)
|
||||
if status.Healthy {
|
||||
healthyCount++
|
||||
}
|
||||
result.Details += fmt.Sprintf("%s: %s; ", nodeSpec.Name, status.Details)
|
||||
}
|
||||
|
||||
// Require at least 3 out of 5 nodes to be healthy for quorum
|
||||
result.Healthy = healthyCount >= 3
|
||||
return result
|
||||
}
|
||||
|
||||
@ -82,7 +81,7 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
result := HealthCheckResult{Name: fmt.Sprintf("RQLite-%s", name)}

urlStr := fmt.Sprintf("http://localhost:%d/status", httpPort)
client := &http.Client{Timeout: 2 * time.Second}
client := tlsutil.NewHTTPClient(2 * time.Second)
resp, err := client.Get(urlStr)
if err != nil {
result.Details = fmt.Sprintf("connection failed: %v", err)
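tlsutil.NewHTTPClient is not shown in this diff; the sketch below assumes it simply returns an *http.Client with the given timeout and TLS settings that tolerate the cluster's self-signed certificates. The real helper may instead pin a cluster CA, so treat this as an illustration of the call's role, not its implementation.

import (
	"crypto/tls"
	"net/http"
	"time"
)

// newHTTPClient: assumed-equivalent stand-in for tlsutil.NewHTTPClient.
func newHTTPClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			// Assumption: dev clusters use self-signed certs (see UseSelfSigned above).
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
}
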
@ -99,7 +98,7 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
|
||||
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
|
||||
result.Details = fmt.Sprintf("decode error: %v", err)
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// Check the store.raft structure (RQLite 8 format)
|
||||
store, ok := status["store"].(map[string]interface{})
|
||||
@ -146,64 +145,63 @@ func (pm *ProcessManager) checkRQLiteNode(ctx context.Context, name string, http
|
||||
func (pm *ProcessManager) LibP2PHealthCheck(ctx context.Context) HealthCheckResult {
|
||||
result := HealthCheckResult{Name: "LibP2P/Node Peers"}
|
||||
|
||||
// Check that at least 2 nodes are part of the RQLite cluster (implies peer connectivity)
|
||||
// and that they can communicate via LibP2P (which they use for cluster discovery)
|
||||
// Check that nodes are part of the RQLite cluster and can communicate via LibP2P
|
||||
topology := DefaultTopology()
|
||||
healthyNodes := 0
|
||||
for i, name := range []string{"bootstrap", "node2", "node3"} {
|
||||
httpPort := 5001 + i
|
||||
status := pm.checkRQLiteNode(ctx, name, httpPort)
|
||||
|
||||
for _, nodeSpec := range topology.Nodes {
|
||||
status := pm.checkRQLiteNode(ctx, nodeSpec.Name, nodeSpec.RQLiteHTTPPort)
|
||||
if status.Healthy {
|
||||
healthyNodes++
|
||||
result.Details += fmt.Sprintf("%s: connected; ", name)
|
||||
result.Details += fmt.Sprintf("%s: connected; ", nodeSpec.Name)
|
||||
} else {
|
||||
result.Details += fmt.Sprintf("%s: %s; ", name, status.Details)
|
||||
result.Details += fmt.Sprintf("%s: %s; ", nodeSpec.Name, status.Details)
|
||||
}
|
||||
}
|
||||
|
||||
// Healthy if at least 2 nodes report connectivity (including bootstrap)
|
||||
result.Healthy = healthyNodes >= 2
|
||||
// Healthy if at least 3 nodes report connectivity
|
||||
result.Healthy = healthyNodes >= 3
|
||||
return result
|
||||
}
|
||||
|
||||
// HealthCheckWithRetry performs a health check with retry logic
|
||||
func (pm *ProcessManager) HealthCheckWithRetry(ctx context.Context, nodes []ipfsNodeInfo, retries int, retryInterval time.Duration, timeout time.Duration) bool {
|
||||
fmt.Fprintf(pm.logWriter, "\n⚕️ Validating cluster health...\n")
|
||||
fmt.Fprintf(pm.logWriter, "⚕️ Validating cluster health...")
|
||||
|
||||
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
spinnerFrames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
|
||||
spinnerIndex := 0
|
||||
|
||||
for attempt := 1; attempt <= retries; attempt++ {
|
||||
// Perform all checks
|
||||
ipfsResult := pm.IPFSHealthCheck(deadlineCtx, nodes)
|
||||
rqliteResult := pm.RQLiteHealthCheck(deadlineCtx)
|
||||
libp2pResult := pm.LibP2PHealthCheck(deadlineCtx)
|
||||
|
||||
// Log results
|
||||
if attempt == 1 || attempt == retries || (attempt%3 == 0) {
|
||||
fmt.Fprintf(pm.logWriter, " Attempt %d/%d:\n", attempt, retries)
|
||||
pm.logHealthCheckResult(pm.logWriter, " ", ipfsResult)
|
||||
pm.logHealthCheckResult(pm.logWriter, " ", rqliteResult)
|
||||
pm.logHealthCheckResult(pm.logWriter, " ", libp2pResult)
|
||||
}
|
||||
|
||||
// All checks must pass
|
||||
if ipfsResult.Healthy && rqliteResult.Healthy && libp2pResult.Healthy {
|
||||
fmt.Fprintf(pm.logWriter, "\n✓ All health checks passed!\n")
|
||||
fmt.Fprintf(pm.logWriter, "\r✓ Cluster health validated\n")
|
||||
return true
|
||||
}
|
||||
|
||||
// Show spinner progress
|
||||
fmt.Fprintf(pm.logWriter, "\r%s Validating cluster health... (%d/%d)", spinnerFrames[spinnerIndex%len(spinnerFrames)], attempt, retries)
|
||||
spinnerIndex++
|
||||
|
||||
if attempt < retries {
|
||||
select {
|
||||
case <-time.After(retryInterval):
|
||||
continue
|
||||
case <-deadlineCtx.Done():
|
||||
fmt.Fprintf(pm.logWriter, "\n❌ Health check timeout reached\n")
|
||||
fmt.Fprintf(pm.logWriter, "\r❌ Health check timeout reached\n")
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed after %d attempts\n", retries)
|
||||
fmt.Fprintf(pm.logWriter, "\r❌ Health checks failed - services not ready\n")
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@ -15,11 +15,13 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/tlsutil"
|
||||
)
|
||||
|
||||
// ProcessManager manages all dev environment processes
|
||||
type ProcessManager struct {
|
||||
debrosDir string
|
||||
oramaDir string
|
||||
pidsDir string
|
||||
processes map[string]*ManagedProcess
|
||||
mutex sync.Mutex
|
||||
@ -35,12 +37,12 @@ type ManagedProcess struct {
|
||||
}
|
||||
|
||||
// NewProcessManager creates a new process manager
|
||||
func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
|
||||
pidsDir := filepath.Join(debrosDir, ".pids")
|
||||
func NewProcessManager(oramaDir string, logWriter io.Writer) *ProcessManager {
|
||||
pidsDir := filepath.Join(oramaDir, ".pids")
|
||||
os.MkdirAll(pidsDir, 0755)
|
||||
|
||||
return &ProcessManager{
|
||||
debrosDir: debrosDir,
|
||||
oramaDir: oramaDir,
|
||||
pidsDir: pidsDir,
|
||||
processes: make(map[string]*ManagedProcess),
|
||||
logWriter: logWriter,
|
||||
@ -49,14 +51,13 @@ func NewProcessManager(debrosDir string, logWriter io.Writer) *ProcessManager {
|
||||
|
||||
// StartAll starts all development services
|
||||
func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
||||
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n\n")
|
||||
fmt.Fprintf(pm.logWriter, "\n🚀 Starting development environment...\n")
|
||||
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
|
||||
|
||||
// Define IPFS nodes for later use in health checks
|
||||
ipfsNodes := []ipfsNodeInfo{
|
||||
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
|
||||
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
|
||||
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
|
||||
}
|
||||
topology := DefaultTopology()
|
||||
|
||||
// Build IPFS node info from topology
|
||||
ipfsNodes := pm.buildIPFSNodes(topology)
|
||||
|
||||
// Start in order of dependencies
|
||||
services := []struct {
|
||||
@ -67,10 +68,8 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
||||
{"IPFS Cluster", pm.startIPFSCluster},
|
||||
{"Olric", pm.startOlric},
|
||||
{"Anon", pm.startAnon},
|
||||
{"Bootstrap Node", pm.startBootstrapNode},
|
||||
{"Node2", pm.startNode2},
|
||||
{"Node3", pm.startNode3},
|
||||
{"Gateway", pm.startGateway},
|
||||
{"Nodes (Network)", pm.startNodes},
|
||||
// Gateway is now per-node (embedded in each node) - no separate main gateway needed
|
||||
}
|
||||
|
||||
for _, svc := range services {
|
||||
@ -80,6 +79,8 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "\n")
|
||||
|
||||
// Run health checks with retries before declaring success
const (
healthCheckRetries = 20
@@ -88,42 +89,84 @@ func (pm *ProcessManager) StartAll(ctx context.Context) error {
)

if !pm.HealthCheckWithRetry(ctx, ipfsNodes, healthCheckRetries, healthCheckInterval, healthCheckTimeout) {
fmt.Fprintf(pm.logWriter, "\n❌ Development environment failed health checks - stopping all services\n")
fmt.Fprintf(pm.logWriter, "\n❌ Health checks failed - stopping all services\n")
pm.StopAll(ctx)
return fmt.Errorf("cluster health checks failed - services stopped")
}

fmt.Fprintf(pm.logWriter, "\n✅ Development environment started!\n\n")
// Print success and key endpoints
pm.printStartupSummary(topology)
return nil
}
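The retry constants above feed HealthCheckWithRetry, whose full body is not shown in this excerpt. The following is a minimal sketch of the retry/interval/timeout loop they configure, assuming a hypothetical checkOnce callback; it illustrates the pattern, not the repository's actual implementation.

// retryUntilHealthy is a hypothetical sketch of the retry pattern configured above.
// checkOnce is an assumed callback that returns true once every service answers its health endpoint.
func retryUntilHealthy(ctx context.Context, retries int, interval, timeout time.Duration, checkOnce func(context.Context) bool) bool {
	deadline := time.Now().Add(timeout)
	for attempt := 1; attempt <= retries; attempt++ {
		if checkOnce(ctx) {
			return true // all services healthy
		}
		if time.Now().After(deadline) {
			return false // overall timeout reached
		}
		select {
		case <-ctx.Done():
			return false
		case <-time.After(interval):
			// wait before the next attempt
		}
	}
	return false
}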
|
||||
// printStartupSummary prints the final startup summary with key endpoints
|
||||
func (pm *ProcessManager) printStartupSummary(topology *Topology) {
|
||||
fmt.Fprintf(pm.logWriter, "\n✅ Development environment ready!\n")
|
||||
fmt.Fprintf(pm.logWriter, "═══════════════════════════════════════\n\n")
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "📡 Access your nodes via unified gateway ports:\n\n")
|
||||
for _, node := range topology.Nodes {
|
||||
fmt.Fprintf(pm.logWriter, " %s:\n", node.Name)
|
||||
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/health\n", node.UnifiedGatewayPort)
|
||||
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/rqlite/http/db/execute\n", node.UnifiedGatewayPort)
|
||||
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/cluster/health\n\n", node.UnifiedGatewayPort)
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "🌐 Main Gateway:\n")
|
||||
fmt.Fprintf(pm.logWriter, " curl http://localhost:%d/v1/status\n\n", topology.GatewayPort)
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "📊 Other Services:\n")
|
||||
fmt.Fprintf(pm.logWriter, " Olric: http://localhost:%d\n", topology.OlricHTTPPort)
|
||||
fmt.Fprintf(pm.logWriter, " Anon SOCKS: 127.0.0.1:%d\n\n", topology.AnonSOCKSPort)
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "📝 Useful Commands:\n")
|
||||
fmt.Fprintf(pm.logWriter, " ./bin/orama dev status - Check service status\n")
|
||||
fmt.Fprintf(pm.logWriter, " ./bin/orama dev logs node-1 - View logs\n")
|
||||
fmt.Fprintf(pm.logWriter, " ./bin/orama dev down - Stop all services\n\n")
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "📂 Logs: %s/logs\n", pm.oramaDir)
|
||||
fmt.Fprintf(pm.logWriter, "⚙️ Config: %s\n\n", pm.oramaDir)
|
||||
}
|
||||
|
||||
// StopAll stops all running processes
|
||||
func (pm *ProcessManager) StopAll(ctx context.Context) error {
|
||||
fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n")
|
||||
fmt.Fprintf(pm.logWriter, "\n🛑 Stopping development environment...\n\n")
|
||||
|
||||
services := []string{
|
||||
"gateway",
|
||||
"node3",
|
||||
"node2",
|
||||
"bootstrap",
|
||||
"olric",
|
||||
"ipfs-cluster-node3",
|
||||
"ipfs-cluster-node2",
|
||||
"ipfs-cluster-bootstrap",
|
||||
"rqlite-node3",
|
||||
"rqlite-node2",
|
||||
"rqlite-bootstrap",
|
||||
"ipfs-node3",
|
||||
"ipfs-node2",
|
||||
"ipfs-bootstrap",
|
||||
"anon",
|
||||
topology := DefaultTopology()
|
||||
var services []string
|
||||
|
||||
// Build service list from topology (in reverse order)
|
||||
services = append(services, "gateway")
|
||||
for i := len(topology.Nodes) - 1; i >= 0; i-- {
|
||||
node := topology.Nodes[i]
|
||||
services = append(services, node.Name)
|
||||
}
|
||||
for i := len(topology.Nodes) - 1; i >= 0; i-- {
|
||||
node := topology.Nodes[i]
|
||||
services = append(services, fmt.Sprintf("ipfs-cluster-%s", node.Name))
|
||||
}
|
||||
for i := len(topology.Nodes) - 1; i >= 0; i-- {
|
||||
node := topology.Nodes[i]
|
||||
services = append(services, fmt.Sprintf("ipfs-%s", node.Name))
|
||||
}
|
||||
services = append(services, "olric", "anon")
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "Stopping %d services...\n\n", len(services))
|
||||
|
||||
// Stop all processes sequentially (in dependency order) and wait for each
|
||||
stoppedCount := 0
|
||||
for _, svc := range services {
|
||||
pm.stopProcess(svc)
|
||||
if err := pm.stopProcess(svc); err != nil {
|
||||
fmt.Fprintf(pm.logWriter, "⚠️ Error stopping %s: %v\n", svc, err)
|
||||
} else {
|
||||
stoppedCount++
|
||||
}
|
||||
|
||||
// Show progress
|
||||
fmt.Fprintf(pm.logWriter, " [%d/%d] stopped\n", stoppedCount, len(services))
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "✓ All services stopped\n\n")
|
||||
fmt.Fprintf(pm.logWriter, "\n✅ All %d services have been stopped\n\n", stoppedCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -132,27 +175,51 @@ func (pm *ProcessManager) Status(ctx context.Context) {
|
||||
fmt.Fprintf(pm.logWriter, "\n📊 Development Environment Status\n")
|
||||
fmt.Fprintf(pm.logWriter, "================================\n\n")
|
||||
|
||||
services := []struct {
|
||||
topology := DefaultTopology()
|
||||
|
||||
// Build service list from topology
|
||||
var services []struct {
|
||||
name string
|
||||
ports []int
|
||||
}{
|
||||
{"Bootstrap IPFS", []int{4501, 4101}},
|
||||
{"Bootstrap RQLite", []int{5001, 7001}},
|
||||
{"Node2 IPFS", []int{4502, 4102}},
|
||||
{"Node2 RQLite", []int{5002, 7002}},
|
||||
{"Node3 IPFS", []int{4503, 4103}},
|
||||
{"Node3 RQLite", []int{5003, 7003}},
|
||||
{"Bootstrap Cluster", []int{9094}},
|
||||
{"Node2 Cluster", []int{9104}},
|
||||
{"Node3 Cluster", []int{9114}},
|
||||
{"Bootstrap Node (P2P)", []int{4001}},
|
||||
{"Node2 (P2P)", []int{4002}},
|
||||
{"Node3 (P2P)", []int{4003}},
|
||||
{"Gateway", []int{6001}},
|
||||
{"Olric", []int{3320, 3322}},
|
||||
{"Anon SOCKS", []int{9050}},
|
||||
}
|
||||
|
||||
for _, node := range topology.Nodes {
|
||||
services = append(services, struct {
|
||||
name string
|
||||
ports []int
|
||||
}{
|
||||
fmt.Sprintf("%s IPFS", node.Name),
|
||||
[]int{node.IPFSAPIPort, node.IPFSSwarmPort},
|
||||
})
|
||||
services = append(services, struct {
|
||||
name string
|
||||
ports []int
|
||||
}{
|
||||
fmt.Sprintf("%s Cluster", node.Name),
|
||||
[]int{node.ClusterAPIPort},
|
||||
})
|
||||
services = append(services, struct {
|
||||
name string
|
||||
ports []int
|
||||
}{
|
||||
fmt.Sprintf("%s Node (P2P)", node.Name),
|
||||
[]int{node.P2PPort},
|
||||
})
|
||||
}
|
||||
|
||||
services = append(services, struct {
|
||||
name string
|
||||
ports []int
|
||||
}{"Gateway", []int{topology.GatewayPort}})
|
||||
services = append(services, struct {
|
||||
name string
|
||||
ports []int
|
||||
}{"Olric", []int{topology.OlricHTTPPort, topology.OlricMemberPort}})
|
||||
services = append(services, struct {
|
||||
name string
|
||||
ports []int
|
||||
}{"Anon SOCKS", []int{topology.AnonSOCKSPort}})
|
||||
|
||||
for _, svc := range services {
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", svc.name))
|
||||
running := false
|
||||
@ -172,10 +239,10 @@ func (pm *ProcessManager) Status(ctx context.Context) {
|
||||
fmt.Fprintf(pm.logWriter, " %-25s %s (%s)\n", svc.name, status, portStr)
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.debrosDir)
|
||||
files := []string{"bootstrap.yaml", "node2.yaml", "node3.yaml", "gateway.yaml", "olric-config.yaml"}
|
||||
for _, f := range files {
|
||||
path := filepath.Join(pm.debrosDir, f)
|
||||
fmt.Fprintf(pm.logWriter, "\nConfiguration files in %s:\n", pm.oramaDir)
|
||||
configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml", "olric-config.yaml"}
|
||||
for _, f := range configFiles {
|
||||
path := filepath.Join(pm.oramaDir, f)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
fmt.Fprintf(pm.logWriter, " ✓ %s\n", f)
|
||||
} else {
|
||||
@ -183,11 +250,40 @@ func (pm *ProcessManager) Status(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.debrosDir)
|
||||
fmt.Fprintf(pm.logWriter, "\nLogs directory: %s/logs\n\n", pm.oramaDir)
|
||||
}
|
||||
|
||||
// Helper functions for starting individual services
|
||||
|
||||
// buildIPFSNodes constructs ipfsNodeInfo from topology
|
||||
func (pm *ProcessManager) buildIPFSNodes(topology *Topology) []ipfsNodeInfo {
|
||||
var nodes []ipfsNodeInfo
|
||||
for _, nodeSpec := range topology.Nodes {
|
||||
nodes = append(nodes, ipfsNodeInfo{
|
||||
name: nodeSpec.Name,
|
||||
ipfsPath: filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs/repo"),
|
||||
apiPort: nodeSpec.IPFSAPIPort,
|
||||
swarmPort: nodeSpec.IPFSSwarmPort,
|
||||
gatewayPort: nodeSpec.IPFSGatewayPort,
|
||||
peerID: "",
|
||||
})
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// startNodes starts all network nodes
|
||||
func (pm *ProcessManager) startNodes(ctx context.Context) error {
|
||||
topology := DefaultTopology()
|
||||
for _, nodeSpec := range topology.Nodes {
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("%s.log", nodeSpec.Name))
|
||||
if err := pm.startNode(nodeSpec.Name, nodeSpec.ConfigFilename, logPath); err != nil {
|
||||
return fmt.Errorf("failed to start %s: %w", nodeSpec.Name, err)
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ipfsNodeInfo holds information about an IPFS node for peer discovery
|
||||
type ipfsNodeInfo struct {
|
||||
name string
|
||||
@ -226,6 +322,107 @@ func readIPFSConfigValue(ctx context.Context, repoPath string, key string) (stri
|
||||
return "", fmt.Errorf("key %s not found in IPFS config", key)
|
||||
}
|
||||
|
||||
// configureIPFSRepo directly modifies IPFS config JSON to set addresses, bootstrap, and CORS headers
// This avoids shell commands which fail on some systems and instead manipulates the config directly
// Returns the peer ID from the config
func configureIPFSRepo(repoPath string, apiPort, gatewayPort, swarmPort int) (string, error) {
configPath := filepath.Join(repoPath, "config")

// Read existing config
data, err := os.ReadFile(configPath)
if err != nil {
return "", fmt.Errorf("failed to read IPFS config: %w", err)
}

var config map[string]interface{}
if err := json.Unmarshal(data, &config); err != nil {
return "", fmt.Errorf("failed to parse IPFS config: %w", err)
}

// Set Addresses
config["Addresses"] = map[string]interface{}{
"API": []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort)},
"Gateway": []string{fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", gatewayPort)},
"Swarm": []string{
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", swarmPort),
fmt.Sprintf("/ip6/::/tcp/%d", swarmPort),
},
}

// Disable AutoConf for private swarm
config["AutoConf"] = map[string]interface{}{
"Enabled": false,
}

// Clear Bootstrap (will be set via HTTP API after startup)
config["Bootstrap"] = []string{}

// Clear DNS Resolvers
if dns, ok := config["DNS"].(map[string]interface{}); ok {
dns["Resolvers"] = map[string]interface{}{}
} else {
config["DNS"] = map[string]interface{}{
"Resolvers": map[string]interface{}{},
}
}

// Clear Routing DelegatedRouters
if routing, ok := config["Routing"].(map[string]interface{}); ok {
routing["DelegatedRouters"] = []string{}
} else {
config["Routing"] = map[string]interface{}{
"DelegatedRouters": []string{},
}
}

// Clear IPNS DelegatedPublishers
if ipns, ok := config["Ipns"].(map[string]interface{}); ok {
ipns["DelegatedPublishers"] = []string{}
} else {
config["Ipns"] = map[string]interface{}{
"DelegatedPublishers": []string{},
}
}

// Set API HTTPHeaders with CORS (must be map[string][]string)
if api, ok := config["API"].(map[string]interface{}); ok {
api["HTTPHeaders"] = map[string][]string{
"Access-Control-Allow-Origin": {"*"},
"Access-Control-Allow-Methods": {"GET", "PUT", "POST", "DELETE", "OPTIONS"},
"Access-Control-Allow-Headers": {"Content-Type", "X-Requested-With"},
"Access-Control-Expose-Headers": {"Content-Length", "Content-Range"},
}
} else {
config["API"] = map[string]interface{}{
"HTTPHeaders": map[string][]string{
"Access-Control-Allow-Origin": {"*"},
"Access-Control-Allow-Methods": {"GET", "PUT", "POST", "DELETE", "OPTIONS"},
"Access-Control-Allow-Headers": {"Content-Type", "X-Requested-With"},
"Access-Control-Expose-Headers": {"Content-Length", "Content-Range"},
},
}
}

// Write config back
updatedData, err := json.MarshalIndent(config, "", " ")
if err != nil {
return "", fmt.Errorf("failed to marshal IPFS config: %w", err)
}

if err := os.WriteFile(configPath, updatedData, 0644); err != nil {
return "", fmt.Errorf("failed to write IPFS config: %w", err)
}

// Extract and return peer ID
if id, ok := config["Identity"].(map[string]interface{}); ok {
if peerID, ok := id["PeerID"].(string); ok {
return peerID, nil
}
}

return "", fmt.Errorf("could not extract peer ID from config")
}
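A short usage sketch of the helper above. The repository path is illustrative and the ports are node-1's values from the default dev topology (API 4501, gateway 7501, swarm 4101); this is not code from the repository.

// Hypothetical example: configure a local dev repo before its daemon starts.
repoPath := filepath.Join(os.Getenv("HOME"), ".orama", "node-1", "ipfs", "repo") // illustrative path
peerID, err := configureIPFSRepo(repoPath, 4501, 7501, 4101)
if err != nil {
	log.Fatalf("failed to configure IPFS repo: %v", err)
}
fmt.Println("configured repo, peer ID:", peerID)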
|
||||
// seedIPFSPeersWithHTTP configures each IPFS node to bootstrap with its local peers using HTTP API
|
||||
func (pm *ProcessManager) seedIPFSPeersWithHTTP(ctx context.Context, nodes []ipfsNodeInfo) error {
|
||||
fmt.Fprintf(pm.logWriter, " Seeding IPFS local bootstrap peers via HTTP API...\n")
|
||||
@ -286,7 +483,7 @@ func (pm *ProcessManager) waitIPFSReady(ctx context.Context, node ipfsNodeInfo)
|
||||
|
||||
// ipfsHTTPCall makes an HTTP call to IPFS API
|
||||
func (pm *ProcessManager) ipfsHTTPCall(ctx context.Context, urlStr string, method string) error {
|
||||
client := &http.Client{Timeout: 5 * time.Second}
|
||||
client := tlsutil.NewHTTPClient(5 * time.Second)
|
||||
req, err := http.NewRequestWithContext(ctx, method, urlStr, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create request: %w", err)
|
||||
@ -307,11 +504,8 @@ func (pm *ProcessManager) ipfsHTTPCall(ctx context.Context, urlStr string, metho
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startIPFS(ctx context.Context) error {
|
||||
nodes := []ipfsNodeInfo{
|
||||
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
|
||||
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
|
||||
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
|
||||
}
|
||||
topology := DefaultTopology()
|
||||
nodes := pm.buildIPFSNodes(topology)
|
||||
|
||||
// Phase 1: Initialize repos and configure addresses
|
||||
for i := range nodes {
|
||||
@ -326,31 +520,17 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Copy swarm key
|
||||
swarmKeyPath := filepath.Join(pm.debrosDir, "swarm.key")
|
||||
swarmKeyPath := filepath.Join(pm.oramaDir, "swarm.key")
|
||||
if data, err := os.ReadFile(swarmKeyPath); err == nil {
|
||||
os.WriteFile(filepath.Join(nodes[i].ipfsPath, "swarm.key"), data, 0600)
|
||||
}
|
||||
}
|
||||
|
||||
// Always reapply address settings to ensure correct ports (before daemon starts)
|
||||
apiAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", nodes[i].apiPort)
|
||||
gatewayAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", nodes[i].gatewayPort)
|
||||
swarmAddrs := fmt.Sprintf("[\"/ip4/0.0.0.0/tcp/%d\", \"/ip6/::/tcp/%d\"]", nodes[i].swarmPort, nodes[i].swarmPort)
|
||||
|
||||
if err := exec.CommandContext(ctx, "ipfs", "config", "--repo-dir="+nodes[i].ipfsPath, "Addresses.API", apiAddr).Run(); err != nil {
|
||||
fmt.Fprintf(pm.logWriter, " Warning: failed to set API address: %v\n", err)
|
||||
}
|
||||
if err := exec.CommandContext(ctx, "ipfs", "config", "--repo-dir="+nodes[i].ipfsPath, "Addresses.Gateway", gatewayAddr).Run(); err != nil {
|
||||
fmt.Fprintf(pm.logWriter, " Warning: failed to set Gateway address: %v\n", err)
|
||||
}
|
||||
if err := exec.CommandContext(ctx, "ipfs", "config", "--repo-dir="+nodes[i].ipfsPath, "--json", "Addresses.Swarm", swarmAddrs).Run(); err != nil {
|
||||
fmt.Fprintf(pm.logWriter, " Warning: failed to set Swarm addresses: %v\n", err)
|
||||
}
|
||||
|
||||
// Read peer ID from config BEFORE daemon starts
|
||||
peerID, err := readIPFSConfigValue(ctx, nodes[i].ipfsPath, "PeerID")
|
||||
// Configure the IPFS config directly (addresses, bootstrap, DNS, routing, CORS headers)
|
||||
// This replaces shell commands which can fail on some systems
|
||||
peerID, err := configureIPFSRepo(nodes[i].ipfsPath, nodes[i].apiPort, nodes[i].gatewayPort, nodes[i].swarmPort)
|
||||
if err != nil {
|
||||
fmt.Fprintf(pm.logWriter, " Warning: failed to read peer ID for %s: %v\n", nodes[i].name, err)
|
||||
fmt.Fprintf(pm.logWriter, " Warning: failed to configure IPFS repo for %s: %v\n", nodes[i].name, err)
|
||||
} else {
|
||||
nodes[i].peerID = peerID
|
||||
fmt.Fprintf(pm.logWriter, " Peer ID for %s: %s\n", nodes[i].name, peerID)
|
||||
@ -360,7 +540,7 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
|
||||
// Phase 2: Start all IPFS daemons
|
||||
for i := range nodes {
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-%s.pid", nodes[i].name))
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-%s.log", nodes[i].name))
|
||||
|
||||
cmd := exec.CommandContext(ctx, "ipfs", "daemon", "--enable-pubsub-experiment", "--repo-dir="+nodes[i].ipfsPath)
|
||||
logFile, _ := os.Create(logPath)
|
||||
@ -393,25 +573,34 @@ func (pm *ProcessManager) startIPFS(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
|
||||
nodes := []struct {
|
||||
topology := DefaultTopology()
|
||||
var nodes []struct {
|
||||
name string
|
||||
clusterPath string
|
||||
restAPIPort int
|
||||
clusterPort int
|
||||
ipfsPort int
|
||||
}{
|
||||
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs-cluster"), 9094, 9096, 4501},
|
||||
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs-cluster"), 9104, 9106, 4502},
|
||||
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs-cluster"), 9114, 9116, 4503},
|
||||
}
|
||||
|
||||
for _, nodeSpec := range topology.Nodes {
|
||||
nodes = append(nodes, struct {
|
||||
name string
|
||||
clusterPath string
|
||||
restAPIPort int
|
||||
clusterPort int
|
||||
ipfsPort int
|
||||
}{
|
||||
nodeSpec.Name,
|
||||
filepath.Join(pm.oramaDir, nodeSpec.DataDir, "ipfs-cluster"),
|
||||
nodeSpec.ClusterAPIPort,
|
||||
nodeSpec.ClusterPort,
|
||||
nodeSpec.IPFSAPIPort,
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all IPFS daemons to be ready before starting cluster services
|
||||
fmt.Fprintf(pm.logWriter, " Waiting for IPFS daemons to be ready...\n")
|
||||
ipfsNodes := []ipfsNodeInfo{
|
||||
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/ipfs/repo"), 4501, 4101, 7501, ""},
|
||||
{"node2", filepath.Join(pm.debrosDir, "node2/ipfs/repo"), 4502, 4102, 7502, ""},
|
||||
{"node3", filepath.Join(pm.debrosDir, "node3/ipfs/repo"), 4503, 4103, 7503, ""},
|
||||
}
|
||||
ipfsNodes := pm.buildIPFSNodes(topology)
|
||||
for _, ipfsNode := range ipfsNodes {
|
||||
if err := pm.waitIPFSReady(ctx, ipfsNode); err != nil {
|
||||
fmt.Fprintf(pm.logWriter, " Warning: IPFS %s did not become ready: %v\n", ipfsNode.name, err)
|
||||
@ -419,7 +608,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Read cluster secret to ensure all nodes use the same PSK
|
||||
secretPath := filepath.Join(pm.debrosDir, "cluster-secret")
|
||||
secretPath := filepath.Join(pm.oramaDir, "cluster-secret")
|
||||
clusterSecret, err := os.ReadFile(secretPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read cluster secret: %w", err)
|
||||
@ -468,7 +657,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
|
||||
|
||||
// Start bootstrap cluster service
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
|
||||
|
||||
cmd = exec.CommandContext(ctx, "ipfs-cluster-service", "daemon")
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("IPFS_CLUSTER_PATH=%s", node.clusterPath))
|
||||
@ -542,7 +731,7 @@ func (pm *ProcessManager) startIPFSCluster(ctx context.Context) error {
|
||||
|
||||
// Start follower cluster service with bootstrap flag
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("ipfs-cluster-%s.pid", node.name))
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", fmt.Sprintf("ipfs-cluster-%s.log", node.name))
|
||||
|
||||
args := []string{"daemon"}
|
||||
if bootstrapMultiaddr != "" {
|
||||
@@ -643,14 +832,24 @@ func (pm *ProcessManager) waitClusterFormed(ctx context.Context, bootstrapRestAP
httpURL := fmt.Sprintf("http://127.0.0.1:%d/peers", bootstrapRestAPIPort)
resp, err := http.Get(httpURL)
if err == nil && resp.StatusCode == 200 {
var peers []interface{}
if err := json.NewDecoder(resp.Body).Decode(&peers); err == nil {
resp.Body.Close()
if len(peers) >= requiredPeers {
return nil // All peers have formed
// The /peers endpoint returns NDJSON (newline-delimited JSON), not a JSON array
// We need to stream-read each peer object
dec := json.NewDecoder(resp.Body)
peerCount := 0
for {
var peer interface{}
err := dec.Decode(&peer)
if err != nil {
if err == io.EOF {
break
}
break // Stop on parse error
}
} else {
resp.Body.Close()
peerCount++
}
resp.Body.Close()
if peerCount >= requiredPeers {
return nil // All peers have formed
}
}
if resp != nil {
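As the comment in the hunk above notes, ipfs-cluster's /peers endpoint streams one JSON object per line (NDJSON) rather than a JSON array, which is why the code counts objects with a streaming decoder. A self-contained sketch of that decoding pattern with made-up input, independent of the cluster API:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Made-up NDJSON sample: three peer objects, one per line.
	body := strings.NewReader(`{"id":"peer-a"}
{"id":"peer-b"}
{"id":"peer-c"}`)

	dec := json.NewDecoder(body)
	count := 0
	for {
		var obj map[string]interface{}
		if err := dec.Decode(&obj); err != nil {
			if err == io.EOF {
				break
			}
			break // stop on parse error, mirroring the code above
		}
		count++
	}
	fmt.Println("peers seen:", count) // prints 3
}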
@ -777,63 +976,10 @@ func (pm *ProcessManager) ensureIPFSClusterPorts(clusterPath string, restAPIPort
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startRQLite(ctx context.Context) error {
|
||||
nodes := []struct {
|
||||
name string
|
||||
dataDir string
|
||||
httpPort int
|
||||
raftPort int
|
||||
joinAddr string
|
||||
}{
|
||||
{"bootstrap", filepath.Join(pm.debrosDir, "bootstrap/rqlite"), 5001, 7001, ""},
|
||||
{"node2", filepath.Join(pm.debrosDir, "node2/rqlite"), 5002, 7002, "localhost:7001"},
|
||||
{"node3", filepath.Join(pm.debrosDir, "node3/rqlite"), 5003, 7003, "localhost:7001"},
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
os.MkdirAll(node.dataDir, 0755)
|
||||
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("rqlite-%s.pid", node.name))
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", fmt.Sprintf("rqlite-%s.log", node.name))
|
||||
|
||||
var args []string
|
||||
args = append(args, fmt.Sprintf("-http-addr=0.0.0.0:%d", node.httpPort))
|
||||
args = append(args, fmt.Sprintf("-http-adv-addr=localhost:%d", node.httpPort))
|
||||
args = append(args, fmt.Sprintf("-raft-addr=0.0.0.0:%d", node.raftPort))
|
||||
args = append(args, fmt.Sprintf("-raft-adv-addr=localhost:%d", node.raftPort))
|
||||
if node.joinAddr != "" {
|
||||
args = append(args, "-join", node.joinAddr, "-join-attempts", "30", "-join-interval", "10s")
|
||||
}
|
||||
args = append(args, node.dataDir)
|
||||
cmd := exec.CommandContext(ctx, "rqlited", args...)
|
||||
|
||||
logFile, _ := os.Create(logPath)
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start rqlite-%s: %w", node.name, err)
|
||||
}
|
||||
|
||||
os.WriteFile(pidPath, []byte(fmt.Sprintf("%d", cmd.Process.Pid)), 0644)
|
||||
pm.processes[fmt.Sprintf("rqlite-%s", node.name)] = &ManagedProcess{
|
||||
Name: fmt.Sprintf("rqlite-%s", node.name),
|
||||
PID: cmd.Process.Pid,
|
||||
StartTime: time.Now(),
|
||||
LogPath: logPath,
|
||||
}
|
||||
|
||||
fmt.Fprintf(pm.logWriter, "✓ RQLite (%s) started (PID: %d, HTTP: %d, Raft: %d)\n", node.name, cmd.Process.Pid, node.httpPort, node.raftPort)
|
||||
}
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startOlric(ctx context.Context) error {
|
||||
pidPath := filepath.Join(pm.pidsDir, "olric.pid")
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", "olric.log")
|
||||
configPath := filepath.Join(pm.debrosDir, "olric-config.yaml")
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", "olric.log")
|
||||
configPath := filepath.Join(pm.oramaDir, "olric-config.yaml")
|
||||
|
||||
cmd := exec.CommandContext(ctx, "olric-server")
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("OLRIC_SERVER_CONFIG=%s", configPath))
|
||||
@ -858,7 +1004,7 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
|
||||
}
|
||||
|
||||
pidPath := filepath.Join(pm.pidsDir, "anon.pid")
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", "anon.log")
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", "anon.log")
|
||||
|
||||
cmd := exec.CommandContext(ctx, "npx", "anyone-client")
|
||||
logFile, _ := os.Create(logPath)
|
||||
@ -876,21 +1022,9 @@ func (pm *ProcessManager) startAnon(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startBootstrapNode(ctx context.Context) error {
|
||||
return pm.startNode("bootstrap", "bootstrap.yaml", filepath.Join(pm.debrosDir, "logs", "bootstrap.log"))
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startNode2(ctx context.Context) error {
|
||||
return pm.startNode("node2", "node2.yaml", filepath.Join(pm.debrosDir, "logs", "node2.log"))
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startNode3(ctx context.Context) error {
|
||||
return pm.startNode("node3", "node3.yaml", filepath.Join(pm.debrosDir, "logs", "node3.log"))
|
||||
}
|
||||
|
||||
func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
|
||||
cmd := exec.Command("./bin/node", "--config", configFile)
|
||||
cmd := exec.Command("./bin/orama-node", "--config", configFile)
|
||||
logFile, _ := os.Create(logPath)
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
@ -908,7 +1042,7 @@ func (pm *ProcessManager) startNode(name, configFile, logPath string) error {
|
||||
|
||||
func (pm *ProcessManager) startGateway(ctx context.Context) error {
|
||||
pidPath := filepath.Join(pm.pidsDir, "gateway.pid")
|
||||
logPath := filepath.Join(pm.debrosDir, "logs", "gateway.log")
|
||||
logPath := filepath.Join(pm.oramaDir, "logs", "gateway.log")
|
||||
|
||||
cmd := exec.Command("./bin/gateway", "--config", "gateway.yaml")
|
||||
logFile, _ := os.Create(logPath)
|
||||
@ -925,7 +1059,7 @@ func (pm *ProcessManager) startGateway(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopProcess terminates a managed process
|
||||
// stopProcess terminates a managed process and its children
|
||||
func (pm *ProcessManager) stopProcess(name string) error {
|
||||
pidPath := filepath.Join(pm.pidsDir, fmt.Sprintf("%s.pid", name))
|
||||
pidBytes, err := os.ReadFile(pidPath)
|
||||
@ -933,8 +1067,16 @@ func (pm *ProcessManager) stopProcess(name string) error {
|
||||
return nil // Process not running or PID not found
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(string(pidBytes))
|
||||
pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes)))
|
||||
if err != nil {
|
||||
os.Remove(pidPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if process exists before trying to kill
|
||||
if !checkProcessRunning(pid) {
|
||||
os.Remove(pidPath)
|
||||
fmt.Fprintf(pm.logWriter, "✓ %s (not running)\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -944,10 +1086,43 @@ func (pm *ProcessManager) stopProcess(name string) error {
return nil
}

// Try graceful shutdown first (SIGTERM)
proc.Signal(os.Interrupt)

// Wait up to 2 seconds for graceful shutdown
gracefulShutdown := false
for i := 0; i < 20; i++ {
time.Sleep(100 * time.Millisecond)
if !checkProcessRunning(pid) {
gracefulShutdown = true
break
}
}

// Force kill if still running after graceful attempt
if !gracefulShutdown && checkProcessRunning(pid) {
proc.Signal(os.Kill)
time.Sleep(200 * time.Millisecond)

// Kill any child processes (platform-specific)
if runtime.GOOS != "windows" {
exec.Command("pkill", "-9", "-P", fmt.Sprintf("%d", pid)).Run()
}

// Final force kill attempt if somehow still alive
if checkProcessRunning(pid) {
exec.Command("kill", "-9", fmt.Sprintf("%d", pid)).Run()
time.Sleep(100 * time.Millisecond)
}
}

os.Remove(pidPath)

fmt.Fprintf(pm.logWriter, "✓ %s stopped\n", name)
if gracefulShutdown {
fmt.Fprintf(pm.logWriter, "✓ %s stopped gracefully\n", name)
} else {
fmt.Fprintf(pm.logWriter, "✓ %s stopped (forced)\n", name)
}
return nil
}
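stopProcess relies on a checkProcessRunning helper whose definition is not shown in this excerpt. A minimal sketch of how such a liveness probe is commonly written on Unix-like systems (signal 0 checks existence without delivering a signal); this is an assumption about the helper, not the repository's actual code:

// checkProcessRunningSketch reports whether a PID refers to a live process.
// Sending signal 0 performs existence/permission checks without killing anything.
func checkProcessRunningSketch(pid int) bool {
	proc, err := os.FindProcess(pid) // on Unix this always succeeds
	if err != nil {
		return false
	}
	return proc.Signal(syscall.Signal(0)) == nil
}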
|
||||
|
||||
202
pkg/environments/development/topology.go
Normal file
@ -0,0 +1,202 @@
|
||||
package development
|
||||
|
||||
import "fmt"
|
||||
|
||||
// NodeSpec defines configuration for a single dev environment node
|
||||
type NodeSpec struct {
|
||||
Name string // node-1, node-2, node-3, node-4, node-5
|
||||
ConfigFilename string // node-1.yaml, node-2.yaml, etc.
|
||||
DataDir string // relative path from .orama root
|
||||
P2PPort int // LibP2P listen port
|
||||
IPFSAPIPort int // IPFS API port
|
||||
IPFSSwarmPort int // IPFS Swarm port
|
||||
IPFSGatewayPort int // IPFS HTTP Gateway port
|
||||
RQLiteHTTPPort int // RQLite HTTP API port
|
||||
RQLiteRaftPort int // RQLite Raft consensus port
|
||||
ClusterAPIPort int // IPFS Cluster REST API port
|
||||
ClusterPort int // IPFS Cluster P2P port
|
||||
UnifiedGatewayPort int // Unified gateway port (proxies all services)
|
||||
RQLiteJoinTarget string // which node's RQLite Raft port to join (empty for first node)
|
||||
ClusterJoinTarget string // which node's cluster to join (empty for first node)
|
||||
}
|
||||
|
||||
// Topology defines the complete development environment topology
|
||||
type Topology struct {
|
||||
Nodes []NodeSpec
|
||||
GatewayPort int
|
||||
OlricHTTPPort int
|
||||
OlricMemberPort int
|
||||
AnonSOCKSPort int
|
||||
}
|
||||
|
||||
// DefaultTopology returns the default five-node dev environment topology
|
||||
func DefaultTopology() *Topology {
|
||||
return &Topology{
|
||||
Nodes: []NodeSpec{
|
||||
{
|
||||
Name: "node-1",
|
||||
ConfigFilename: "node-1.yaml",
|
||||
DataDir: "node-1",
|
||||
P2PPort: 4001,
|
||||
IPFSAPIPort: 4501,
|
||||
IPFSSwarmPort: 4101,
|
||||
IPFSGatewayPort: 7501,
|
||||
RQLiteHTTPPort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
ClusterAPIPort: 9094,
|
||||
ClusterPort: 9096,
|
||||
UnifiedGatewayPort: 6001,
|
||||
RQLiteJoinTarget: "", // First node - creates cluster
|
||||
ClusterJoinTarget: "",
|
||||
},
|
||||
{
|
||||
Name: "node-2",
|
||||
ConfigFilename: "node-2.yaml",
|
||||
DataDir: "node-2",
|
||||
P2PPort: 4011,
|
||||
IPFSAPIPort: 4511,
|
||||
IPFSSwarmPort: 4111,
|
||||
IPFSGatewayPort: 7511,
|
||||
RQLiteHTTPPort: 5011,
|
||||
RQLiteRaftPort: 7011,
|
||||
ClusterAPIPort: 9104,
|
||||
ClusterPort: 9106,
|
||||
UnifiedGatewayPort: 6002,
|
||||
RQLiteJoinTarget: "localhost:7001",
|
||||
ClusterJoinTarget: "localhost:9096",
|
||||
},
|
||||
{
|
||||
Name: "node-3",
|
||||
ConfigFilename: "node-3.yaml",
|
||||
DataDir: "node-3",
|
||||
P2PPort: 4002,
|
||||
IPFSAPIPort: 4502,
|
||||
IPFSSwarmPort: 4102,
|
||||
IPFSGatewayPort: 7502,
|
||||
RQLiteHTTPPort: 5002,
|
||||
RQLiteRaftPort: 7002,
|
||||
ClusterAPIPort: 9114,
|
||||
ClusterPort: 9116,
|
||||
UnifiedGatewayPort: 6003,
|
||||
RQLiteJoinTarget: "localhost:7001",
|
||||
ClusterJoinTarget: "localhost:9096",
|
||||
},
|
||||
{
|
||||
Name: "node-4",
|
||||
ConfigFilename: "node-4.yaml",
|
||||
DataDir: "node-4",
|
||||
P2PPort: 4003,
|
||||
IPFSAPIPort: 4503,
|
||||
IPFSSwarmPort: 4103,
|
||||
IPFSGatewayPort: 7503,
|
||||
RQLiteHTTPPort: 5003,
|
||||
RQLiteRaftPort: 7003,
|
||||
ClusterAPIPort: 9124,
|
||||
ClusterPort: 9126,
|
||||
UnifiedGatewayPort: 6004,
|
||||
RQLiteJoinTarget: "localhost:7001",
|
||||
ClusterJoinTarget: "localhost:9096",
|
||||
},
|
||||
{
|
||||
Name: "node-5",
|
||||
ConfigFilename: "node-5.yaml",
|
||||
DataDir: "node-5",
|
||||
P2PPort: 4004,
|
||||
IPFSAPIPort: 4504,
|
||||
IPFSSwarmPort: 4104,
|
||||
IPFSGatewayPort: 7504,
|
||||
RQLiteHTTPPort: 5004,
|
||||
RQLiteRaftPort: 7004,
|
||||
ClusterAPIPort: 9134,
|
||||
ClusterPort: 9136,
|
||||
UnifiedGatewayPort: 6005,
|
||||
RQLiteJoinTarget: "localhost:7001",
|
||||
ClusterJoinTarget: "localhost:9096",
|
||||
},
|
||||
},
|
||||
GatewayPort: 6000, // Main gateway on 6000 (nodes use 6001-6005)
|
||||
OlricHTTPPort: 3320,
|
||||
OlricMemberPort: 3322,
|
||||
AnonSOCKSPort: 9050,
|
||||
}
|
||||
}
|
||||
|
||||
// AllPorts returns a slice of all ports used in the topology
|
||||
func (t *Topology) AllPorts() []int {
|
||||
var ports []int
|
||||
|
||||
// Node-specific ports
|
||||
for _, node := range t.Nodes {
|
||||
ports = append(ports,
|
||||
node.P2PPort,
|
||||
node.IPFSAPIPort,
|
||||
node.IPFSSwarmPort,
|
||||
node.IPFSGatewayPort,
|
||||
node.RQLiteHTTPPort,
|
||||
node.RQLiteRaftPort,
|
||||
node.ClusterAPIPort,
|
||||
node.ClusterPort,
|
||||
node.UnifiedGatewayPort,
|
||||
)
|
||||
}
|
||||
|
||||
// Shared service ports
|
||||
ports = append(ports,
|
||||
t.GatewayPort,
|
||||
t.OlricHTTPPort,
|
||||
t.OlricMemberPort,
|
||||
t.AnonSOCKSPort,
|
||||
)
|
||||
|
||||
return ports
|
||||
}
|
||||
|
||||
// PortMap returns a human-readable mapping of ports to services
|
||||
func (t *Topology) PortMap() map[int]string {
|
||||
portMap := make(map[int]string)
|
||||
|
||||
for _, node := range t.Nodes {
|
||||
portMap[node.P2PPort] = fmt.Sprintf("%s P2P", node.Name)
|
||||
portMap[node.IPFSAPIPort] = fmt.Sprintf("%s IPFS API", node.Name)
|
||||
portMap[node.IPFSSwarmPort] = fmt.Sprintf("%s IPFS Swarm", node.Name)
|
||||
portMap[node.IPFSGatewayPort] = fmt.Sprintf("%s IPFS Gateway", node.Name)
|
||||
portMap[node.RQLiteHTTPPort] = fmt.Sprintf("%s RQLite HTTP", node.Name)
|
||||
portMap[node.RQLiteRaftPort] = fmt.Sprintf("%s RQLite Raft", node.Name)
|
||||
portMap[node.ClusterAPIPort] = fmt.Sprintf("%s IPFS Cluster API", node.Name)
|
||||
portMap[node.ClusterPort] = fmt.Sprintf("%s IPFS Cluster P2P", node.Name)
|
||||
portMap[node.UnifiedGatewayPort] = fmt.Sprintf("%s Unified Gateway", node.Name)
|
||||
}
|
||||
|
||||
portMap[t.GatewayPort] = "Gateway"
|
||||
portMap[t.OlricHTTPPort] = "Olric HTTP API"
|
||||
portMap[t.OlricMemberPort] = "Olric Memberlist"
|
||||
portMap[t.AnonSOCKSPort] = "Anon SOCKS Proxy"
|
||||
|
||||
return portMap
|
||||
}
|
||||
|
||||
// GetFirstNode returns the first node (the one that creates the cluster)
func (t *Topology) GetFirstNode() *NodeSpec {
if len(t.Nodes) > 0 {
return &t.Nodes[0]
}
return nil
}

// GetJoiningNodes returns all nodes except the first one (they join the cluster)
func (t *Topology) GetJoiningNodes() []NodeSpec {
if len(t.Nodes) > 1 {
return t.Nodes[1:]
}
return nil
}

// GetNodeByName returns a node by its name, or nil if not found
func (t *Topology) GetNodeByName(name string) *NodeSpec {
for i, node := range t.Nodes {
if node.Name == name {
return &t.Nodes[i]
}
}
return nil
}
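A small usage sketch of the topology helpers above: enumerate every port in the default dev topology and report any that are already bound locally. The dial-based probe mirrors the PortChecker that appears later in this diff; the package wiring is illustrative, not part of the repository.

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	topo := DefaultTopology() // assumes the topology package above is imported
	for port, svc := range topo.PortMap() {
		addr := fmt.Sprintf("localhost:%d", port)
		conn, err := net.DialTimeout("tcp", addr, 200*time.Millisecond)
		if err != nil {
			continue // nothing listening - port looks free
		}
		conn.Close()
		fmt.Printf("port %d (%s) is already in use\n", port, svc)
	}
}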
@ -2,10 +2,14 @@ package production
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// OSInfo contains detected operating system information
|
||||
@ -213,3 +217,116 @@ func (etc *ExternalToolChecker) CheckGoAvailable() bool {
|
||||
_, err := exec.LookPath("go")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// ResourceChecker validates system resources for production deployment
|
||||
type ResourceChecker struct{}
|
||||
|
||||
// NewResourceChecker creates a new resource checker
|
||||
func NewResourceChecker() *ResourceChecker {
|
||||
return &ResourceChecker{}
|
||||
}
|
||||
|
||||
// CheckDiskSpace validates sufficient disk space (minimum 10GB free)
|
||||
func (rc *ResourceChecker) CheckDiskSpace(path string) error {
|
||||
checkPath := path
|
||||
|
||||
// If the path doesn't exist, check the parent directory instead
|
||||
for checkPath != "/" {
|
||||
if _, err := os.Stat(checkPath); err == nil {
|
||||
break
|
||||
}
|
||||
checkPath = filepath.Dir(checkPath)
|
||||
}
|
||||
|
||||
var stat syscall.Statfs_t
|
||||
if err := syscall.Statfs(checkPath, &stat); err != nil {
|
||||
return fmt.Errorf("failed to check disk space: %w", err)
|
||||
}
|
||||
|
||||
// Available space in bytes
|
||||
availableBytes := stat.Bavail * uint64(stat.Bsize)
|
||||
minRequiredBytes := uint64(10 * 1024 * 1024 * 1024) // 10GB
|
||||
|
||||
if availableBytes < minRequiredBytes {
|
||||
availableGB := float64(availableBytes) / (1024 * 1024 * 1024)
|
||||
return fmt.Errorf("insufficient disk space: %.1fGB available, minimum 10GB required", availableGB)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckRAM validates sufficient RAM (minimum 2GB total)
|
||||
func (rc *ResourceChecker) CheckRAM() error {
|
||||
data, err := os.ReadFile("/proc/meminfo")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read memory info: %w", err)
|
||||
}
|
||||
|
||||
lines := strings.Split(string(data), "\n")
|
||||
totalKB := uint64(0)
|
||||
|
||||
for _, line := range lines {
|
||||
if strings.HasPrefix(line, "MemTotal:") {
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) >= 2 {
|
||||
if kb, err := strconv.ParseUint(parts[1], 10, 64); err == nil {
|
||||
totalKB = kb
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if totalKB == 0 {
|
||||
return fmt.Errorf("could not determine total RAM")
|
||||
}
|
||||
|
||||
minRequiredKB := uint64(2 * 1024 * 1024) // 2GB in KB
|
||||
if totalKB < minRequiredKB {
|
||||
totalGB := float64(totalKB) / (1024 * 1024)
|
||||
return fmt.Errorf("insufficient RAM: %.1fGB total, minimum 2GB required", totalGB)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckCPU validates sufficient CPU cores (minimum 2 cores)
|
||||
func (rc *ResourceChecker) CheckCPU() error {
|
||||
cores := runtime.NumCPU()
|
||||
if cores < 2 {
|
||||
return fmt.Errorf("insufficient CPU cores: %d available, minimum 2 required", cores)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
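The three checks above (disk, RAM, CPU) are naturally run together as a pre-flight step before provisioning. A brief sketch of how a caller might aggregate them; the target path is illustrative:

// Hypothetical pre-flight aggregation of the resource checks above.
rc := NewResourceChecker()
checks := []struct {
	name string
	err  error
}{
	{"disk space", rc.CheckDiskSpace("/home/debros/.orama")}, // illustrative path
	{"RAM", rc.CheckRAM()},
	{"CPU", rc.CheckCPU()},
}
for _, c := range checks {
	if c.err != nil {
		fmt.Printf("✗ %s: %v\n", c.name, c.err)
	} else {
		fmt.Printf("✓ %s OK\n", c.name)
	}
}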
|
||||
// PortChecker checks if ports are available or in use
type PortChecker struct{}

// NewPortChecker creates a new port checker
func NewPortChecker() *PortChecker {
return &PortChecker{}
}

// IsPortInUse checks if a specific port is already in use
func (pc *PortChecker) IsPortInUse(port int) bool {
addr := fmt.Sprintf("localhost:%d", port)
conn, err := net.Dial("tcp", addr)
if err != nil {
// Port is not in use
return false
}
defer conn.Close()
// Port is in use
return true
}

// IsPortInUseOnHost checks if a port is in use on a specific host
func (pc *PortChecker) IsPortInUseOnHost(host string, port int) bool {
addr := net.JoinHostPort(host, fmt.Sprintf("%d", port))
conn, err := net.Dial("tcp", addr)
if err != nil {
return false
}
defer conn.Close()
return true
}
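Note that the dial-based check above only reports a port as in use when something is actively accepting connections on it. An alternative some setups prefer is to test bindability instead; a hedged sketch of that variant (not the repository's PortChecker):

// isPortFree reports whether this process could bind the port right now.
func isPortFree(port int) bool {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		return false // bind failed: port taken or insufficient privileges
	}
	ln.Close()
	return true
}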
|
||||
@ -4,80 +4,214 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/environments/templates"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// ConfigGenerator manages generation of node, gateway, and service configs
|
||||
type ConfigGenerator struct {
|
||||
debrosDir string
|
||||
oramaDir string
|
||||
}
|
||||
|
||||
// NewConfigGenerator creates a new config generator
|
||||
func NewConfigGenerator(debrosDir string) *ConfigGenerator {
|
||||
func NewConfigGenerator(oramaDir string) *ConfigGenerator {
|
||||
return &ConfigGenerator{
|
||||
debrosDir: debrosDir,
|
||||
oramaDir: oramaDir,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateNodeConfig generates node.yaml configuration
|
||||
func (cg *ConfigGenerator) GenerateNodeConfig(isBootstrap bool, bootstrapPeers []string, vpsIP string) (string, error) {
|
||||
var nodeID string
|
||||
if isBootstrap {
|
||||
nodeID = "bootstrap"
|
||||
} else {
|
||||
nodeID = "node"
|
||||
// extractIPFromMultiaddr extracts the IP address from a peer multiaddr
|
||||
// Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols
|
||||
// Returns the IP address as a string, or empty string if extraction/resolution fails
|
||||
func extractIPFromMultiaddr(multiaddrStr string) string {
|
||||
ma, err := multiaddr.NewMultiaddr(multiaddrStr)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if isBootstrap {
|
||||
data := templates.BootstrapConfigData{
|
||||
NodeID: nodeID,
|
||||
P2PPort: 4001,
|
||||
DataDir: filepath.Join(cg.debrosDir, "data", "bootstrap"),
|
||||
RQLiteHTTPPort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 4501,
|
||||
// First, try to extract direct IP address
|
||||
var ip net.IP
|
||||
var dnsName string
|
||||
multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
|
||||
switch c.Protocol().Code {
|
||||
case multiaddr.P_IP4, multiaddr.P_IP6:
|
||||
ip = net.ParseIP(c.Value())
|
||||
return false // Stop iteration - found IP
|
||||
case multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_DNSADDR:
|
||||
dnsName = c.Value()
|
||||
// Continue to check for IP, but remember DNS name as fallback
|
||||
}
|
||||
return templates.RenderBootstrapConfig(data)
|
||||
return true
|
||||
})
|
||||
|
||||
// If we found a direct IP, return it
|
||||
if ip != nil {
|
||||
return ip.String()
|
||||
}
|
||||
|
||||
// Regular node
|
||||
rqliteJoinAddr := "localhost:7001"
|
||||
// If we found a DNS name, try to resolve it
|
||||
if dnsName != "" {
|
||||
if resolvedIPs, err := net.LookupIP(dnsName); err == nil && len(resolvedIPs) > 0 {
|
||||
// Prefer IPv4 addresses, but accept IPv6 if that's all we have
|
||||
for _, resolvedIP := range resolvedIPs {
|
||||
if resolvedIP.To4() != nil {
|
||||
return resolvedIP.String()
|
||||
}
|
||||
}
|
||||
// Return first IPv6 address if no IPv4 found
|
||||
return resolvedIPs[0].String()
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// inferPeerIP extracts the IP address from peer multiaddrs
|
||||
// Iterates through all peers to find a valid IP (supports DNS resolution)
|
||||
// Falls back to vpsIP if provided, otherwise returns empty string
|
||||
func inferPeerIP(peers []string, vpsIP string) string {
|
||||
// Try to extract IP from each peer (in order)
|
||||
for _, peer := range peers {
|
||||
if ip := extractIPFromMultiaddr(peer); ip != "" {
|
||||
return ip
|
||||
}
|
||||
}
|
||||
// Fall back to vpsIP if provided
|
||||
if vpsIP != "" {
|
||||
rqliteJoinAddr = vpsIP + ":7001"
|
||||
return vpsIP
|
||||
}
|
||||
return ""
|
||||
}
|
||||
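A brief usage sketch of the multiaddr helpers above, with made-up peer addresses (one direct IPv4, one DNS name that would be resolved at runtime); the peer IDs, hostnames, and IPs are illustrative documentation values:

// Illustrative inputs only.
peers := []string{
	"/ip4/203.0.113.10/tcp/4001/p2p/12D3KooWExamplePeerID",
	"/dns4/node-2.example.org/tcp/4001/p2p/12D3KooWAnotherPeerID",
}
fmt.Println(extractIPFromMultiaddr(peers[0]))    // "203.0.113.10"
fmt.Println(inferPeerIP(peers, "198.51.100.7")) // first extractable IP, else the fallback VPS IP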
|
||||
// GenerateNodeConfig generates node.yaml configuration (unified architecture)
|
||||
func (cg *ConfigGenerator) GenerateNodeConfig(peerAddresses []string, vpsIP string, joinAddress string, domain string, enableHTTPS bool) (string, error) {
|
||||
// Generate node ID from domain or use default
|
||||
nodeID := "node"
|
||||
if domain != "" {
|
||||
// Extract node identifier from domain (e.g., "node-123" from "node-123.orama.network")
|
||||
parts := strings.Split(domain, ".")
|
||||
if len(parts) > 0 {
|
||||
nodeID = parts[0]
|
||||
}
|
||||
}
|
||||
|
||||
// Determine advertise addresses - use vpsIP if provided
|
||||
// When HTTPS is enabled, RQLite uses native TLS on port 7002 (not SNI gateway)
|
||||
// This avoids conflicts between SNI gateway TLS termination and RQLite's native TLS
|
||||
var httpAdvAddr, raftAdvAddr string
|
||||
if vpsIP != "" {
|
||||
httpAdvAddr = net.JoinHostPort(vpsIP, "5001")
|
||||
if enableHTTPS {
|
||||
// Use direct IP:7002 for Raft - RQLite handles TLS natively via -node-cert
|
||||
// This bypasses the SNI gateway which would cause TLS termination conflicts
|
||||
raftAdvAddr = net.JoinHostPort(vpsIP, "7002")
|
||||
} else {
|
||||
raftAdvAddr = net.JoinHostPort(vpsIP, "7001")
|
||||
}
|
||||
} else {
|
||||
// Fallback to localhost if no vpsIP
|
||||
httpAdvAddr = "localhost:5001"
|
||||
raftAdvAddr = "localhost:7001"
|
||||
}
|
||||
|
||||
// Determine RQLite join address
|
||||
// When HTTPS is enabled, use port 7002 (direct RQLite TLS) instead of 7001 (SNI gateway)
|
||||
joinPort := "7001"
|
||||
if enableHTTPS {
|
||||
joinPort = "7002"
|
||||
}
|
||||
|
||||
var rqliteJoinAddr string
|
||||
if joinAddress != "" {
|
||||
// Use explicitly provided join address
|
||||
// If it contains :7001 and HTTPS is enabled, update to :7002
|
||||
if enableHTTPS && strings.Contains(joinAddress, ":7001") {
|
||||
rqliteJoinAddr = strings.Replace(joinAddress, ":7001", ":7002", 1)
|
||||
} else {
|
||||
rqliteJoinAddr = joinAddress
|
||||
}
|
||||
} else if len(peerAddresses) > 0 {
|
||||
// Infer join address from peers
|
||||
peerIP := inferPeerIP(peerAddresses, "")
|
||||
if peerIP != "" {
|
||||
rqliteJoinAddr = net.JoinHostPort(peerIP, joinPort)
|
||||
// Validate that join address doesn't match this node's own raft address (would cause self-join)
|
||||
if rqliteJoinAddr == raftAdvAddr {
|
||||
rqliteJoinAddr = "" // Clear it - this is the first node
|
||||
}
|
||||
}
|
||||
}
|
||||
// If no join address and no peers, this is the first node - it will create the cluster
|
||||
|
||||
// TLS/ACME configuration
|
||||
tlsCacheDir := ""
|
||||
httpPort := 80
|
||||
httpsPort := 443
|
||||
if enableHTTPS {
|
||||
tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
|
||||
}
|
||||
|
||||
// Unified data directory (all nodes equal)
|
||||
// When HTTPS/SNI is enabled, use internal port 7002 for RQLite Raft (SNI gateway listens on 7001)
|
||||
raftInternalPort := 7001
|
||||
if enableHTTPS {
|
||||
raftInternalPort = 7002 // Internal port when SNI is enabled
|
||||
}
|
||||
|
||||
data := templates.NodeConfigData{
|
||||
NodeID: nodeID,
|
||||
P2PPort: 4001,
|
||||
DataDir: filepath.Join(cg.debrosDir, "data", "node"),
|
||||
RQLiteHTTPPort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
RQLiteJoinAddress: rqliteJoinAddr,
|
||||
BootstrapPeers: bootstrapPeers,
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 4501,
|
||||
NodeID: nodeID,
|
||||
P2PPort: 4001,
|
||||
DataDir: filepath.Join(cg.oramaDir, "data"),
|
||||
RQLiteHTTPPort: 5001,
|
||||
RQLiteRaftPort: 7001, // External SNI port
|
||||
RQLiteRaftInternalPort: raftInternalPort, // Internal RQLite binding port
|
||||
RQLiteJoinAddress: rqliteJoinAddr,
|
||||
BootstrapPeers: peerAddresses,
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 4501,
|
||||
HTTPAdvAddress: httpAdvAddr,
|
||||
RaftAdvAddress: raftAdvAddr,
|
||||
UnifiedGatewayPort: 6001,
|
||||
Domain: domain,
|
||||
EnableHTTPS: enableHTTPS,
|
||||
TLSCacheDir: tlsCacheDir,
|
||||
HTTPPort: httpPort,
|
||||
HTTPSPort: httpsPort,
|
||||
}
|
||||
|
||||
// When HTTPS is enabled, configure RQLite node-to-node TLS encryption
|
||||
// RQLite handles TLS natively on port 7002, bypassing the SNI gateway
|
||||
// This avoids TLS termination conflicts between SNI gateway and RQLite
|
||||
if enableHTTPS && domain != "" {
|
||||
data.NodeCert = filepath.Join(tlsCacheDir, domain+".crt")
|
||||
data.NodeKey = filepath.Join(tlsCacheDir, domain+".key")
|
||||
// Skip verification since nodes may have different domain certificates
|
||||
data.NodeNoVerify = true
|
||||
}
|
||||
|
||||
return templates.RenderNodeConfig(data)
|
||||
}
|
||||
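The comments in GenerateNodeConfig above describe the port rule: with HTTPS enabled, RQLite terminates TLS itself on 7002 while the SNI gateway owns 7001, so join addresses written for :7001 are rewritten to :7002. A tiny sketch of that rule in isolation, using an illustrative address:

// rewriteJoinPort applies the HTTPS port rule described above (sketch only).
func rewriteJoinPort(joinAddress string, enableHTTPS bool) string {
	if enableHTTPS && strings.Contains(joinAddress, ":7001") {
		return strings.Replace(joinAddress, ":7001", ":7002", 1)
	}
	return joinAddress
}

// e.g. rewriteJoinPort("203.0.113.10:7001", true) == "203.0.113.10:7002"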
|
||||
// GenerateGatewayConfig generates gateway.yaml configuration
|
||||
func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
|
||||
func (cg *ConfigGenerator) GenerateGatewayConfig(peerAddresses []string, enableHTTPS bool, domain string, olricServers []string) (string, error) {
|
||||
tlsCacheDir := ""
|
||||
if enableHTTPS {
|
||||
tlsCacheDir = filepath.Join(cg.debrosDir, "tls-cache")
|
||||
tlsCacheDir = filepath.Join(cg.oramaDir, "tls-cache")
|
||||
}
|
||||
|
||||
data := templates.GatewayConfigData{
|
||||
ListenPort: 6001,
|
||||
BootstrapPeers: bootstrapPeers,
|
||||
BootstrapPeers: peerAddresses,
|
||||
OlricServers: olricServers,
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 4501,
|
||||
@ -90,41 +224,65 @@ func (cg *ConfigGenerator) GenerateGatewayConfig(bootstrapPeers []string, enable
|
||||
}
|
||||
|
||||
// GenerateOlricConfig generates Olric configuration
|
||||
func (cg *ConfigGenerator) GenerateOlricConfig(bindAddr string, httpPort, memberlistPort int) (string, error) {
|
||||
func (cg *ConfigGenerator) GenerateOlricConfig(serverBindAddr string, httpPort int, memberlistBindAddr string, memberlistPort int, memberlistEnv string) (string, error) {
|
||||
data := templates.OlricConfigData{
|
||||
BindAddr: bindAddr,
|
||||
HTTPPort: httpPort,
|
||||
MemberlistPort: memberlistPort,
|
||||
ServerBindAddr: serverBindAddr,
|
||||
HTTPPort: httpPort,
|
||||
MemberlistBindAddr: memberlistBindAddr,
|
||||
MemberlistPort: memberlistPort,
|
||||
MemberlistEnvironment: memberlistEnv,
|
||||
}
|
||||
return templates.RenderOlricConfig(data)
|
||||
}
|
||||
|
||||
// SecretGenerator manages generation of shared secrets and keys
|
||||
type SecretGenerator struct {
|
||||
debrosDir string
|
||||
oramaDir string
|
||||
}
|
||||
|
||||
// NewSecretGenerator creates a new secret generator
|
||||
func NewSecretGenerator(debrosDir string) *SecretGenerator {
|
||||
func NewSecretGenerator(oramaDir string) *SecretGenerator {
|
||||
return &SecretGenerator{
|
||||
debrosDir: debrosDir,
|
||||
oramaDir: oramaDir,
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateClusterSecret ensures a cluster secret is 32 bytes of hex
func ValidateClusterSecret(secret string) error {
secret = strings.TrimSpace(secret)
if secret == "" {
return fmt.Errorf("cluster secret cannot be empty")
}
if len(secret) != 64 {
return fmt.Errorf("cluster secret must be 64 hex characters (32 bytes)")
}
if _, err := hex.DecodeString(secret); err != nil {
return fmt.Errorf("cluster secret must be valid hex: %w", err)
}
return nil
}
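A short sketch of producing a secret that passes the validation above: 32 random bytes hex-encoded to 64 characters, which also appears to be what EnsureClusterSecret below persists. Purely illustrative:

// Generate 32 random bytes and hex-encode them to 64 characters.
buf := make([]byte, 32)
if _, err := rand.Read(buf); err != nil { // crypto/rand
	log.Fatalf("failed to generate secret: %v", err)
}
secret := hex.EncodeToString(buf)
if err := ValidateClusterSecret(secret); err != nil {
	log.Fatalf("generated secret unexpectedly failed validation: %v", err)
}
fmt.Println("cluster secret:", secret)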
|
||||
// EnsureClusterSecret gets or generates the IPFS Cluster secret
|
||||
func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
|
||||
secretPath := filepath.Join(sg.debrosDir, "secrets", "cluster-secret")
|
||||
secretPath := filepath.Join(sg.oramaDir, "secrets", "cluster-secret")
|
||||
secretDir := filepath.Dir(secretPath)
|
||||
|
||||
// Ensure secrets directory exists
|
||||
if err := os.MkdirAll(secretDir, 0755); err != nil {
|
||||
// Ensure secrets directory exists with restricted permissions (0700)
|
||||
if err := os.MkdirAll(secretDir, 0700); err != nil {
|
||||
return "", fmt.Errorf("failed to create secrets directory: %w", err)
|
||||
}
|
||||
// Ensure directory permissions are correct even if it already existed
|
||||
if err := os.Chmod(secretDir, 0700); err != nil {
|
||||
return "", fmt.Errorf("failed to set secrets directory permissions: %w", err)
|
||||
}
|
||||
|
||||
// Try to read existing secret
|
||||
if data, err := os.ReadFile(secretPath); err == nil {
|
||||
secret := strings.TrimSpace(string(data))
|
||||
if len(secret) == 64 {
|
||||
if err := ensureSecretFilePermissions(secretPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return secret, nil
|
||||
}
|
||||
}
|
||||
@ -140,19 +298,48 @@ func (sg *SecretGenerator) EnsureClusterSecret() (string, error) {
|
||||
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
|
||||
return "", fmt.Errorf("failed to save cluster secret: %w", err)
|
||||
}
|
||||
if err := ensureSecretFilePermissions(secretPath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
func ensureSecretFilePermissions(secretPath string) error {
|
||||
if err := os.Chmod(secretPath, 0600); err != nil {
|
||||
return fmt.Errorf("failed to set permissions on %s: %w", secretPath, err)
|
||||
}
|
||||
|
||||
if usr, err := user.Lookup("debros"); err == nil {
|
||||
uid, err := strconv.Atoi(usr.Uid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse debros UID: %w", err)
|
||||
}
|
||||
gid, err := strconv.Atoi(usr.Gid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse debros GID: %w", err)
|
||||
}
|
||||
if err := os.Chown(secretPath, uid, gid); err != nil {
|
||||
return fmt.Errorf("failed to change ownership of %s: %w", secretPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureSwarmKey gets or generates the IPFS private swarm key
|
||||
func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
|
||||
swarmKeyPath := filepath.Join(sg.debrosDir, "secrets", "swarm.key")
|
||||
swarmKeyPath := filepath.Join(sg.oramaDir, "secrets", "swarm.key")
|
||||
secretDir := filepath.Dir(swarmKeyPath)
|
||||
|
||||
// Ensure secrets directory exists
|
||||
if err := os.MkdirAll(secretDir, 0755); err != nil {
|
||||
// Ensure secrets directory exists with restricted permissions (0700)
|
||||
if err := os.MkdirAll(secretDir, 0700); err != nil {
|
||||
return nil, fmt.Errorf("failed to create secrets directory: %w", err)
|
||||
}
|
||||
// Ensure directory permissions are correct even if it already existed
|
||||
if err := os.Chmod(secretDir, 0700); err != nil {
|
||||
return nil, fmt.Errorf("failed to set secrets directory permissions: %w", err)
|
||||
}
|
||||
|
||||
// Try to read existing key
|
||||
if data, err := os.ReadFile(swarmKeyPath); err == nil {
|
||||
@ -178,9 +365,10 @@ func (sg *SecretGenerator) EnsureSwarmKey() ([]byte, error) {
|
||||
return []byte(content), nil
|
||||
}
|
||||
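EnsureSwarmKey's generation path is elided in this diff. For reference, go-ipfs private-network swarm keys use a small text format: a version line, an encoding line, and 32 random bytes as hex. A hedged sketch of building a key in that format; the repository's exact generation code is not shown here, and the write step is only described in the comment:

// Sketch: build swarm.key content in the standard /key/swarm/psk/1.0.0/ text format.
buf := make([]byte, 32)
if _, err := rand.Read(buf); err != nil { // crypto/rand
	log.Fatalf("failed to generate swarm key: %v", err)
}
content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", hex.EncodeToString(buf))
// The real code presumably writes this to swarmKeyPath with 0600 permissions.
_ = content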
|
||||
// EnsureNodeIdentity gets or generates the node's LibP2P identity
|
||||
func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error) {
|
||||
keyDir := filepath.Join(sg.debrosDir, "data", nodeType)
|
||||
// EnsureNodeIdentity gets or generates the node's LibP2P identity (unified - no bootstrap/node distinction)
|
||||
func (sg *SecretGenerator) EnsureNodeIdentity() (peer.ID, error) {
|
||||
// Unified data directory (no bootstrap/node distinction)
|
||||
keyDir := filepath.Join(sg.oramaDir, "data")
|
||||
keyPath := filepath.Join(keyDir, "identity.key")
|
||||
|
||||
// Ensure data directory exists
|
||||
@ -221,9 +409,16 @@ func (sg *SecretGenerator) EnsureNodeIdentity(nodeType string) (peer.ID, error)
|
||||
|
||||
// SaveConfig writes a configuration file to disk
|
||||
func (sg *SecretGenerator) SaveConfig(filename string, content string) error {
|
||||
configDir := filepath.Join(sg.debrosDir, "configs")
|
||||
var configDir string
|
||||
// gateway.yaml goes to data/ directory, other configs go to configs/
|
||||
if filename == "gateway.yaml" {
|
||||
configDir = filepath.Join(sg.oramaDir, "data")
|
||||
} else {
|
||||
configDir = filepath.Join(sg.oramaDir, "configs")
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(configDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create configs directory: %w", err)
|
||||
return fmt.Errorf("failed to create config directory: %w", err)
|
||||
}
|
||||
|
||||
configPath := filepath.Join(configDir, filename)
|
||||
|
||||
File diff suppressed because it is too large
@ -5,55 +5,98 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ProductionSetup orchestrates the entire production deployment
|
||||
type ProductionSetup struct {
|
||||
osInfo *OSInfo
|
||||
arch string
|
||||
debrosHome string
|
||||
debrosDir string
|
||||
logWriter io.Writer
|
||||
forceReconfigure bool
|
||||
skipOptionalDeps bool
|
||||
privChecker *PrivilegeChecker
|
||||
osDetector *OSDetector
|
||||
archDetector *ArchitectureDetector
|
||||
fsProvisioner *FilesystemProvisioner
|
||||
userProvisioner *UserProvisioner
|
||||
stateDetector *StateDetector
|
||||
configGenerator *ConfigGenerator
|
||||
secretGenerator *SecretGenerator
|
||||
serviceGenerator *SystemdServiceGenerator
|
||||
serviceController *SystemdController
|
||||
binaryInstaller *BinaryInstaller
|
||||
branch string
|
||||
osInfo *OSInfo
|
||||
arch string
|
||||
oramaHome string
|
||||
oramaDir string
|
||||
logWriter io.Writer
|
||||
forceReconfigure bool
|
||||
skipOptionalDeps bool
|
||||
skipResourceChecks bool
|
||||
privChecker *PrivilegeChecker
|
||||
osDetector *OSDetector
|
||||
archDetector *ArchitectureDetector
|
||||
resourceChecker *ResourceChecker
|
||||
portChecker *PortChecker
|
||||
fsProvisioner *FilesystemProvisioner
|
||||
userProvisioner *UserProvisioner
|
||||
stateDetector *StateDetector
|
||||
configGenerator *ConfigGenerator
|
||||
secretGenerator *SecretGenerator
|
||||
serviceGenerator *SystemdServiceGenerator
|
||||
serviceController *SystemdController
|
||||
binaryInstaller *BinaryInstaller
|
||||
branch string
|
||||
skipRepoUpdate bool
|
||||
NodePeerID string // Captured during Phase3 for later display
|
||||
}
|
||||
|
||||
// ReadBranchPreference reads the stored branch preference from disk
|
||||
func ReadBranchPreference(oramaDir string) string {
|
||||
branchFile := filepath.Join(oramaDir, ".branch")
|
||||
data, err := os.ReadFile(branchFile)
|
||||
if err != nil {
|
||||
return "main" // Default to main if file doesn't exist
|
||||
}
|
||||
branch := strings.TrimSpace(string(data))
|
||||
if branch == "" {
|
||||
return "main"
|
||||
}
|
||||
return branch
|
||||
}
|
||||
|
||||
// SaveBranchPreference saves the branch preference to disk
|
||||
func SaveBranchPreference(oramaDir, branch string) error {
|
||||
branchFile := filepath.Join(oramaDir, ".branch")
|
||||
if err := os.MkdirAll(oramaDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create debros directory: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(branchFile, []byte(branch), 0644); err != nil {
|
||||
return fmt.Errorf("failed to save branch preference: %w", err)
|
||||
}
|
||||
exec.Command("chown", "debros:debros", branchFile).Run()
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewProductionSetup creates a new production setup orchestrator
|
||||
func NewProductionSetup(debrosHome string, logWriter io.Writer, forceReconfigure bool) *ProductionSetup {
|
||||
debrosDir := debrosHome + "/.debros"
|
||||
func NewProductionSetup(oramaHome string, logWriter io.Writer, forceReconfigure bool, branch string, skipRepoUpdate bool, skipResourceChecks bool) *ProductionSetup {
|
||||
oramaDir := filepath.Join(oramaHome, ".orama")
|
||||
arch, _ := (&ArchitectureDetector{}).Detect()
|
||||
|
||||
// If branch is empty, try to read from stored preference, otherwise default to main
|
||||
if branch == "" {
|
||||
branch = ReadBranchPreference(oramaDir)
|
||||
}
|
||||
|
||||
return &ProductionSetup{
|
||||
debrosHome: debrosHome,
|
||||
debrosDir: debrosDir,
|
||||
logWriter: logWriter,
|
||||
forceReconfigure: forceReconfigure,
|
||||
arch: arch,
|
||||
branch: "main",
|
||||
privChecker: &PrivilegeChecker{},
|
||||
osDetector: &OSDetector{},
|
||||
archDetector: &ArchitectureDetector{},
|
||||
fsProvisioner: NewFilesystemProvisioner(debrosHome),
|
||||
userProvisioner: NewUserProvisioner("debros", debrosHome, "/bin/bash"),
|
||||
stateDetector: NewStateDetector(debrosDir),
|
||||
configGenerator: NewConfigGenerator(debrosDir),
|
||||
secretGenerator: NewSecretGenerator(debrosDir),
|
||||
serviceGenerator: NewSystemdServiceGenerator(debrosHome, debrosDir),
|
||||
serviceController: NewSystemdController(),
|
||||
binaryInstaller: NewBinaryInstaller(arch, logWriter),
|
||||
oramaHome: oramaHome,
|
||||
oramaDir: oramaDir,
|
||||
logWriter: logWriter,
|
||||
forceReconfigure: forceReconfigure,
|
||||
arch: arch,
|
||||
branch: branch,
|
||||
skipRepoUpdate: skipRepoUpdate,
|
||||
skipResourceChecks: skipResourceChecks,
|
||||
privChecker: &PrivilegeChecker{},
|
||||
osDetector: &OSDetector{},
|
||||
archDetector: &ArchitectureDetector{},
|
||||
resourceChecker: NewResourceChecker(),
|
||||
portChecker: NewPortChecker(),
|
||||
fsProvisioner: NewFilesystemProvisioner(oramaHome),
|
||||
userProvisioner: NewUserProvisioner("debros", oramaHome, "/bin/bash"),
|
||||
stateDetector: NewStateDetector(oramaDir),
|
||||
configGenerator: NewConfigGenerator(oramaDir),
|
||||
secretGenerator: NewSecretGenerator(oramaDir),
|
||||
serviceGenerator: NewSystemdServiceGenerator(oramaHome, oramaDir),
|
||||
serviceController: NewSystemdController(),
|
||||
binaryInstaller: NewBinaryInstaller(arch, logWriter),
|
||||
}
|
||||
}
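A hedged usage sketch of the updated constructor signature; the home path, flag values, and log destination below are illustrative, not taken from the installer, and the call is assumed to live in code that already imports this package:

setup := NewProductionSetup(
	"/home/debros", // oramaHome
	os.Stdout,      // logWriter
	false,          // forceReconfigure
	"main",         // branch ("" falls back to the stored .branch preference)
	false,          // skipRepoUpdate
	false,          // skipResourceChecks
)
if setup.IsUpdate() {
	fmt.Println("existing installation detected; configs will be refreshed in place")
}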
|
||||
@ -64,6 +107,11 @@ func (ps *ProductionSetup) logf(format string, args ...interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// IsUpdate detects if this is an update to an existing installation
|
||||
func (ps *ProductionSetup) IsUpdate() bool {
|
||||
return ps.stateDetector.IsConfigured() || ps.stateDetector.HasIPFSData()
|
||||
}
|
||||
|
||||
// Phase1CheckPrerequisites performs initial environment validation
|
||||
func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
|
||||
ps.logf("Phase 1: Checking prerequisites...")
|
||||
@ -113,6 +161,29 @@ func (ps *ProductionSetup) Phase1CheckPrerequisites() error {
|
||||
}
|
||||
ps.logf(" ✓ Basic dependencies available")
|
||||
|
||||
// Check system resources
|
||||
if ps.skipResourceChecks {
|
||||
ps.logf(" ⚠️ Skipping system resource checks (disk, RAM, CPU) due to --ignore-resource-checks flag")
|
||||
} else {
|
||||
if err := ps.resourceChecker.CheckDiskSpace(ps.oramaHome); err != nil {
|
||||
ps.logf(" ❌ %v", err)
|
||||
return err
|
||||
}
|
||||
ps.logf(" ✓ Sufficient disk space available")
|
||||
|
||||
if err := ps.resourceChecker.CheckRAM(); err != nil {
|
||||
ps.logf(" ❌ %v", err)
|
||||
return err
|
||||
}
|
||||
ps.logf(" ✓ Sufficient RAM available")
|
||||
|
||||
if err := ps.resourceChecker.CheckCPU(); err != nil {
|
||||
ps.logf(" ❌ %v", err)
|
||||
return err
|
||||
}
|
||||
ps.logf(" ✓ Sufficient CPU cores available")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
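The ResourceChecker implementation is outside this hunk; the call sites only imply per-resource checks that return an error when a minimum is not met. A sketch of the disk-space check under that assumption (the 10 GB threshold and the use of syscall.Statfs are illustrative, not the project's actual limits):

import (
	"fmt"
	"syscall"
)

// CheckDiskSpace fails when the filesystem holding path has less than roughly 10 GB free.
func (rc *ResourceChecker) CheckDiskSpace(path string) error {
	var st syscall.Statfs_t
	if err := syscall.Statfs(path, &st); err != nil {
		return fmt.Errorf("statfs %s: %w", path, err)
	}
	freeGB := float64(st.Bavail) * float64(st.Bsize) / (1 << 30)
	if freeGB < 10 {
		return fmt.Errorf("insufficient disk space: %.1f GB free, at least 10 GB required", freeGB)
	}
	return nil
}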
|
||||
@ -140,7 +211,7 @@ func (ps *ProductionSetup) Phase2ProvisionEnvironment() error {
|
||||
}
|
||||
}
|
||||
|
||||
// Create directory structure
|
||||
// Create directory structure (unified structure)
|
||||
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
|
||||
return fmt.Errorf("failed to create directory structure: %w", err)
|
||||
}
|
||||
@ -186,8 +257,13 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
|
||||
ps.logf(" ⚠️ Olric install warning: %v", err)
|
||||
}
|
||||
|
||||
// Install anyone-client for SOCKS5 proxy
|
||||
if err := ps.binaryInstaller.InstallAnyoneClient(); err != nil {
|
||||
ps.logf(" ⚠️ anyone-client install warning: %v", err)
|
||||
}
|
||||
|
||||
// Install DeBros binaries
|
||||
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.debrosHome); err != nil {
|
||||
if err := ps.binaryInstaller.InstallDeBrosBinaries(ps.branch, ps.oramaHome, ps.skipRepoUpdate); err != nil {
|
||||
return fmt.Errorf("failed to install DeBros binaries: %w", err)
|
||||
}
|
||||
|
||||
@ -196,40 +272,80 @@ func (ps *ProductionSetup) Phase2bInstallBinaries() error {
|
||||
}
|
||||
|
||||
// Phase2cInitializeServices initializes service repositories and configurations
|
||||
func (ps *ProductionSetup) Phase2cInitializeServices(nodeType string) error {
|
||||
// ipfsPeer can be nil for the first node, or contain peer info for joining nodes
|
||||
// ipfsClusterPeer can be nil for the first node, or contain IPFS Cluster peer info for joining nodes
|
||||
func (ps *ProductionSetup) Phase2cInitializeServices(peerAddresses []string, vpsIP string, ipfsPeer *IPFSPeerInfo, ipfsClusterPeer *IPFSClusterPeerInfo) error {
|
||||
ps.logf("Phase 2c: Initializing services...")
|
||||
|
||||
// Get cluster secret for IPFS
|
||||
clusterSecret, err := os.ReadFile(ps.debrosDir + "/secrets/cluster-secret")
|
||||
// Ensure directories exist (unified structure)
|
||||
if err := ps.fsProvisioner.EnsureDirectoryStructure(); err != nil {
|
||||
return fmt.Errorf("failed to create directories: %w", err)
|
||||
}
|
||||
|
||||
// Build paths - unified data directory (all nodes equal)
|
||||
dataDir := filepath.Join(ps.oramaDir, "data")
|
||||
|
||||
// Initialize IPFS repo with correct path structure
|
||||
// Use port 4501 for API (to avoid conflict with RQLite on 5001), 8080 for gateway (standard), 4101 for swarm (to avoid conflict with LibP2P on 4001)
|
||||
ipfsRepoPath := filepath.Join(dataDir, "ipfs", "repo")
|
||||
if err := ps.binaryInstaller.InitializeIPFSRepo(ipfsRepoPath, filepath.Join(ps.oramaDir, "secrets", "swarm.key"), 4501, 8080, 4101, ipfsPeer); err != nil {
|
||||
return fmt.Errorf("failed to initialize IPFS repo: %w", err)
|
||||
}
|
||||
|
||||
// Initialize IPFS Cluster config (runs ipfs-cluster-service init)
|
||||
clusterPath := filepath.Join(dataDir, "ipfs-cluster")
|
||||
clusterSecret, err := ps.secretGenerator.EnsureClusterSecret()
|
||||
if err != nil {
|
||||
clusterSecret = []byte("")
|
||||
return fmt.Errorf("failed to get cluster secret: %w", err)
|
||||
}
|
||||
|
||||
// Initialize IPFS repo
|
||||
ipfsRepoPath := ps.debrosDir + "/data/ipfs"
|
||||
if err := ps.binaryInstaller.InitializeIPFSRepo(nodeType, ipfsRepoPath, ps.debrosDir+"/secrets/swarm.key"); err != nil {
|
||||
ps.logf(" ⚠️ IPFS initialization warning: %v", err)
|
||||
// Get cluster peer addresses from IPFS Cluster peer info if available
|
||||
var clusterPeers []string
|
||||
if ipfsClusterPeer != nil && ipfsClusterPeer.PeerID != "" {
|
||||
// Construct cluster peer multiaddress using the discovered peer ID
|
||||
// Format: /ip4/<ip>/tcp/9098/p2p/<cluster-peer-id>
|
||||
peerIP := inferPeerIP(peerAddresses, vpsIP)
|
||||
if peerIP != "" {
|
||||
// Construct the bootstrap multiaddress for IPFS Cluster
|
||||
// Note: IPFS Cluster listens on port 9098 for cluster communication
|
||||
clusterBootstrapAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, ipfsClusterPeer.PeerID)
|
||||
clusterPeers = []string{clusterBootstrapAddr}
|
||||
ps.logf(" ℹ️ IPFS Cluster will connect to peer: %s", clusterBootstrapAddr)
|
||||
} else if len(ipfsClusterPeer.Addrs) > 0 {
|
||||
// Fallback: use the addresses from discovery (if they include peer ID)
|
||||
for _, addr := range ipfsClusterPeer.Addrs {
|
||||
if strings.Contains(addr, ipfsClusterPeer.PeerID) {
|
||||
clusterPeers = append(clusterPeers, addr)
|
||||
}
|
||||
}
|
||||
if len(clusterPeers) > 0 {
|
||||
ps.logf(" ℹ️ IPFS Cluster will connect to discovered peers: %v", clusterPeers)
|
||||
}
|
||||
}
|
||||
}
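To make the address shape above concrete, with a hypothetical peer IP and cluster peer ID:

peerIP := "203.0.113.10"                                     // hypothetical IP of an existing node
clusterPeerID := "12D3KooWExampleClusterPeerID0000000000000" // hypothetical, discovered via ipfsClusterPeer
addr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, clusterPeerID)
// addr == "/ip4/203.0.113.10/tcp/9098/p2p/12D3KooWExampleClusterPeerID0000000000000"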
|
||||
// Initialize IPFS Cluster config
|
||||
clusterPath := ps.debrosDir + "/data/ipfs-cluster"
|
||||
ipfsAPIPort := 4501
|
||||
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(nodeType, clusterPath, string(clusterSecret), ipfsAPIPort); err != nil {
|
||||
ps.logf(" ⚠️ IPFS Cluster initialization warning: %v", err)
|
||||
if err := ps.binaryInstaller.InitializeIPFSClusterConfig(clusterPath, clusterSecret, 4501, clusterPeers); err != nil {
|
||||
return fmt.Errorf("failed to initialize IPFS Cluster: %w", err)
|
||||
}
|
||||
|
||||
// Initialize RQLite data directory
|
||||
rqliteDataDir := ps.debrosDir + "/data/rqlite"
|
||||
if err := ps.binaryInstaller.InitializeRQLiteDataDir(nodeType, rqliteDataDir); err != nil {
|
||||
rqliteDataDir := filepath.Join(dataDir, "rqlite")
|
||||
if err := ps.binaryInstaller.InitializeRQLiteDataDir(rqliteDataDir); err != nil {
|
||||
ps.logf(" ⚠️ RQLite initialization warning: %v", err)
|
||||
}
|
||||
|
||||
// Ensure all directories and files created during service initialization have correct ownership
|
||||
// This is critical because directories/files created as root need to be owned by debros user
|
||||
if err := ps.fsProvisioner.FixOwnership(); err != nil {
|
||||
return fmt.Errorf("failed to fix ownership after service initialization: %w", err)
|
||||
}
|
||||
|
||||
ps.logf(" ✓ Services initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Phase3GenerateSecrets generates shared secrets and keys
|
||||
func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
|
||||
func (ps *ProductionSetup) Phase3GenerateSecrets() error {
|
||||
ps.logf("Phase 3: Generating secrets...")
|
||||
|
||||
// Cluster secret
|
||||
@@ -244,63 +360,59 @@ func (ps *ProductionSetup) Phase3GenerateSecrets(isBootstrap bool) error {
|
||||
}
|
||||
ps.logf(" ✓ IPFS swarm key ensured")
|
||||
|
||||
// Node identity
|
||||
nodeType := "node"
|
||||
if isBootstrap {
|
||||
nodeType = "bootstrap"
|
||||
}
|
||||
|
||||
peerID, err := ps.secretGenerator.EnsureNodeIdentity(nodeType)
|
||||
// Node identity (unified architecture)
|
||||
peerID, err := ps.secretGenerator.EnsureNodeIdentity()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure node identity: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Node identity ensured (Peer ID: %s)", peerID.String())
|
||||
peerIDStr := peerID.String()
|
||||
ps.NodePeerID = peerIDStr // Capture for later display
|
||||
ps.logf(" ✓ Node identity ensured (Peer ID: %s)", peerIDStr)
|
||||
|
||||
return nil
|
||||
}
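EnsureNodeIdentity itself is truncated in this diff. For orientation, a minimal sketch of deriving a libp2p peer ID from a freshly generated Ed25519 key, assuming go-libp2p's core packages; the persistence details are illustrative, not the project's exact logic:

import (
	"crypto/rand"
	"os"
	"path/filepath"

	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

// generateIdentity creates an Ed25519 key, stores its marshaled form on disk,
// and returns the peer ID derived from the public half.
func generateIdentity(keyDir string) (peer.ID, error) {
	priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		return "", err
	}
	raw, err := crypto.MarshalPrivateKey(priv)
	if err != nil {
		return "", err
	}
	if err := os.WriteFile(filepath.Join(keyDir, "identity.key"), raw, 0600); err != nil {
		return "", err
	}
	return peer.IDFromPublicKey(pub)
}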
|
||||
// Phase4GenerateConfigs generates node, gateway, and service configs
|
||||
func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeers []string, vpsIP string, enableHTTPS bool, domain string) error {
|
||||
ps.logf("Phase 4: Generating configurations...")
|
||||
func (ps *ProductionSetup) Phase4GenerateConfigs(peerAddresses []string, vpsIP string, enableHTTPS bool, domain string, joinAddress string) error {
|
||||
if ps.IsUpdate() {
|
||||
ps.logf("Phase 4: Updating configurations...")
|
||||
ps.logf(" (Existing configs will be updated to latest format)")
|
||||
} else {
|
||||
ps.logf("Phase 4: Generating configurations...")
|
||||
}
|
||||
|
||||
// Node config
|
||||
nodeConfig, err := ps.configGenerator.GenerateNodeConfig(isBootstrap, bootstrapPeers, vpsIP)
|
||||
// Node config (unified architecture)
|
||||
nodeConfig, err := ps.configGenerator.GenerateNodeConfig(peerAddresses, vpsIP, joinAddress, domain, enableHTTPS)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate node config: %w", err)
|
||||
}
|
||||
|
||||
var configFile string
|
||||
if isBootstrap {
|
||||
configFile = "bootstrap.yaml"
|
||||
} else {
|
||||
configFile = "node.yaml"
|
||||
}
|
||||
|
||||
configFile := "node.yaml"
|
||||
if err := ps.secretGenerator.SaveConfig(configFile, nodeConfig); err != nil {
|
||||
return fmt.Errorf("failed to save node config: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Node config generated: %s", configFile)
|
||||
|
||||
// Gateway config
|
||||
olricServers := []string{"127.0.0.1:3320"}
|
||||
gatewayConfig, err := ps.configGenerator.GenerateGatewayConfig(bootstrapPeers, enableHTTPS, domain, olricServers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate gateway config: %w", err)
|
||||
}
|
||||
// Gateway configuration is now embedded in each node's config
|
||||
// No separate gateway.yaml needed - each node runs its own embedded gateway
|
||||
|
||||
if err := ps.secretGenerator.SaveConfig("gateway.yaml", gatewayConfig); err != nil {
|
||||
return fmt.Errorf("failed to save gateway config: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Gateway config generated")
|
||||
|
||||
// Olric config
|
||||
olricConfig, err := ps.configGenerator.GenerateOlricConfig("localhost", 3320, 3322)
|
||||
// Olric config:
|
||||
// - HTTP API binds to localhost for security (accessed via gateway)
|
||||
// - Memberlist binds to 0.0.0.0 for cluster communication across nodes
|
||||
// - Environment "lan" for production multi-node clustering
|
||||
olricConfig, err := ps.configGenerator.GenerateOlricConfig(
|
||||
"127.0.0.1", // HTTP API on localhost
|
||||
3320,
|
||||
"0.0.0.0", // Memberlist on all interfaces for clustering
|
||||
3322,
|
||||
"lan", // Production environment
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate olric config: %w", err)
|
||||
}
|
||||
|
||||
// Create olric config directory
|
||||
olricConfigDir := ps.debrosDir + "/configs/olric"
|
||||
olricConfigDir := ps.oramaDir + "/configs/olric"
|
||||
if err := os.MkdirAll(olricConfigDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create olric config directory: %w", err)
|
||||
}
|
||||
@@ -316,54 +428,60 @@ func (ps *ProductionSetup) Phase4GenerateConfigs(isBootstrap bool, bootstrapPeer
|
||||
}
|
||||
|
||||
// Phase5CreateSystemdServices creates and enables systemd units
|
||||
func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string) error {
|
||||
// enableHTTPS determines the RQLite Raft port (7002 when SNI is enabled, 7001 otherwise)
|
||||
func (ps *ProductionSetup) Phase5CreateSystemdServices(enableHTTPS bool) error {
|
||||
ps.logf("Phase 5: Creating systemd services...")
|
||||
|
||||
// IPFS service
|
||||
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(nodeType)
|
||||
unitName := fmt.Sprintf("debros-ipfs-%s.service", nodeType)
|
||||
if err := ps.serviceController.WriteServiceUnit(unitName, ipfsUnit); err != nil {
|
||||
// Validate all required binaries are available before creating services
|
||||
ipfsBinary, err := ps.binaryInstaller.ResolveBinaryPath("ipfs", "/usr/local/bin/ipfs", "/usr/bin/ipfs")
|
||||
if err != nil {
|
||||
return fmt.Errorf("ipfs binary not available: %w", err)
|
||||
}
|
||||
clusterBinary, err := ps.binaryInstaller.ResolveBinaryPath("ipfs-cluster-service", "/usr/local/bin/ipfs-cluster-service", "/usr/bin/ipfs-cluster-service")
|
||||
if err != nil {
|
||||
return fmt.Errorf("ipfs-cluster-service binary not available: %w", err)
|
||||
}
|
||||
olricBinary, err := ps.binaryInstaller.ResolveBinaryPath("olric-server", "/usr/local/bin/olric-server", "/usr/bin/olric-server")
|
||||
if err != nil {
|
||||
return fmt.Errorf("olric-server binary not available: %w", err)
|
||||
}
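ResolveBinaryPath is not shown in this hunk; its call sites imply "prefer a binary already on PATH, then fall back to the listed candidate locations". A sketch under that assumption (not the project's actual implementation):

import (
	"fmt"
	"os"
	"os/exec"
)

// resolveBinaryPath returns the first usable location of name: PATH lookup first,
// then each explicit candidate path, erroring if none is found.
func resolveBinaryPath(name string, candidates ...string) (string, error) {
	if p, err := exec.LookPath(name); err == nil {
		return p, nil
	}
	for _, c := range candidates {
		if _, err := os.Stat(c); err == nil {
			return c, nil
		}
	}
	return "", fmt.Errorf("%s not found on PATH or in %v", name, candidates)
}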
|
||||
// IPFS service (unified - no bootstrap/node distinction)
|
||||
ipfsUnit := ps.serviceGenerator.GenerateIPFSService(ipfsBinary)
|
||||
if err := ps.serviceController.WriteServiceUnit("debros-ipfs.service", ipfsUnit); err != nil {
|
||||
return fmt.Errorf("failed to write IPFS service: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ IPFS service created: %s", unitName)
|
||||
ps.logf(" ✓ IPFS service created: debros-ipfs.service")
|
||||
|
||||
// IPFS Cluster service
|
||||
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(nodeType)
|
||||
clusterUnitName := fmt.Sprintf("debros-ipfs-cluster-%s.service", nodeType)
|
||||
if err := ps.serviceController.WriteServiceUnit(clusterUnitName, clusterUnit); err != nil {
|
||||
clusterUnit := ps.serviceGenerator.GenerateIPFSClusterService(clusterBinary)
|
||||
if err := ps.serviceController.WriteServiceUnit("debros-ipfs-cluster.service", clusterUnit); err != nil {
|
||||
return fmt.Errorf("failed to write IPFS Cluster service: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ IPFS Cluster service created: %s", clusterUnitName)
|
||||
ps.logf(" ✓ IPFS Cluster service created: debros-ipfs-cluster.service")
|
||||
|
||||
// RQLite service (only for bootstrap in single-node, or conditionally)
|
||||
rqliteUnit := ps.serviceGenerator.GenerateRQLiteService(nodeType, 5001, 7001, "")
|
||||
rqliteUnitName := fmt.Sprintf("debros-rqlite-%s.service", nodeType)
|
||||
if err := ps.serviceController.WriteServiceUnit(rqliteUnitName, rqliteUnit); err != nil {
|
||||
return fmt.Errorf("failed to write RQLite service: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ RQLite service created: %s", rqliteUnitName)
|
||||
// RQLite is managed internally by each node - no separate systemd service needed
|
||||
|
||||
// Olric service
|
||||
olricUnit := ps.serviceGenerator.GenerateOlricService()
|
||||
olricUnit := ps.serviceGenerator.GenerateOlricService(olricBinary)
|
||||
if err := ps.serviceController.WriteServiceUnit("debros-olric.service", olricUnit); err != nil {
|
||||
return fmt.Errorf("failed to write Olric service: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Olric service created")
|
||||
|
||||
// Node service
|
||||
nodeUnit := ps.serviceGenerator.GenerateNodeService(nodeType)
|
||||
nodeUnitName := fmt.Sprintf("debros-node-%s.service", nodeType)
|
||||
if err := ps.serviceController.WriteServiceUnit(nodeUnitName, nodeUnit); err != nil {
|
||||
// Node service (unified - includes embedded gateway)
|
||||
nodeUnit := ps.serviceGenerator.GenerateNodeService()
|
||||
if err := ps.serviceController.WriteServiceUnit("debros-node.service", nodeUnit); err != nil {
|
||||
return fmt.Errorf("failed to write Node service: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Node service created: %s", nodeUnitName)
|
||||
ps.logf(" ✓ Node service created: debros-node.service (with embedded gateway)")
|
||||
|
||||
// Gateway service (optional, only on specific nodes)
|
||||
gatewayUnit := ps.serviceGenerator.GenerateGatewayService(nodeType)
|
||||
if err := ps.serviceController.WriteServiceUnit("debros-gateway.service", gatewayUnit); err != nil {
|
||||
return fmt.Errorf("failed to write Gateway service: %w", err)
|
||||
// Anyone Client service (SOCKS5 proxy)
|
||||
anyoneUnit := ps.serviceGenerator.GenerateAnyoneClientService()
|
||||
if err := ps.serviceController.WriteServiceUnit("debros-anyone-client.service", anyoneUnit); err != nil {
|
||||
return fmt.Errorf("failed to write Anyone Client service: %w", err)
|
||||
}
|
||||
ps.logf(" ✓ Gateway service created")
|
||||
ps.logf(" ✓ Anyone Client service created")
|
||||
|
||||
// Reload systemd daemon
|
||||
if err := ps.serviceController.DaemonReload(); err != nil {
|
||||
@ -371,8 +489,10 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string) error {
|
||||
}
|
||||
ps.logf(" ✓ Systemd daemon reloaded")
|
||||
|
||||
// Enable services
|
||||
services := []string{unitName, clusterUnitName, rqliteUnitName, "debros-olric.service", nodeUnitName, "debros-gateway.service"}
|
||||
// Enable services (unified names - no bootstrap/node distinction)
|
||||
// Note: debros-gateway.service is no longer needed - each node has an embedded gateway
|
||||
// Note: debros-rqlite.service is NOT created - RQLite is managed by each node internally
|
||||
services := []string{"debros-ipfs.service", "debros-ipfs-cluster.service", "debros-olric.service", "debros-node.service", "debros-anyone-client.service"}
|
||||
for _, svc := range services {
|
||||
if err := ps.serviceController.EnableService(svc); err != nil {
|
||||
ps.logf(" ⚠️ Failed to enable %s: %v", svc, err)
|
||||
@ -384,8 +504,17 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string) error {
|
||||
// Start services in dependency order
|
||||
ps.logf(" Starting services...")
|
||||
|
||||
// Start infrastructure first (IPFS, RQLite, Olric)
|
||||
infraServices := []string{unitName, rqliteUnitName, "debros-olric.service"}
|
||||
// Start infrastructure first (IPFS, Olric, Anyone Client) - RQLite is managed internally by each node
|
||||
infraServices := []string{"debros-ipfs.service", "debros-olric.service"}
|
||||
|
||||
// Check if port 9050 is already in use (e.g., another anyone-client or similar service)
|
||||
if ps.portChecker.IsPortInUse(9050) {
|
||||
ps.logf(" ℹ️ Port 9050 is already in use (anyone-client or similar service running)")
|
||||
ps.logf(" ℹ️ Skipping debros-anyone-client startup - using existing service")
|
||||
} else {
|
||||
infraServices = append(infraServices, "debros-anyone-client.service")
|
||||
}
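The PortChecker is defined elsewhere; a common way to implement the IsPortInUse probe used above is to attempt a bind and treat failure as "in use". A sketch under that assumption:

import (
	"fmt"
	"net"
)

// isPortInUse reports whether a TCP listener can no longer be opened on the port.
func isPortInUse(port int) bool {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		return true // bind failed, so something is already listening
	}
	_ = ln.Close()
	return false
}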
|
||||
for _, svc := range infraServices {
|
||||
if err := ps.serviceController.StartService(svc); err != nil {
|
||||
ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
|
||||
@ -395,23 +524,20 @@ func (ps *ProductionSetup) Phase5CreateSystemdServices(nodeType string) error {
|
||||
}
|
||||
|
||||
// Wait a moment for infrastructure to stabilize
|
||||
exec.Command("sleep", "2").Run()
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Start IPFS Cluster
|
||||
if err := ps.serviceController.StartService(clusterUnitName); err != nil {
|
||||
ps.logf(" ⚠️ Failed to start %s: %v", clusterUnitName, err)
|
||||
if err := ps.serviceController.StartService("debros-ipfs-cluster.service"); err != nil {
|
||||
ps.logf(" ⚠️ Failed to start debros-ipfs-cluster.service: %v", err)
|
||||
} else {
|
||||
ps.logf(" - %s started", clusterUnitName)
|
||||
ps.logf(" - debros-ipfs-cluster.service started")
|
||||
}
|
||||
|
||||
// Start application services
|
||||
appServices := []string{nodeUnitName, "debros-gateway.service"}
|
||||
for _, svc := range appServices {
|
||||
if err := ps.serviceController.StartService(svc); err != nil {
|
||||
ps.logf(" ⚠️ Failed to start %s: %v", svc, err)
|
||||
} else {
|
||||
ps.logf(" - %s started", svc)
|
||||
}
|
||||
// Start node service (gateway is embedded in node, no separate service needed)
|
||||
if err := ps.serviceController.StartService("debros-node.service"); err != nil {
|
||||
ps.logf(" ⚠️ Failed to start debros-node.service: %v", err)
|
||||
} else {
|
||||
ps.logf(" - debros-node.service started (with embedded gateway)")
|
||||
}
|
||||
|
||||
ps.logf(" ✓ All services started")
|
||||
@ -425,12 +551,20 @@ func (ps *ProductionSetup) LogSetupComplete(peerID string) {
|
||||
ps.logf(strings.Repeat("=", 70))
|
||||
ps.logf("\nNode Peer ID: %s", peerID)
|
||||
ps.logf("\nService Management:")
|
||||
ps.logf(" systemctl status debros-ipfs-bootstrap")
|
||||
ps.logf(" systemctl logs debros-node-bootstrap")
|
||||
ps.logf(" sudo tail -f %s/logs/node.log", ps.debrosDir)
|
||||
ps.logf(" systemctl status debros-ipfs")
|
||||
ps.logf(" journalctl -u debros-node -f")
|
||||
ps.logf(" tail -f %s/logs/node.log", ps.oramaDir)
|
||||
ps.logf("\nLog Files:")
|
||||
ps.logf(" %s/logs/ipfs.log", ps.oramaDir)
|
||||
ps.logf(" %s/logs/ipfs-cluster.log", ps.oramaDir)
|
||||
ps.logf(" %s/logs/olric.log", ps.oramaDir)
|
||||
ps.logf(" %s/logs/node.log", ps.oramaDir)
|
||||
ps.logf(" %s/logs/gateway.log", ps.oramaDir)
|
||||
ps.logf(" %s/logs/anyone-client.log", ps.oramaDir)
|
||||
ps.logf("\nStart All Services:")
|
||||
ps.logf(" systemctl start debros-ipfs-bootstrap debros-ipfs-cluster-bootstrap debros-rqlite-bootstrap debros-olric debros-node-bootstrap debros-gateway")
|
||||
ps.logf(" systemctl start debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-client debros-node")
|
||||
ps.logf("\nVerify Installation:")
|
||||
ps.logf(" curl http://localhost:6001/health")
|
||||
ps.logf(" curl http://localhost:5001/status\n")
|
||||
ps.logf(" curl http://localhost:5001/status")
|
||||
ps.logf(" # Anyone Client SOCKS5 proxy on localhost:9050\n")
|
||||
}
|
||||
|
||||
@@ -10,37 +10,36 @@ import (
|
||||
|
||||
// FilesystemProvisioner manages directory creation and permissions
|
||||
type FilesystemProvisioner struct {
|
||||
debrosHome string
|
||||
debrosDir string
|
||||
oramaHome string
|
||||
oramaDir string
|
||||
logWriter interface{} // Can be io.Writer for logging
|
||||
}
|
||||
|
||||
// NewFilesystemProvisioner creates a new provisioner
|
||||
func NewFilesystemProvisioner(debrosHome string) *FilesystemProvisioner {
|
||||
func NewFilesystemProvisioner(oramaHome string) *FilesystemProvisioner {
|
||||
return &FilesystemProvisioner{
|
||||
debrosHome: debrosHome,
|
||||
debrosDir: filepath.Join(debrosHome, ".debros"),
|
||||
oramaHome: oramaHome,
|
||||
oramaDir: filepath.Join(oramaHome, ".orama"),
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureDirectoryStructure creates all required directories
|
||||
// EnsureDirectoryStructure creates all required directories (unified structure)
|
||||
func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
|
||||
// All directories needed for unified node structure
|
||||
dirs := []string{
|
||||
fp.debrosDir,
|
||||
filepath.Join(fp.debrosDir, "configs"),
|
||||
filepath.Join(fp.debrosDir, "secrets"),
|
||||
filepath.Join(fp.debrosDir, "data"),
|
||||
filepath.Join(fp.debrosDir, "data", "bootstrap", "ipfs", "repo"),
|
||||
filepath.Join(fp.debrosDir, "data", "bootstrap", "ipfs-cluster"),
|
||||
filepath.Join(fp.debrosDir, "data", "bootstrap", "rqlite"),
|
||||
filepath.Join(fp.debrosDir, "data", "node", "ipfs", "repo"),
|
||||
filepath.Join(fp.debrosDir, "data", "node", "ipfs-cluster"),
|
||||
filepath.Join(fp.debrosDir, "data", "node", "rqlite"),
|
||||
filepath.Join(fp.debrosDir, "logs"),
|
||||
filepath.Join(fp.debrosDir, "tls-cache"),
|
||||
filepath.Join(fp.debrosDir, "backups"),
|
||||
filepath.Join(fp.debrosHome, "bin"),
|
||||
filepath.Join(fp.debrosHome, "src"),
|
||||
fp.oramaDir,
|
||||
filepath.Join(fp.oramaDir, "configs"),
|
||||
filepath.Join(fp.oramaDir, "secrets"),
|
||||
filepath.Join(fp.oramaDir, "data"),
|
||||
filepath.Join(fp.oramaDir, "data", "ipfs", "repo"),
|
||||
filepath.Join(fp.oramaDir, "data", "ipfs-cluster"),
|
||||
filepath.Join(fp.oramaDir, "data", "rqlite"),
|
||||
filepath.Join(fp.oramaDir, "logs"),
|
||||
filepath.Join(fp.oramaDir, "tls-cache"),
|
||||
filepath.Join(fp.oramaDir, "backups"),
|
||||
filepath.Join(fp.oramaHome, "bin"),
|
||||
filepath.Join(fp.oramaHome, "src"),
|
||||
filepath.Join(fp.oramaHome, ".npm"),
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
@ -49,27 +48,65 @@ func (fp *FilesystemProvisioner) EnsureDirectoryStructure() error {
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any stray cluster-secret file from root .orama directory
|
||||
// The correct location is .orama/secrets/cluster-secret
|
||||
strayClusterSecret := filepath.Join(fp.oramaDir, "cluster-secret")
|
||||
if _, err := os.Stat(strayClusterSecret); err == nil {
|
||||
if err := os.Remove(strayClusterSecret); err != nil {
|
||||
return fmt.Errorf("failed to remove stray cluster-secret file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create log files with correct permissions so systemd can write to them
|
||||
logsDir := filepath.Join(fp.oramaDir, "logs")
|
||||
logFiles := []string{
|
||||
"olric.log",
|
||||
"gateway.log",
|
||||
"ipfs.log",
|
||||
"ipfs-cluster.log",
|
||||
"node.log",
|
||||
"anyone-client.log",
|
||||
}
|
||||
|
||||
for _, logFile := range logFiles {
|
||||
logPath := filepath.Join(logsDir, logFile)
|
||||
// Create empty file if it doesn't exist
|
||||
if _, err := os.Stat(logPath); os.IsNotExist(err) {
|
||||
if err := os.WriteFile(logPath, []byte{}, 0644); err != nil {
|
||||
return fmt.Errorf("failed to create log file %s: %w", logPath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FixOwnership changes ownership of .debros directory to debros user
|
||||
// FixOwnership changes ownership of .orama directory to debros user
|
||||
func (fp *FilesystemProvisioner) FixOwnership() error {
|
||||
cmd := exec.Command("chown", "-R", "debros:debros", fp.debrosDir)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w", fp.debrosDir, err)
|
||||
// Fix entire .orama directory recursively (includes all data, configs, logs, etc.)
|
||||
cmd := exec.Command("chown", "-R", "debros:debros", fp.oramaDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaDir, err, string(output))
|
||||
}
|
||||
|
||||
// Also fix home directory ownership
|
||||
cmd = exec.Command("chown", "debros:debros", fp.debrosHome)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w", fp.debrosHome, err)
|
||||
cmd = exec.Command("chown", "debros:debros", fp.oramaHome)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", fp.oramaHome, err, string(output))
|
||||
}
|
||||
|
||||
// Fix bin directory
|
||||
binDir := filepath.Join(fp.debrosHome, "bin")
|
||||
binDir := filepath.Join(fp.oramaHome, "bin")
|
||||
cmd = exec.Command("chown", "-R", "debros:debros", binDir)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w", binDir, err)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", binDir, err, string(output))
|
||||
}
|
||||
|
||||
// Fix npm cache directory
|
||||
npmDir := filepath.Join(fp.oramaHome, ".npm")
|
||||
cmd = exec.Command("chown", "-R", "debros:debros", npmDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set ownership for %s: %w\nOutput: %s", npmDir, err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -147,20 +184,20 @@ func (up *UserProvisioner) SetupSudoersAccess(invokerUser string) error {
|
||||
|
||||
// StateDetector checks for existing production state
|
||||
type StateDetector struct {
|
||||
debrosDir string
|
||||
oramaDir string
|
||||
}
|
||||
|
||||
// NewStateDetector creates a state detector
|
||||
func NewStateDetector(debrosDir string) *StateDetector {
|
||||
func NewStateDetector(oramaDir string) *StateDetector {
|
||||
return &StateDetector{
|
||||
debrosDir: debrosDir,
|
||||
oramaDir: oramaDir,
|
||||
}
|
||||
}
|
||||
|
||||
// IsConfigured checks if basic configs exist
|
||||
func (sd *StateDetector) IsConfigured() bool {
|
||||
nodeConfig := filepath.Join(sd.debrosDir, "configs", "node.yaml")
|
||||
gatewayConfig := filepath.Join(sd.debrosDir, "configs", "gateway.yaml")
|
||||
nodeConfig := filepath.Join(sd.oramaDir, "configs", "node.yaml")
|
||||
gatewayConfig := filepath.Join(sd.oramaDir, "configs", "gateway.yaml")
|
||||
_, err1 := os.Stat(nodeConfig)
|
||||
_, err2 := os.Stat(gatewayConfig)
|
||||
return err1 == nil || err2 == nil
|
||||
@ -168,24 +205,36 @@ func (sd *StateDetector) IsConfigured() bool {
|
||||
|
||||
// HasSecrets checks if cluster secret and swarm key exist
|
||||
func (sd *StateDetector) HasSecrets() bool {
|
||||
clusterSecret := filepath.Join(sd.debrosDir, "secrets", "cluster-secret")
|
||||
swarmKey := filepath.Join(sd.debrosDir, "secrets", "swarm.key")
|
||||
clusterSecret := filepath.Join(sd.oramaDir, "secrets", "cluster-secret")
|
||||
swarmKey := filepath.Join(sd.oramaDir, "secrets", "swarm.key")
|
||||
_, err1 := os.Stat(clusterSecret)
|
||||
_, err2 := os.Stat(swarmKey)
|
||||
return err1 == nil && err2 == nil
|
||||
}
|
||||
|
||||
// HasIPFSData checks if IPFS repo is initialized
|
||||
// HasIPFSData checks if IPFS repo is initialized (unified path)
|
||||
func (sd *StateDetector) HasIPFSData() bool {
|
||||
ipfsRepoPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "ipfs", "repo", "config")
|
||||
_, err := os.Stat(ipfsRepoPath)
|
||||
// Check unified path first
|
||||
ipfsRepoPath := filepath.Join(sd.oramaDir, "data", "ipfs", "repo", "config")
|
||||
if _, err := os.Stat(ipfsRepoPath); err == nil {
|
||||
return true
|
||||
}
|
||||
// Fallback: check legacy bootstrap path for migration
|
||||
legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "ipfs", "repo", "config")
|
||||
_, err := os.Stat(legacyPath)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// HasRQLiteData checks if RQLite data exists
|
||||
// HasRQLiteData checks if RQLite data exists (unified path)
|
||||
func (sd *StateDetector) HasRQLiteData() bool {
|
||||
rqliteDataPath := filepath.Join(sd.debrosDir, "data", "bootstrap", "rqlite")
|
||||
info, err := os.Stat(rqliteDataPath)
|
||||
// Check unified path first
|
||||
rqliteDataPath := filepath.Join(sd.oramaDir, "data", "rqlite")
|
||||
if info, err := os.Stat(rqliteDataPath); err == nil && info.IsDir() {
|
||||
return true
|
||||
}
|
||||
// Fallback: check legacy bootstrap path for migration
|
||||
legacyPath := filepath.Join(sd.oramaDir, "data", "bootstrap", "rqlite")
|
||||
info, err := os.Stat(legacyPath)
|
||||
return err == nil && info.IsDir()
|
||||
}
|
||||
|
||||
|
||||
@@ -10,29 +10,25 @@ import (
|
||||
|
||||
// SystemdServiceGenerator generates systemd unit files
|
||||
type SystemdServiceGenerator struct {
|
||||
debrosHome string
|
||||
debrosDir string
|
||||
oramaHome string
|
||||
oramaDir string
|
||||
}
|
||||
|
||||
// NewSystemdServiceGenerator creates a new service generator
|
||||
func NewSystemdServiceGenerator(debrosHome, debrosDir string) *SystemdServiceGenerator {
|
||||
func NewSystemdServiceGenerator(oramaHome, oramaDir string) *SystemdServiceGenerator {
|
||||
return &SystemdServiceGenerator{
|
||||
debrosHome: debrosHome,
|
||||
debrosDir: debrosDir,
|
||||
oramaHome: oramaHome,
|
||||
oramaDir: oramaDir,
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateIPFSService generates the IPFS daemon systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateIPFSService(nodeType string) string {
|
||||
var ipfsRepoPath string
|
||||
if nodeType == "bootstrap" {
|
||||
ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs", "repo")
|
||||
} else {
|
||||
ipfsRepoPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs", "repo")
|
||||
}
|
||||
func (ssg *SystemdServiceGenerator) GenerateIPFSService(ipfsBinary string) string {
|
||||
ipfsRepoPath := filepath.Join(ssg.oramaDir, "data", "ipfs", "repo")
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs.log")
|
||||
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=IPFS Daemon (%s)
|
||||
Description=IPFS Daemon
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
@@ -40,77 +36,96 @@ Wants=network-online.target
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
Environment=HOME=%s
|
||||
Environment=IPFS_PATH=%s
|
||||
ExecStartPre=/bin/bash -c 'if [ -f %s/secrets/swarm.key ] && [ ! -f %s/swarm.key ]; then cp %s/secrets/swarm.key %s/swarm.key && chmod 600 %s/swarm.key; fi'
|
||||
ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=%s
|
||||
Environment=HOME=%[1]s
|
||||
Environment=IPFS_PATH=%[2]s
|
||||
ExecStartPre=/bin/bash -c 'if [ -f %[3]s/secrets/swarm.key ] && [ ! -f %[2]s/swarm.key ]; then cp %[3]s/secrets/swarm.key %[2]s/swarm.key && chmod 600 %[2]s/swarm.key; fi'
|
||||
ExecStart=%[5]s daemon --enable-pubsub-experiment --repo-dir=%[2]s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=ipfs-%s
|
||||
StandardOutput=append:%[4]s
|
||||
StandardError=append:%[4]s
|
||||
SyslogIdentifier=debros-ipfs
|
||||
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%s
|
||||
ProtectHome=read-only
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[3]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, nodeType, ssg.debrosHome, ipfsRepoPath, ssg.debrosDir, ipfsRepoPath, ssg.debrosDir, ipfsRepoPath, ipfsRepoPath, ipfsRepoPath, nodeType, ssg.debrosDir)
|
||||
`, ssg.oramaHome, ipfsRepoPath, ssg.oramaDir, logFile, ipfsBinary)
|
||||
}
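The rewritten unit template above uses Go's indexed formatting verbs so a single argument can be referenced several times; a tiny illustration with made-up paths:

s := fmt.Sprintf("HOME=%[1]s IPFS_PATH=%[2]s ReadWritePaths=%[2]s", "/home/debros", "/home/debros/.orama/data/ipfs/repo")
// s == "HOME=/home/debros IPFS_PATH=/home/debros/.orama/data/ipfs/repo ReadWritePaths=/home/debros/.orama/data/ipfs/repo"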
|
||||
// GenerateIPFSClusterService generates the IPFS Cluster systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(nodeType string) string {
|
||||
var clusterPath string
|
||||
if nodeType == "bootstrap" {
|
||||
clusterPath = filepath.Join(ssg.debrosDir, "data", "bootstrap", "ipfs-cluster")
|
||||
} else {
|
||||
clusterPath = filepath.Join(ssg.debrosDir, "data", "node", "ipfs-cluster")
|
||||
func (ssg *SystemdServiceGenerator) GenerateIPFSClusterService(clusterBinary string) string {
|
||||
clusterPath := filepath.Join(ssg.oramaDir, "data", "ipfs-cluster")
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "ipfs-cluster.log")
|
||||
|
||||
// Read cluster secret from file to pass to daemon
|
||||
clusterSecretPath := filepath.Join(ssg.oramaDir, "secrets", "cluster-secret")
|
||||
clusterSecret := ""
|
||||
if data, err := os.ReadFile(clusterSecretPath); err == nil {
|
||||
clusterSecret = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=IPFS Cluster Service (%s)
|
||||
After=debros-ipfs-%s.service
|
||||
Wants=debros-ipfs-%s.service
|
||||
Requires=debros-ipfs-%s.service
|
||||
Description=IPFS Cluster Service
|
||||
After=debros-ipfs.service
|
||||
Wants=debros-ipfs.service
|
||||
Requires=debros-ipfs.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
WorkingDirectory=%s
|
||||
Environment=HOME=%s
|
||||
Environment=CLUSTER_PATH=%s
|
||||
ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config %s/service.json
|
||||
WorkingDirectory=%[1]s
|
||||
Environment=HOME=%[1]s
|
||||
Environment=IPFS_CLUSTER_PATH=%[2]s
|
||||
Environment=CLUSTER_SECRET=%[5]s
|
||||
ExecStartPre=/bin/bash -c 'mkdir -p %[2]s && chmod 700 %[2]s'
|
||||
ExecStart=%[4]s daemon
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=ipfs-cluster-%s
|
||||
StandardOutput=append:%[3]s
|
||||
StandardError=append:%[3]s
|
||||
SyslogIdentifier=debros-ipfs-cluster
|
||||
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%s
|
||||
ProtectHome=read-only
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[1]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, clusterPath, clusterPath, nodeType, ssg.debrosDir)
|
||||
`, ssg.oramaHome, clusterPath, logFile, clusterBinary, clusterSecret)
|
||||
}
|
||||
|
||||
// GenerateRQLiteService generates the RQLite systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, httpPort, raftPort int, joinAddr string) string {
|
||||
var dataDir string
|
||||
if nodeType == "bootstrap" {
|
||||
dataDir = filepath.Join(ssg.debrosDir, "data", "bootstrap", "rqlite")
|
||||
} else {
|
||||
dataDir = filepath.Join(ssg.debrosDir, "data", "node", "rqlite")
|
||||
func (ssg *SystemdServiceGenerator) GenerateRQLiteService(rqliteBinary string, httpPort, raftPort int, joinAddr string, advertiseIP string) string {
|
||||
dataDir := filepath.Join(ssg.oramaDir, "data", "rqlite")
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "rqlite.log")
|
||||
|
||||
// Use public IP for advertise if provided, otherwise default to localhost
|
||||
if advertiseIP == "" {
|
||||
advertiseIP = "127.0.0.1"
|
||||
}
|
||||
|
||||
// Bind RQLite to localhost only - external access via SNI gateway
|
||||
args := fmt.Sprintf(
|
||||
`-http-addr 0.0.0.0:%d -http-adv-addr 127.0.0.1:%d -raft-adv-addr 127.0.0.1:%d -raft-addr 0.0.0.0:%d`,
|
||||
httpPort, httpPort, raftPort, raftPort,
|
||||
`-http-addr 127.0.0.1:%d -http-adv-addr %s:%d -raft-adv-addr %s:%d -raft-addr 127.0.0.1:%d`,
|
||||
httpPort, advertiseIP, httpPort, advertiseIP, raftPort, raftPort,
|
||||
)
|
||||
|
||||
if joinAddr != "" {
|
||||
@ -120,7 +135,7 @@ func (ssg *SystemdServiceGenerator) GenerateRQLiteService(nodeType string, httpP
|
||||
args += fmt.Sprintf(` %s`, dataDir)
|
||||
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=RQLite Database (%s)
|
||||
Description=RQLite Database
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
@@ -128,27 +143,34 @@ Wants=network-online.target
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
Environment=HOME=%s
|
||||
ExecStart=/usr/local/bin/rqlited %s
|
||||
Environment=HOME=%[1]s
|
||||
ExecStart=%[5]s %[2]s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=rqlite-%s
|
||||
StandardOutput=append:%[3]s
|
||||
StandardError=append:%[3]s
|
||||
SyslogIdentifier=debros-rqlite
|
||||
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%s
|
||||
ProtectHome=read-only
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[4]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, nodeType, ssg.debrosHome, args, nodeType, ssg.debrosDir)
|
||||
`, ssg.oramaHome, args, logFile, dataDir, rqliteBinary)
|
||||
}
|
||||
|
||||
// GenerateOlricService generates the Olric systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateOlricService() string {
|
||||
olricConfigPath := filepath.Join(ssg.debrosDir, "configs", "olric", "config.yaml")
|
||||
func (ssg *SystemdServiceGenerator) GenerateOlricService(olricBinary string) string {
|
||||
olricConfigPath := filepath.Join(ssg.oramaDir, "configs", "olric", "config.yaml")
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "olric.log")
|
||||
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=Olric Cache Server
|
||||
@@ -159,95 +181,152 @@ Wants=network-online.target
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
Environment=HOME=%s
|
||||
Environment=OLRIC_SERVER_CONFIG=%s
|
||||
ExecStart=/usr/local/bin/olric-server
|
||||
Environment=HOME=%[1]s
|
||||
Environment=OLRIC_SERVER_CONFIG=%[2]s
|
||||
ExecStart=%[5]s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
StandardOutput=append:%[3]s
|
||||
StandardError=append:%[3]s
|
||||
SyslogIdentifier=olric
|
||||
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%s
|
||||
ProtectHome=read-only
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[4]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.debrosHome, olricConfigPath, ssg.debrosDir)
|
||||
`, ssg.oramaHome, olricConfigPath, logFile, ssg.oramaDir, olricBinary)
|
||||
}
|
||||
|
||||
// GenerateNodeService generates the DeBros Node systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateNodeService(nodeType string) string {
|
||||
var configFile string
|
||||
if nodeType == "bootstrap" {
|
||||
configFile = "bootstrap.yaml"
|
||||
} else {
|
||||
configFile = "node.yaml"
|
||||
}
|
||||
func (ssg *SystemdServiceGenerator) GenerateNodeService() string {
|
||||
configFile := "node.yaml"
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "node.log")
|
||||
// Note: systemd StandardOutput/StandardError paths should not contain substitution variables
|
||||
// Use absolute paths directly as they will be resolved by systemd at runtime
|
||||
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=DeBros Network Node (%s)
|
||||
After=debros-ipfs-cluster-%s.service
|
||||
Wants=debros-ipfs-cluster-%s.service
|
||||
Requires=debros-ipfs-cluster-%s.service
|
||||
Description=DeBros Network Node
|
||||
After=debros-ipfs-cluster.service debros-olric.service
|
||||
Wants=debros-ipfs-cluster.service debros-olric.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
WorkingDirectory=%s
|
||||
Environment=HOME=%s
|
||||
ExecStart=%s/bin/node --config %s/configs/%s
|
||||
WorkingDirectory=%[1]s
|
||||
Environment=HOME=%[1]s
|
||||
ExecStart=%[1]s/bin/orama-node --config %[2]s/configs/%[3]s
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
SyslogIdentifier=debros-node-%s
|
||||
StandardOutput=append:%[4]s
|
||||
StandardError=append:%[4]s
|
||||
SyslogIdentifier=debros-node
|
||||
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%s
|
||||
ProtectHome=read-only
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[2]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, nodeType, nodeType, nodeType, nodeType, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, configFile, nodeType, ssg.debrosDir)
|
||||
`, ssg.oramaHome, ssg.oramaDir, configFile, logFile)
|
||||
}
|
||||
|
||||
// GenerateGatewayService generates the DeBros Gateway systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateGatewayService(nodeType string) string {
|
||||
nodeService := fmt.Sprintf("debros-node-%s.service", nodeType)
|
||||
func (ssg *SystemdServiceGenerator) GenerateGatewayService() string {
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "gateway.log")
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=DeBros Gateway
|
||||
After=%s
|
||||
Wants=%s
|
||||
After=debros-node.service debros-olric.service
|
||||
Wants=debros-node.service debros-olric.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
WorkingDirectory=%s
|
||||
Environment=HOME=%s
|
||||
ExecStart=%s/bin/gateway --config %s/configs/gateway.yaml
|
||||
WorkingDirectory=%[1]s
|
||||
Environment=HOME=%[1]s
|
||||
ExecStart=%[1]s/bin/gateway --config %[2]s/data/gateway.yaml
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
StandardOutput=append:%[3]s
|
||||
StandardError=append:%[3]s
|
||||
SyslogIdentifier=debros-gateway
|
||||
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
|
||||
NoNewPrivileges=yes
|
||||
# Note: NoNewPrivileges is omitted because it conflicts with AmbientCapabilities
|
||||
# The service needs CAP_NET_BIND_SERVICE to bind to privileged ports (80, 443)
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths=%s
|
||||
ProtectHome=read-only
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[2]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, nodeService, nodeService, ssg.debrosHome, ssg.debrosHome, ssg.debrosHome, ssg.debrosDir, ssg.debrosDir)
|
||||
`, ssg.oramaHome, ssg.oramaDir, logFile)
|
||||
}
|
||||
|
||||
// GenerateAnyoneClientService generates the Anyone Client SOCKS5 proxy systemd unit
|
||||
func (ssg *SystemdServiceGenerator) GenerateAnyoneClientService() string {
|
||||
logFile := filepath.Join(ssg.oramaDir, "logs", "anyone-client.log")
|
||||
|
||||
return fmt.Sprintf(`[Unit]
|
||||
Description=Anyone Client SOCKS5 Proxy
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=debros
|
||||
Group=debros
|
||||
Environment=HOME=%[1]s
|
||||
Environment=PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/lib/node_modules/.bin
|
||||
WorkingDirectory=%[1]s
|
||||
ExecStart=/usr/bin/npx anyone-client
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=append:%[2]s
|
||||
StandardError=append:%[2]s
|
||||
SyslogIdentifier=anyone-client
|
||||
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=no
|
||||
ProtectKernelTunables=yes
|
||||
ProtectKernelModules=yes
|
||||
ProtectControlGroups=yes
|
||||
RestrictRealtime=yes
|
||||
RestrictSUIDSGID=yes
|
||||
ReadWritePaths=%[3]s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
`, ssg.oramaHome, logFile, ssg.oramaDir)
|
||||
}
|
||||
|
||||
// SystemdController manages systemd service operations
|
||||
|
||||
109
pkg/environments/production/services_test.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package production
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGenerateRQLiteService verifies RQLite service generation with advertise IP and join address
|
||||
func TestGenerateRQLiteService(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
joinAddr string
|
||||
advertiseIP string
|
||||
expectJoinInUnit bool
|
||||
expectAdvertiseIP string
|
||||
}{
|
||||
{
|
||||
name: "first node with localhost advertise",
|
||||
joinAddr: "",
|
||||
advertiseIP: "",
|
||||
expectJoinInUnit: false,
|
||||
expectAdvertiseIP: "127.0.0.1",
|
||||
},
|
||||
{
|
||||
name: "first node with public IP advertise",
|
||||
joinAddr: "",
|
||||
advertiseIP: "10.0.0.1",
|
||||
expectJoinInUnit: false,
|
||||
expectAdvertiseIP: "10.0.0.1",
|
||||
},
|
||||
{
|
||||
name: "node joining cluster",
|
||||
joinAddr: "10.0.0.1:7001",
|
||||
advertiseIP: "10.0.0.2",
|
||||
expectJoinInUnit: true,
|
||||
expectAdvertiseIP: "10.0.0.2",
|
||||
},
|
||||
{
|
||||
name: "node with localhost (should still include join)",
|
||||
joinAddr: "localhost:7001",
|
||||
advertiseIP: "127.0.0.1",
|
||||
expectJoinInUnit: true,
|
||||
expectAdvertiseIP: "127.0.0.1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ssg := &SystemdServiceGenerator{
|
||||
oramaHome: "/home/debros",
|
||||
oramaDir: "/home/debros/.orama",
|
||||
}
|
||||
|
||||
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, tt.joinAddr, tt.advertiseIP)
|
||||
|
||||
// Check advertise IP is present
|
||||
expectedAdvertise := tt.expectAdvertiseIP + ":5001"
|
||||
if !strings.Contains(unit, expectedAdvertise) {
|
||||
t.Errorf("expected advertise address %q in unit, got:\n%s", expectedAdvertise, unit)
|
||||
}
|
||||
|
||||
// Check raft advertise IP is present
|
||||
expectedRaftAdvertise := tt.expectAdvertiseIP + ":7001"
|
||||
if !strings.Contains(unit, expectedRaftAdvertise) {
|
||||
t.Errorf("expected raft advertise address %q in unit, got:\n%s", expectedRaftAdvertise, unit)
|
||||
}
|
||||
|
||||
// Check join flag presence
|
||||
hasJoin := strings.Contains(unit, "-join")
|
||||
if hasJoin != tt.expectJoinInUnit {
|
||||
t.Errorf("expected join in unit: %v, hasJoin: %v\nUnit:\n%s", tt.expectJoinInUnit, hasJoin, unit)
|
||||
}
|
||||
|
||||
if tt.expectJoinInUnit && tt.joinAddr != "" && !strings.Contains(unit, tt.joinAddr) {
|
||||
t.Errorf("expected join address %q in unit, not found", tt.joinAddr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGenerateRQLiteServiceArgs verifies the ExecStart command arguments
|
||||
func TestGenerateRQLiteServiceArgs(t *testing.T) {
|
||||
ssg := &SystemdServiceGenerator{
|
||||
oramaHome: "/home/debros",
|
||||
oramaDir: "/home/debros/.orama",
|
||||
}
|
||||
|
||||
unit := ssg.GenerateRQLiteService("/usr/local/bin/rqlited", 5001, 7001, "10.0.0.1:7001", "10.0.0.2")
|
||||
|
||||
// Verify essential flags are present (localhost binding for security)
|
||||
if !strings.Contains(unit, "-http-addr 127.0.0.1:5001") {
|
||||
t.Error("missing -http-addr 127.0.0.1:5001")
|
||||
}
|
||||
if !strings.Contains(unit, "-http-adv-addr 10.0.0.2:5001") {
|
||||
t.Error("missing -http-adv-addr 10.0.0.2:5001")
|
||||
}
|
||||
if !strings.Contains(unit, "-raft-addr 127.0.0.1:7001") {
|
||||
t.Error("missing -raft-addr 127.0.0.1:7001")
|
||||
}
|
||||
if !strings.Contains(unit, "-raft-adv-addr 10.0.0.2:7001") {
|
||||
t.Error("missing -raft-adv-addr 10.0.0.2:7001")
|
||||
}
|
||||
if !strings.Contains(unit, "-join 10.0.0.1:7001") {
|
||||
t.Error("missing -join 10.0.0.1:7001")
|
||||
}
|
||||
if !strings.Contains(unit, "-join-attempts 30") {
|
||||
t.Error("missing -join-attempts 30")
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
node:
|
||||
id: "{{.NodeID}}"
|
||||
type: "bootstrap"
|
||||
listen_addresses:
|
||||
- "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
|
||||
data_dir: "{{.DataDir}}"
|
||||
max_connections: 50
|
||||
|
||||
database:
|
||||
data_dir: "{{.DataDir}}/rqlite"
|
||||
replication_factor: 3
|
||||
shard_count: 16
|
||||
max_database_size: 1073741824
|
||||
backup_interval: "24h"
|
||||
rqlite_port: { { .RQLiteHTTPPort } }
|
||||
rqlite_raft_port: { { .RQLiteRaftPort } }
|
||||
rqlite_join_address: ""
|
||||
cluster_sync_interval: "30s"
|
||||
peer_inactivity_limit: "24h"
|
||||
min_cluster_size: 1
|
||||
ipfs:
|
||||
cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
|
||||
api_url: "http://localhost:{{.IPFSAPIPort}}"
|
||||
timeout: "60s"
|
||||
replication_factor: 3
|
||||
enable_encryption: true
|
||||
|
||||
discovery:
|
||||
bootstrap_peers: []
|
||||
discovery_interval: "15s"
|
||||
bootstrap_port: { { .P2PPort } }
|
||||
http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
|
||||
raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
|
||||
node_namespace: "default"
|
||||
|
||||
security:
|
||||
enable_tls: false
|
||||
|
||||
logging:
|
||||
level: "info"
|
||||
format: "console"
|
||||
@@ -1,10 +1,10 @@
|
||||
node:
|
||||
id: "{{.NodeID}}"
|
||||
type: "node"
|
||||
listen_addresses:
|
||||
- "/ip4/0.0.0.0/tcp/{{.P2PPort}}"
|
||||
data_dir: "{{.DataDir}}"
|
||||
max_connections: 50
|
||||
domain: "{{.Domain}}"
|
||||
|
||||
database:
|
||||
data_dir: "{{.DataDir}}/rqlite"
|
||||
@ -13,9 +13,14 @@ database:
|
||||
max_database_size: 1073741824
|
||||
backup_interval: "24h"
|
||||
rqlite_port: {{.RQLiteHTTPPort}}
|
||||
rqlite_raft_port: {{.RQLiteRaftPort}}
|
||||
rqlite_raft_port: {{.RQLiteRaftInternalPort}}
|
||||
rqlite_join_address: "{{.RQLiteJoinAddress}}"
|
||||
cluster_sync_interval: "30s"
|
||||
{{if .NodeCert}}# Node-to-node TLS encryption for Raft communication (direct RQLite TLS on port 7002)
|
||||
node_cert: "{{.NodeCert}}"
|
||||
node_key: "{{.NodeKey}}"
|
||||
{{if .NodeCACert}}node_ca_cert: "{{.NodeCACert}}"
|
||||
{{end}}{{if .NodeNoVerify}}node_no_verify: true
|
||||
{{end}}{{end}}cluster_sync_interval: "30s"
|
||||
peer_inactivity_limit: "24h"
|
||||
min_cluster_size: 1
|
||||
ipfs:
|
||||
@ -31,8 +36,8 @@ discovery:
|
||||
{{end}}
|
||||
discovery_interval: "15s"
|
||||
bootstrap_port: {{.P2PPort}}
|
||||
http_adv_address: "localhost:{{.RQLiteHTTPPort}}"
|
||||
raft_adv_address: "localhost:{{.RQLiteRaftPort}}"
|
||||
http_adv_address: "{{.HTTPAdvAddress}}"
|
||||
raft_adv_address: "{{.RaftAdvAddress}}"
|
||||
node_namespace: "default"
|
||||
|
||||
security:
|
||||
@ -42,3 +47,42 @@ logging:
|
||||
level: "info"
|
||||
format: "console"
|
||||
|
||||
http_gateway:
|
||||
enabled: true
|
||||
listen_addr: "{{if .EnableHTTPS}}:{{.HTTPSPort}}{{else}}:{{.UnifiedGatewayPort}}{{end}}"
|
||||
node_name: "{{.NodeID}}"
|
||||
|
||||
{{if .EnableHTTPS}}https:
|
||||
enabled: true
|
||||
domain: "{{.Domain}}"
|
||||
auto_cert: true
|
||||
cache_dir: "{{.TLSCacheDir}}"
|
||||
http_port: {{.HTTPPort}}
|
||||
https_port: {{.HTTPSPort}}
|
||||
email: "admin@{{.Domain}}"
|
||||
{{end}}
|
||||
|
||||
{{if .EnableHTTPS}}sni:
|
||||
enabled: true
|
||||
listen_addr: ":{{.RQLiteRaftPort}}"
|
||||
cert_file: "{{.TLSCacheDir}}/{{.Domain}}.crt"
|
||||
key_file: "{{.TLSCacheDir}}/{{.Domain}}.key"
|
||||
routes:
|
||||
# Note: Raft traffic bypasses SNI gateway - RQLite uses native TLS on port 7002
|
||||
ipfs.{{.Domain}}: "localhost:4101"
|
||||
ipfs-cluster.{{.Domain}}: "localhost:9098"
|
||||
olric.{{.Domain}}: "localhost:3322"
|
||||
{{end}}
|
||||
|
||||
# Full gateway configuration (for API, auth, pubsub, and internal service routing)
|
||||
client_namespace: "default"
|
||||
rqlite_dsn: "http://localhost:{{.RQLiteHTTPPort}}"
|
||||
olric_servers:
|
||||
- "127.0.0.1:3320"
|
||||
olric_timeout: "10s"
|
||||
ipfs_cluster_api_url: "http://localhost:{{.ClusterAPIPort}}"
|
||||
ipfs_api_url: "http://localhost:{{.IPFSAPIPort}}"
|
||||
ipfs_timeout: "60s"
|
||||
|
||||
# Routes for internal service reverse proxy (kept for backwards compatibility but not used by full gateway)
|
||||
routes: {}
|
||||
|
||||
@ -1,8 +1,8 @@
server:
  bindAddr: "{{.BindAddr}}"
  bindAddr: "{{.ServerBindAddr}}"
  bindPort: { { .HTTPPort } }

memberlist:
  environment: local
  bindAddr: "{{.BindAddr}}"
  environment: { { .MemberlistEnvironment } }
  bindAddr: "{{.MemberlistBindAddr}}"
  bindPort: { { .MemberlistPort } }
@ -11,28 +11,33 @@ import (
|
||||
//go:embed *.yaml *.service
|
||||
var templatesFS embed.FS
|
||||
|
||||
// BootstrapConfigData holds parameters for bootstrap.yaml rendering
|
||||
type BootstrapConfigData struct {
|
||||
NodeID string
|
||||
P2PPort int
|
||||
DataDir string
|
||||
RQLiteHTTPPort int
|
||||
RQLiteRaftPort int
|
||||
ClusterAPIPort int
|
||||
IPFSAPIPort int // Default: 4501
|
||||
}
|
||||
|
||||
// NodeConfigData holds parameters for node.yaml rendering
|
||||
// NodeConfigData holds parameters for node.yaml rendering (unified - no bootstrap/node distinction)
|
||||
type NodeConfigData struct {
|
||||
NodeID string
|
||||
P2PPort int
|
||||
DataDir string
|
||||
RQLiteHTTPPort int
|
||||
RQLiteRaftPort int
|
||||
RQLiteJoinAddress string
|
||||
BootstrapPeers []string
|
||||
ClusterAPIPort int
|
||||
IPFSAPIPort int // Default: 4501+
|
||||
NodeID string
|
||||
P2PPort int
|
||||
DataDir string
|
||||
RQLiteHTTPPort int
|
||||
RQLiteRaftPort int // External Raft port for advertisement (7001 for SNI)
|
||||
RQLiteRaftInternalPort int // Internal Raft port for local binding (7002 when SNI enabled)
|
||||
RQLiteJoinAddress string // Optional: join address for joining existing cluster
|
||||
BootstrapPeers []string // List of peer multiaddrs to connect to
|
||||
ClusterAPIPort int
|
||||
IPFSAPIPort int // Default: 4501
|
||||
HTTPAdvAddress string // Advertised HTTP address (IP:port)
|
||||
RaftAdvAddress string // Advertised Raft address (IP:port or domain:port for SNI)
|
||||
UnifiedGatewayPort int // Unified gateway port for all node services
|
||||
Domain string // Domain for this node (e.g., node-123.orama.network)
|
||||
EnableHTTPS bool // Enable HTTPS/TLS with ACME
|
||||
TLSCacheDir string // Directory for ACME certificate cache
|
||||
HTTPPort int // HTTP port for ACME challenges (usually 80)
|
||||
HTTPSPort int // HTTPS port (usually 443)
|
||||
|
||||
// Node-to-node TLS encryption for RQLite Raft communication
|
||||
// Required when using SNI gateway for Raft traffic routing
|
||||
NodeCert string // Path to X.509 certificate for node-to-node communication
|
||||
NodeKey string // Path to X.509 private key for node-to-node communication
|
||||
NodeCACert string // Path to CA certificate (optional)
|
||||
NodeNoVerify bool // Skip certificate verification (for self-signed certs)
|
||||
}
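For orientation, a minimal sketch of how this struct is typically consumed (the field values are illustrative, and RenderNodeConfig is assumed to follow the same (data) (string, error) shape as the other Render* helpers in this file):

	// Sketch only: illustrative values, not taken from a real deployment.
	data := NodeConfigData{
		NodeID:                 "node-1",
		P2PPort:                4001,
		DataDir:                "/home/debros/.orama/node-1",
		RQLiteHTTPPort:         5001,
		RQLiteRaftPort:         7001, // advertised (SNI) Raft port
		RQLiteRaftInternalPort: 7002, // local bind when SNI is enabled
		HTTPAdvAddress:         "10.0.0.2:5001",
		RaftAdvAddress:         "node-1.orama.network:7001",
		Domain:                 "node-1.orama.network",
	}
	yamlText, err := RenderNodeConfig(data)
	if err != nil {
		// template rendering failed
	}
	_ = yamlText // written out as the node's config file, e.g. under the .orama configs directory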
|
||||
|
||||
// GatewayConfigData holds parameters for gateway.yaml rendering
|
||||
@ -50,63 +55,46 @@ type GatewayConfigData struct {
|
||||
|
||||
// OlricConfigData holds parameters for olric.yaml rendering
|
||||
type OlricConfigData struct {
|
||||
BindAddr string
|
||||
HTTPPort int
|
||||
MemberlistPort int
|
||||
ServerBindAddr string // HTTP API bind address (127.0.0.1 for security)
|
||||
HTTPPort int
|
||||
MemberlistBindAddr string // Memberlist bind address (0.0.0.0 for clustering)
|
||||
MemberlistPort int
|
||||
MemberlistEnvironment string // "local", "lan", or "wan"
|
||||
}
|
||||
|
||||
// SystemdIPFSData holds parameters for systemd IPFS service rendering
|
||||
type SystemdIPFSData struct {
|
||||
NodeType string
|
||||
HomeDir string
|
||||
IPFSRepoPath string
|
||||
SecretsDir string
|
||||
DebrosDir string
|
||||
OramaDir string
|
||||
}
|
||||
|
||||
// SystemdIPFSClusterData holds parameters for systemd IPFS Cluster service rendering
|
||||
type SystemdIPFSClusterData struct {
|
||||
NodeType string
|
||||
HomeDir string
|
||||
ClusterPath string
|
||||
DebrosDir string
|
||||
}
|
||||
|
||||
// SystemdRQLiteData holds parameters for systemd RQLite service rendering
|
||||
type SystemdRQLiteData struct {
|
||||
NodeType string
|
||||
HomeDir string
|
||||
HTTPPort int
|
||||
RaftPort int
|
||||
DataDir string
|
||||
JoinAddr string
|
||||
DebrosDir string
|
||||
OramaDir string
|
||||
}
|
||||
|
||||
// SystemdOlricData holds parameters for systemd Olric service rendering
|
||||
type SystemdOlricData struct {
|
||||
HomeDir string
|
||||
ConfigPath string
|
||||
DebrosDir string
|
||||
OramaDir string
|
||||
}
|
||||
|
||||
// SystemdNodeData holds parameters for systemd Node service rendering
|
||||
type SystemdNodeData struct {
|
||||
NodeType string
|
||||
HomeDir string
|
||||
ConfigFile string
|
||||
DebrosDir string
|
||||
OramaDir string
|
||||
}
|
||||
|
||||
// SystemdGatewayData holds parameters for systemd Gateway service rendering
|
||||
type SystemdGatewayData struct {
|
||||
HomeDir string
|
||||
DebrosDir string
|
||||
}
|
||||
|
||||
// RenderBootstrapConfig renders the bootstrap config template with the given data
|
||||
func RenderBootstrapConfig(data BootstrapConfigData) (string, error) {
|
||||
return renderTemplate("bootstrap.yaml", data)
|
||||
OramaDir string
|
||||
}
|
||||
|
||||
// RenderNodeConfig renders the node config template with the given data
|
||||
@ -134,11 +122,6 @@ func RenderIPFSClusterService(data SystemdIPFSClusterData) (string, error) {
|
||||
return renderTemplate("systemd_ipfs_cluster.service", data)
|
||||
}
|
||||
|
||||
// RenderRQLiteService renders the RQLite systemd service template
|
||||
func RenderRQLiteService(data SystemdRQLiteData) (string, error) {
|
||||
return renderTemplate("systemd_rqlite.service", data)
|
||||
}
|
||||
|
||||
// RenderOlricService renders the Olric systemd service template
|
||||
func RenderOlricService(data SystemdOlricData) (string, error) {
|
||||
return renderTemplate("systemd_olric.service", data)
|
||||
|
||||
@ -5,46 +5,12 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRenderBootstrapConfig(t *testing.T) {
|
||||
data := BootstrapConfigData{
|
||||
NodeID: "bootstrap",
|
||||
P2PPort: 4001,
|
||||
DataDir: "/home/debros/.debros/bootstrap",
|
||||
RQLiteHTTPPort: 5001,
|
||||
RQLiteRaftPort: 7001,
|
||||
ClusterAPIPort: 9094,
|
||||
IPFSAPIPort: 5001,
|
||||
}
|
||||
|
||||
result, err := RenderBootstrapConfig(data)
|
||||
if err != nil {
|
||||
t.Fatalf("RenderBootstrapConfig failed: %v", err)
|
||||
}
|
||||
|
||||
// Check for required fields
|
||||
checks := []string{
|
||||
"id: \"bootstrap\"",
|
||||
"type: \"bootstrap\"",
|
||||
"tcp/4001",
|
||||
"rqlite_port: 5001",
|
||||
"rqlite_raft_port: 7001",
|
||||
"cluster_api_url: \"http://localhost:9094\"",
|
||||
"api_url: \"http://localhost:5001\"",
|
||||
}
|
||||
|
||||
for _, check := range checks {
|
||||
if !strings.Contains(result, check) {
|
||||
t.Errorf("Bootstrap config missing: %s", check)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenderNodeConfig(t *testing.T) {
|
||||
bootstrapMultiaddr := "/ip4/127.0.0.1/tcp/4001/p2p/Qm1234567890"
|
||||
data := NodeConfigData{
|
||||
NodeID: "node2",
|
||||
P2PPort: 4002,
|
||||
DataDir: "/home/debros/.debros/node2",
|
||||
DataDir: "/home/debros/.orama/node2",
|
||||
RQLiteHTTPPort: 5002,
|
||||
RQLiteRaftPort: 7002,
|
||||
RQLiteJoinAddress: "localhost:5001",
|
||||
@ -61,10 +27,8 @@ func TestRenderNodeConfig(t *testing.T) {
|
||||
// Check for required fields
|
||||
checks := []string{
|
||||
"id: \"node2\"",
|
||||
"type: \"node\"",
|
||||
"tcp/4002",
|
||||
"rqlite_port: 5002",
|
||||
"rqlite_raft_port: 7002",
|
||||
"rqlite_join_address: \"localhost:5001\"",
|
||||
bootstrapMultiaddr,
|
||||
"cluster_api_url: \"http://localhost:9104\"",
|
||||
@ -110,9 +74,11 @@ func TestRenderGatewayConfig(t *testing.T) {
|
||||
|
||||
func TestRenderOlricConfig(t *testing.T) {
|
||||
data := OlricConfigData{
|
||||
BindAddr: "127.0.0.1",
|
||||
HTTPPort: 3320,
|
||||
MemberlistPort: 3322,
|
||||
ServerBindAddr: "127.0.0.1",
|
||||
HTTPPort: 3320,
|
||||
MemberlistBindAddr: "0.0.0.0",
|
||||
MemberlistPort: 3322,
|
||||
MemberlistEnvironment: "lan",
|
||||
}
|
||||
|
||||
result, err := RenderOlricConfig(data)
|
||||
@ -126,6 +92,7 @@ func TestRenderOlricConfig(t *testing.T) {
|
||||
"bindPort: 3320",
|
||||
"memberlist",
|
||||
"bindPort: 3322",
|
||||
"environment: lan",
|
||||
}
|
||||
|
||||
for _, check := range checks {
|
||||
|
||||
@ -1,7 +1,7 @@
[Unit]
Description=DeBros Gateway
After=debros-node-node.service
Wants=debros-node-node.service
After=debros-node.service
Wants=debros-node.service

[Service]
Type=simple
@ -9,7 +9,7 @@ User=debros
Group=debros
WorkingDirectory={{.HomeDir}}
Environment=HOME={{.HomeDir}}
ExecStart={{.HomeDir}}/bin/gateway --config {{.DebrosDir}}/configs/gateway.yaml
ExecStart={{.HomeDir}}/bin/gateway --config {{.OramaDir}}/data/gateway.yaml
Restart=always
RestartSec=5
StandardOutput=journal
@ -22,7 +22,7 @@ CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}
ReadWritePaths={{.OramaDir}}

[Install]
WantedBy=multi-user.target
@ -20,7 +20,7 @@ SyslogIdentifier=ipfs-{{.NodeType}}
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths={{.DebrosDir}}
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@ -21,7 +21,7 @@ SyslogIdentifier=ipfs-cluster-{{.NodeType}}
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths={{.DebrosDir}}
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@ -10,7 +10,7 @@ User=debros
|
||||
Group=debros
|
||||
WorkingDirectory={{.HomeDir}}
|
||||
Environment=HOME={{.HomeDir}}
|
||||
ExecStart={{.HomeDir}}/bin/node --config {{.DebrosDir}}/configs/{{.ConfigFile}}
|
||||
ExecStart={{.HomeDir}}/bin/orama-node --config {{.OramaDir}}/configs/{{.ConfigFile}}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
@ -20,7 +20,7 @@ SyslogIdentifier=debros-node-{{.NodeType}}
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths={{.DebrosDir}}
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@ -19,7 +19,7 @@ SyslogIdentifier=olric
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ReadWritePaths={{.DebrosDir}}
|
||||
ReadWritePaths={{.OramaDir}}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@ -1,25 +0,0 @@
[Unit]
Description=RQLite Database ({{.NodeType}})
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=debros
Group=debros
Environment=HOME={{.HomeDir}}
ExecStart=/usr/local/bin/rqlited -http-addr 0.0.0.0:{{.HTTPPort}} -http-adv-addr 127.0.0.1:{{.HTTPPort}} -raft-adv-addr 127.0.0.1:{{.RaftPort}} -raft-addr 0.0.0.0:{{.RaftPort}}{{if .JoinAddr}} -join {{.JoinAddr}} -join-attempts 30 -join-interval 10s{{end}} {{.DataDir}}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=rqlite-{{.NodeType}}

NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths={{.DebrosDir}}

[Install]
WantedBy=multi-user.target
@ -114,9 +114,11 @@ func (g *Gateway) challengeHandler(w http.ResponseWriter, r *http.Request) {
|
||||
nsID := nres.Rows[0][0]
|
||||
|
||||
// Store nonce with 5 minute expiry
|
||||
// Normalize wallet address to lowercase for case-insensitive comparison
|
||||
walletLower := strings.ToLower(strings.TrimSpace(req.Wallet))
|
||||
if _, err := db.Query(internalCtx,
|
||||
"INSERT INTO nonces(namespace_id, wallet, nonce, purpose, expires_at) VALUES (?, ?, ?, ?, datetime('now', '+5 minutes'))",
|
||||
nsID, req.Wallet, nonce, req.Purpose,
|
||||
nsID, walletLower, nonce, req.Purpose,
|
||||
); err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
@ -171,8 +173,10 @@ func (g *Gateway) verifyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
q := "SELECT id FROM nonces WHERE namespace_id = ? AND wallet = ? AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
|
||||
nres, err := db.Query(internalCtx, q, nsID, req.Wallet, req.Nonce)
|
||||
// Normalize wallet address to lowercase for case-insensitive comparison
|
||||
walletLower := strings.ToLower(strings.TrimSpace(req.Wallet))
|
||||
q := "SELECT id FROM nonces WHERE namespace_id = ? AND LOWER(wallet) = LOWER(?) AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
|
||||
nres, err := db.Query(internalCtx, q, nsID, walletLower, req.Nonce)
|
||||
if err != nil || nres == nil || nres.Count == 0 {
|
||||
writeError(w, http.StatusBadRequest, "invalid or expired nonce")
|
||||
return
|
||||
@ -395,8 +399,10 @@ func (g *Gateway) issueAPIKeyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
// Validate nonce exists and not used/expired
|
||||
q := "SELECT id FROM nonces WHERE namespace_id = ? AND wallet = ? AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
|
||||
nres, err := db.Query(internalCtx, q, nsID, req.Wallet, req.Nonce)
|
||||
// Normalize wallet address to lowercase for case-insensitive comparison
|
||||
walletLower := strings.ToLower(strings.TrimSpace(req.Wallet))
|
||||
q := "SELECT id FROM nonces WHERE namespace_id = ? AND LOWER(wallet) = LOWER(?) AND nonce = ? AND used_at IS NULL AND (expires_at IS NULL OR expires_at > datetime('now')) LIMIT 1"
|
||||
nres, err := db.Query(internalCtx, q, nsID, walletLower, req.Nonce)
|
||||
if err != nil || nres == nil || nres.Count == 0 {
|
||||
writeError(w, http.StatusBadRequest, "invalid or expired nonce")
|
||||
return
|
||||
@ -1125,6 +1131,108 @@ func (g *Gateway) logoutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, http.StatusBadRequest, "nothing to revoke: provide refresh_token or all=true")
|
||||
}
|
||||
|
||||
// simpleAPIKeyHandler creates an API key directly from a wallet address without signature verification
|
||||
// This is a simplified flow for development/testing
|
||||
// Requires: POST { wallet, namespace }
|
||||
func (g *Gateway) simpleAPIKeyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "client not initialized")
|
||||
return
|
||||
}
|
||||
if r.Method != http.MethodPost {
|
||||
writeError(w, http.StatusMethodNotAllowed, "method not allowed")
|
||||
return
|
||||
}
|
||||
|
||||
var req struct {
|
||||
Wallet string `json:"wallet"`
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid json body")
|
||||
return
|
||||
}
|
||||
|
||||
if strings.TrimSpace(req.Wallet) == "" {
|
||||
writeError(w, http.StatusBadRequest, "wallet is required")
|
||||
return
|
||||
}
|
||||
|
||||
ns := strings.TrimSpace(req.Namespace)
|
||||
if ns == "" {
|
||||
ns = strings.TrimSpace(g.cfg.ClientNamespace)
|
||||
if ns == "" {
|
||||
ns = "default"
|
||||
}
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
internalCtx := client.WithInternalAuth(ctx)
|
||||
db := g.client.Database()
|
||||
|
||||
// Resolve or create namespace
|
||||
if _, err := db.Query(internalCtx, "INSERT OR IGNORE INTO namespaces(name) VALUES (?)", ns); err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
nres, err := db.Query(internalCtx, "SELECT id FROM namespaces WHERE name = ? LIMIT 1", ns)
|
||||
if err != nil || nres == nil || nres.Count == 0 || len(nres.Rows) == 0 || len(nres.Rows[0]) == 0 {
|
||||
writeError(w, http.StatusInternalServerError, "failed to resolve namespace")
|
||||
return
|
||||
}
|
||||
nsID := nres.Rows[0][0]
|
||||
|
||||
// Check if api key already exists for (namespace, wallet)
|
||||
var apiKey string
|
||||
r1, err := db.Query(internalCtx,
|
||||
"SELECT api_keys.key FROM wallet_api_keys JOIN api_keys ON wallet_api_keys.api_key_id = api_keys.id WHERE wallet_api_keys.namespace_id = ? AND LOWER(wallet_api_keys.wallet) = LOWER(?) LIMIT 1",
|
||||
nsID, req.Wallet,
|
||||
)
|
||||
if err == nil && r1 != nil && r1.Count > 0 && len(r1.Rows) > 0 && len(r1.Rows[0]) > 0 {
|
||||
if s, ok := r1.Rows[0][0].(string); ok {
|
||||
apiKey = s
|
||||
} else {
|
||||
b, _ := json.Marshal(r1.Rows[0][0])
|
||||
_ = json.Unmarshal(b, &apiKey)
|
||||
}
|
||||
}
|
||||
|
||||
// If no existing key, create a new one
|
||||
if strings.TrimSpace(apiKey) == "" {
|
||||
buf := make([]byte, 18)
|
||||
if _, err := rand.Read(buf); err != nil {
|
||||
writeError(w, http.StatusInternalServerError, "failed to generate api key")
|
||||
return
|
||||
}
|
||||
apiKey = "ak_" + base64.RawURLEncoding.EncodeToString(buf) + ":" + ns
|
||||
|
||||
if _, err := db.Query(internalCtx, "INSERT INTO api_keys(key, name, namespace_id) VALUES (?, ?, ?)", apiKey, "", nsID); err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Link wallet to api key
|
||||
rid, err := db.Query(internalCtx, "SELECT id FROM api_keys WHERE key = ? LIMIT 1", apiKey)
|
||||
if err == nil && rid != nil && rid.Count > 0 && len(rid.Rows) > 0 && len(rid.Rows[0]) > 0 {
|
||||
apiKeyID := rid.Rows[0][0]
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO wallet_api_keys(namespace_id, wallet, api_key_id) VALUES (?, ?, ?)", nsID, strings.ToLower(req.Wallet), apiKeyID)
|
||||
}
|
||||
}
|
||||
|
||||
// Record ownerships (best-effort)
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'api_key', ?)", nsID, apiKey)
|
||||
_, _ = db.Query(internalCtx, "INSERT OR IGNORE INTO namespace_ownership(namespace_id, owner_type, owner_id) VALUES (?, 'wallet', ?)", nsID, req.Wallet)
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]any{
|
||||
"api_key": apiKey,
|
||||
"namespace": ns,
|
||||
"wallet": strings.ToLower(strings.TrimPrefix(strings.TrimPrefix(req.Wallet, "0x"), "0X")),
|
||||
"created": time.Now().Format(time.RFC3339),
|
||||
})
|
||||
}
|
||||
|
||||
// base58Decode decodes a base58-encoded string (Bitcoin alphabet)
|
||||
// Used for decoding Solana public keys (base58-encoded 32-byte ed25519 public keys)
|
||||
func base58Decode(encoded string) ([]byte, error) {
|
||||
|
||||
@ -3,6 +3,7 @@ package gateway
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
@ -16,7 +17,8 @@ import (
|
||||
// Cache HTTP handlers for Olric distributed cache
|
||||
|
||||
func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.olricClient == nil {
|
||||
client := g.getOlricClient()
|
||||
if client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
|
||||
return
|
||||
}
|
||||
@ -24,7 +26,7 @@ func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err := g.olricClient.Health(ctx)
|
||||
err := client.Health(ctx)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusServiceUnavailable, fmt.Sprintf("cache health check failed: %v", err))
|
||||
return
|
||||
@ -37,7 +39,8 @@ func (g *Gateway) cacheHealthHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.olricClient == nil {
|
||||
client := g.getOlricClient()
|
||||
if client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
|
||||
return
|
||||
}
|
||||
@ -65,8 +68,8 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client := g.olricClient.GetClient()
|
||||
dm, err := client.NewDMap(req.DMap)
|
||||
olricCluster := client.GetClient()
|
||||
dm, err := olricCluster.NewDMap(req.DMap)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
|
||||
return
|
||||
@ -74,7 +77,8 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
gr, err := dm.Get(ctx, req.Key)
|
||||
if err != nil {
|
||||
if err == olriclib.ErrKeyNotFound {
|
||||
// Check for key not found error - handle both wrapped and direct errors
|
||||
if errors.Is(err, olriclib.ErrKeyNotFound) || err.Error() == "key not found" || strings.Contains(err.Error(), "key not found") {
|
||||
writeError(w, http.StatusNotFound, "key not found")
|
||||
return
|
||||
}
|
||||
@ -140,7 +144,8 @@ func decodeValueFromOlric(gr *olriclib.GetResponse) (any, error) {
|
||||
}
|
||||
|
||||
func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.olricClient == nil {
|
||||
client := g.getOlricClient()
|
||||
if client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
|
||||
return
|
||||
}
|
||||
@ -173,8 +178,8 @@ func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client := g.olricClient.GetClient()
|
||||
dm, err := client.NewDMap(req.DMap)
|
||||
olricCluster := client.GetClient()
|
||||
dm, err := olricCluster.NewDMap(req.DMap)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
|
||||
return
|
||||
@ -218,7 +223,8 @@ func (g *Gateway) cacheMultiGetHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.olricClient == nil {
|
||||
client := g.getOlricClient()
|
||||
if client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
|
||||
return
|
||||
}
|
||||
@ -253,8 +259,8 @@ func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client := g.olricClient.GetClient()
|
||||
dm, err := client.NewDMap(req.DMap)
|
||||
olricCluster := client.GetClient()
|
||||
dm, err := olricCluster.NewDMap(req.DMap)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
|
||||
return
|
||||
@ -335,7 +341,8 @@ func (g *Gateway) cachePutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.olricClient == nil {
|
||||
client := g.getOlricClient()
|
||||
if client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
|
||||
return
|
||||
}
|
||||
@ -363,8 +370,8 @@ func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client := g.olricClient.GetClient()
|
||||
dm, err := client.NewDMap(req.DMap)
|
||||
olricCluster := client.GetClient()
|
||||
dm, err := olricCluster.NewDMap(req.DMap)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
|
||||
return
|
||||
@ -372,7 +379,8 @@ func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
deletedCount, err := dm.Delete(ctx, req.Key)
|
||||
if err != nil {
|
||||
if err == olriclib.ErrKeyNotFound {
|
||||
// Check for key not found error - handle both wrapped and direct errors
|
||||
if errors.Is(err, olriclib.ErrKeyNotFound) || err.Error() == "key not found" || strings.Contains(err.Error(), "key not found") {
|
||||
writeError(w, http.StatusNotFound, "key not found")
|
||||
return
|
||||
}
|
||||
@ -392,7 +400,8 @@ func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (g *Gateway) cacheScanHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if g.olricClient == nil {
|
||||
client := g.getOlricClient()
|
||||
if client == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "Olric cache client not initialized")
|
||||
return
|
||||
}
|
||||
@ -420,8 +429,8 @@ func (g *Gateway) cacheScanHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
client := g.olricClient.GetClient()
|
||||
dm, err := client.NewDMap(req.DMap)
|
||||
olricCluster := client.GetClient()
|
||||
dm, err := olricCluster.NewDMap(req.DMap)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to create DMap: %v", err))
|
||||
return
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -25,11 +26,18 @@ import (
|
||||
_ "github.com/rqlite/gorqlite/stdlib"
|
||||
)
|
||||
|
||||
const (
|
||||
olricInitMaxAttempts = 5
|
||||
olricInitInitialBackoff = 500 * time.Millisecond
|
||||
olricInitMaxBackoff = 5 * time.Second
|
||||
)
|
||||
|
||||
// Config holds configuration for the gateway server
|
||||
type Config struct {
|
||||
ListenAddr string
|
||||
ClientNamespace string
|
||||
BootstrapPeers []string
|
||||
NodePeerID string // The node's actual peer ID from its identity file
|
||||
|
||||
// Optional DSN for rqlite database/sql driver, e.g. "http://localhost:4001"
|
||||
// If empty, defaults to "http://localhost:4001".
|
||||
@ -38,7 +46,7 @@ type Config struct {
|
||||
// HTTPS configuration
|
||||
EnableHTTPS bool // Enable HTTPS with ACME (Let's Encrypt)
|
||||
DomainName string // Domain name for HTTPS certificate
|
||||
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.debros/tls-cache)
|
||||
TLSCacheDir string // Directory to cache TLS certificates (default: ~/.orama/tls-cache)
|
||||
|
||||
// Olric cache configuration
|
||||
OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"]
|
||||
@ -53,12 +61,13 @@ type Config struct {
|
||||
}
|
||||
|
||||
type Gateway struct {
|
||||
logger *logging.ColoredLogger
|
||||
cfg *Config
|
||||
client client.NetworkClient
|
||||
startedAt time.Time
|
||||
signingKey *rsa.PrivateKey
|
||||
keyID string
|
||||
logger *logging.ColoredLogger
|
||||
cfg *Config
|
||||
client client.NetworkClient
|
||||
nodePeerID string // The node's actual peer ID from its identity file (overrides client's peer ID)
|
||||
startedAt time.Time
|
||||
signingKey *rsa.PrivateKey
|
||||
keyID string
|
||||
|
||||
// rqlite SQL connection and HTTP ORM gateway
|
||||
sqlDB *sql.DB
|
||||
@ -67,6 +76,7 @@ type Gateway struct {
|
||||
|
||||
// Olric cache client
|
||||
olricClient *olric.Client
|
||||
olricMu sync.RWMutex
|
||||
|
||||
// IPFS storage client
|
||||
ipfsClient ipfs.IPFSClient
|
||||
@ -107,7 +117,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
|
||||
|
||||
logger.ComponentInfo(logging.ComponentClient, "Network client connected",
|
||||
zap.String("namespace", cliCfg.AppName),
|
||||
zap.Int("bootstrap_peer_count", len(cliCfg.BootstrapPeers)),
|
||||
zap.Int("peer_count", len(cliCfg.BootstrapPeers)),
|
||||
)
|
||||
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "Creating gateway instance...")
|
||||
@ -115,6 +125,7 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
|
||||
logger: logger,
|
||||
cfg: cfg,
|
||||
client: c,
|
||||
nodePeerID: cfg.NodePeerID,
|
||||
startedAt: time.Now(),
|
||||
localSubscribers: make(map[string][]*localSubscriber),
|
||||
}
|
||||
@ -182,11 +193,12 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
|
||||
Servers: olricServers,
|
||||
Timeout: cfg.OlricTimeout,
|
||||
}
|
||||
olricClient, olricErr := olric.NewClient(olricCfg, logger.Logger)
|
||||
olricClient, olricErr := initializeOlricClientWithRetry(olricCfg, logger)
|
||||
if olricErr != nil {
|
||||
logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize Olric cache client; cache endpoints disabled", zap.Error(olricErr))
|
||||
gw.startOlricReconnectLoop(olricCfg)
|
||||
} else {
|
||||
gw.olricClient = olricClient
|
||||
gw.setOlricClient(olricClient)
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client ready",
|
||||
zap.Strings("servers", olricCfg.Servers),
|
||||
zap.Duration("timeout", olricCfg.Timeout),
|
||||
@ -305,10 +317,10 @@ func (g *Gateway) Close() {
|
||||
if g.sqlDB != nil {
|
||||
_ = g.sqlDB.Close()
|
||||
}
|
||||
if g.olricClient != nil {
|
||||
if client := g.getOlricClient(); client != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
if err := g.olricClient.Close(ctx); err != nil {
|
||||
if err := client.Close(ctx); err != nil {
|
||||
g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err))
|
||||
}
|
||||
}
|
||||
@ -330,6 +342,78 @@ func (g *Gateway) getLocalSubscribers(topic, namespace string) []*localSubscribe
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *Gateway) setOlricClient(client *olric.Client) {
|
||||
g.olricMu.Lock()
|
||||
defer g.olricMu.Unlock()
|
||||
g.olricClient = client
|
||||
}
|
||||
|
||||
func (g *Gateway) getOlricClient() *olric.Client {
|
||||
g.olricMu.RLock()
|
||||
defer g.olricMu.RUnlock()
|
||||
return g.olricClient
|
||||
}
|
||||
|
||||
func (g *Gateway) startOlricReconnectLoop(cfg olric.Config) {
|
||||
go func() {
|
||||
retryDelay := 5 * time.Second
|
||||
for {
|
||||
client, err := initializeOlricClientWithRetry(cfg, g.logger)
|
||||
if err == nil {
|
||||
g.setOlricClient(client)
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client connected after background retries",
|
||||
zap.Strings("servers", cfg.Servers),
|
||||
zap.Duration("timeout", cfg.Timeout))
|
||||
return
|
||||
}
|
||||
|
||||
g.logger.ComponentWarn(logging.ComponentGeneral, "Olric cache client reconnect failed",
|
||||
zap.Duration("retry_in", retryDelay),
|
||||
zap.Error(err))
|
||||
|
||||
time.Sleep(retryDelay)
|
||||
if retryDelay < olricInitMaxBackoff {
|
||||
retryDelay *= 2
|
||||
if retryDelay > olricInitMaxBackoff {
|
||||
retryDelay = olricInitMaxBackoff
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func initializeOlricClientWithRetry(cfg olric.Config, logger *logging.ColoredLogger) (*olric.Client, error) {
|
||||
backoff := olricInitInitialBackoff
|
||||
|
||||
for attempt := 1; attempt <= olricInitMaxAttempts; attempt++ {
|
||||
client, err := olric.NewClient(cfg, logger.Logger)
|
||||
if err == nil {
|
||||
if attempt > 1 {
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "Olric cache client initialized after retries",
|
||||
zap.Int("attempts", attempt))
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
logger.ComponentWarn(logging.ComponentGeneral, "Olric cache client init attempt failed",
|
||||
zap.Int("attempt", attempt),
|
||||
zap.Duration("retry_in", backoff),
|
||||
zap.Error(err))
|
||||
|
||||
if attempt == olricInitMaxAttempts {
|
||||
return nil, fmt.Errorf("failed to initialize Olric cache client after %d attempts: %w", attempt, err)
|
||||
}
|
||||
|
||||
time.Sleep(backoff)
|
||||
backoff *= 2
|
||||
if backoff > olricInitMaxBackoff {
|
||||
backoff = olricInitMaxBackoff
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to initialize Olric cache client")
|
||||
}
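// Note: with the constants above (500ms initial backoff, doubling per attempt,
// capped at 5s, five attempts) a single call sleeps roughly 0.5s, 1s, 2s and 4s
// between attempts, i.e. it gives up after about 7.5s of retrying;
// startOlricReconnectLoop then repeats the whole sequence about every 5s until
// a client is obtained.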
|
||||
|
||||
// discoverOlricServers discovers Olric server addresses from LibP2P peers
|
||||
// Returns a list of IP:port addresses where Olric servers are expected to run (port 3320)
|
||||
func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger) []string {
|
||||
@ -384,10 +468,10 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
|
||||
}
|
||||
}
|
||||
|
||||
// Also check bootstrap peers from config
|
||||
// Also check peers from config
|
||||
if cfg := networkClient.Config(); cfg != nil {
|
||||
for _, bootstrapAddr := range cfg.BootstrapPeers {
|
||||
ma, err := multiaddr.NewMultiaddr(bootstrapAddr)
|
||||
for _, peerAddr := range cfg.BootstrapPeers {
|
||||
ma, err := multiaddr.NewMultiaddr(peerAddr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@ -433,7 +517,7 @@ type ipfsDiscoveryResult struct {
|
||||
}
|
||||
|
||||
// discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files
|
||||
// Checks bootstrap.yaml first, then node.yaml, node2.yaml, etc.
|
||||
// Checks node-1.yaml through node-5.yaml for IPFS configuration
|
||||
func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
@ -441,10 +525,10 @@ func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
|
||||
return ipfsDiscoveryResult{}
|
||||
}
|
||||
|
||||
configDir := filepath.Join(homeDir, ".debros")
|
||||
configDir := filepath.Join(homeDir, ".orama")
|
||||
|
||||
// Try bootstrap.yaml first, then node.yaml, node2.yaml, etc.
|
||||
configFiles := []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"}
|
||||
// Try all node config files for IPFS settings
|
||||
configFiles := []string{"node-1.yaml", "node-2.yaml", "node-3.yaml", "node-4.yaml", "node-5.yaml"}
|
||||
|
||||
for _, filename := range configFiles {
|
||||
configPath := filepath.Join(configDir, filename)
|
||||
|
||||
257
pkg/gateway/http_gateway.go
Normal file
@ -0,0 +1,257 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
)
|
||||
|
||||
// HTTPGateway is the main reverse proxy router
|
||||
type HTTPGateway struct {
|
||||
logger *logging.ColoredLogger
|
||||
config *config.HTTPGatewayConfig
|
||||
router chi.Router
|
||||
reverseProxies map[string]*httputil.ReverseProxy
|
||||
mu sync.RWMutex
|
||||
server *http.Server
|
||||
}
|
||||
|
||||
// NewHTTPGateway creates a new HTTP reverse proxy gateway
|
||||
func NewHTTPGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPGateway, error) {
|
||||
if !cfg.Enabled {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
var err error
|
||||
logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create logger: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
gateway := &HTTPGateway{
|
||||
logger: logger,
|
||||
config: cfg,
|
||||
router: chi.NewRouter(),
|
||||
reverseProxies: make(map[string]*httputil.ReverseProxy),
|
||||
}
|
||||
|
||||
// Set up router middleware
|
||||
gateway.router.Use(middleware.RequestID)
|
||||
gateway.router.Use(middleware.Logger)
|
||||
gateway.router.Use(middleware.Recoverer)
|
||||
gateway.router.Use(middleware.Timeout(30 * time.Second))
|
||||
|
||||
// Add health check endpoint
|
||||
gateway.router.Get("/health", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, `{"status":"ok","node":"%s"}`, cfg.NodeName)
|
||||
})
|
||||
|
||||
// Initialize reverse proxies and routes
|
||||
if err := gateway.initializeRoutes(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize routes: %w", err)
|
||||
}
|
||||
|
||||
gateway.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway initialized",
|
||||
zap.String("node_name", cfg.NodeName),
|
||||
zap.String("listen_addr", cfg.ListenAddr),
|
||||
zap.Int("routes", len(cfg.Routes)),
|
||||
)
|
||||
|
||||
return gateway, nil
|
||||
}
|
||||
|
||||
// initializeRoutes sets up all reverse proxy routes
|
||||
func (hg *HTTPGateway) initializeRoutes() error {
|
||||
hg.mu.Lock()
|
||||
defer hg.mu.Unlock()
|
||||
|
||||
for routeName, routeConfig := range hg.config.Routes {
|
||||
// Validate backend URL
|
||||
_, err := url.Parse(routeConfig.BackendURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid backend URL for route %s: %w", routeName, err)
|
||||
}
|
||||
|
||||
// Create reverse proxy with custom transport
|
||||
proxy := &httputil.ReverseProxy{
|
||||
Rewrite: func(r *httputil.ProxyRequest) {
|
||||
// Keep original host for Host header
|
||||
r.Out.Host = r.In.Host
|
||||
// Set X-Forwarded-For header for logging
|
||||
r.Out.Header.Set("X-Forwarded-For", getClientIP(r.In))
|
||||
},
|
||||
ErrorHandler: hg.proxyErrorHandler(routeName),
|
||||
}
|
||||
|
||||
// Set timeout on transport
|
||||
if routeConfig.Timeout > 0 {
|
||||
proxy.Transport = &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: routeConfig.Timeout,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: routeConfig.Timeout,
|
||||
}
|
||||
}
|
||||
|
||||
hg.reverseProxies[routeName] = proxy
|
||||
|
||||
// Register route handler
|
||||
hg.registerRouteHandler(routeName, routeConfig, proxy)
|
||||
|
||||
hg.logger.ComponentInfo(logging.ComponentGeneral, "Route initialized",
|
||||
zap.String("name", routeName),
|
||||
zap.String("path", routeConfig.PathPrefix),
|
||||
zap.String("backend", routeConfig.BackendURL),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerRouteHandler registers a route handler with the router
|
||||
func (hg *HTTPGateway) registerRouteHandler(name string, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
|
||||
pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")
|
||||
|
||||
// Use Mount instead of Route for wildcard path handling
|
||||
hg.router.Mount(pathPrefix, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
hg.handleProxyRequest(w, req, routeConfig, proxy)
|
||||
}))
|
||||
}
|
||||
|
||||
// handleProxyRequest handles a reverse proxy request
|
||||
func (hg *HTTPGateway) handleProxyRequest(w http.ResponseWriter, req *http.Request, routeConfig config.RouteConfig, proxy *httputil.ReverseProxy) {
|
||||
// Strip path prefix before forwarding
|
||||
originalPath := req.URL.Path
|
||||
pathPrefix := strings.TrimSuffix(routeConfig.PathPrefix, "/")
|
||||
|
||||
if strings.HasPrefix(req.URL.Path, pathPrefix) {
|
||||
// Remove the prefix but keep leading slash
|
||||
strippedPath := strings.TrimPrefix(req.URL.Path, pathPrefix)
|
||||
if strippedPath == "" {
|
||||
strippedPath = "/"
|
||||
}
|
||||
req.URL.Path = strippedPath
|
||||
}
|
||||
|
||||
// Update request URL to point to backend
|
||||
backendURL, _ := url.Parse(routeConfig.BackendURL)
|
||||
req.URL.Scheme = backendURL.Scheme
|
||||
req.URL.Host = backendURL.Host
|
||||
|
||||
// Log the proxy request
|
||||
hg.logger.ComponentInfo(logging.ComponentGeneral, "Proxy request",
|
||||
zap.String("original_path", originalPath),
|
||||
zap.String("stripped_path", req.URL.Path),
|
||||
zap.String("backend", routeConfig.BackendURL),
|
||||
zap.String("method", req.Method),
|
||||
zap.String("client_ip", getClientIP(req)),
|
||||
)
|
||||
|
||||
// Handle WebSocket upgrades if configured
|
||||
if routeConfig.WebSocket && isWebSocketRequest(req) {
|
||||
hg.logger.ComponentInfo(logging.ComponentGeneral, "WebSocket upgrade detected",
|
||||
zap.String("path", originalPath),
|
||||
)
|
||||
}
|
||||
|
||||
// Forward the request
|
||||
proxy.ServeHTTP(w, req)
|
||||
}
|
||||
|
||||
// proxyErrorHandler returns an error handler for the reverse proxy
|
||||
func (hg *HTTPGateway) proxyErrorHandler(routeName string) func(http.ResponseWriter, *http.Request, error) {
|
||||
return func(w http.ResponseWriter, r *http.Request, err error) {
|
||||
hg.logger.ComponentError(logging.ComponentGeneral, "Proxy error",
|
||||
zap.String("route", routeName),
|
||||
zap.String("path", r.URL.Path),
|
||||
zap.String("method", r.Method),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusBadGateway)
|
||||
fmt.Fprintf(w, `{"error":"gateway error","route":"%s","detail":"%s"}`, routeName, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the HTTP gateway server
|
||||
func (hg *HTTPGateway) Start(ctx context.Context) error {
|
||||
if hg == nil || !hg.config.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
hg.server = &http.Server{
|
||||
Addr: hg.config.ListenAddr,
|
||||
Handler: hg.router,
|
||||
}
|
||||
|
||||
// Listen for connections
|
||||
listener, err := net.Listen("tcp", hg.config.ListenAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to listen on %s: %w", hg.config.ListenAddr, err)
|
||||
}
|
||||
|
||||
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway server starting",
|
||||
zap.String("node_name", hg.config.NodeName),
|
||||
zap.String("listen_addr", hg.config.ListenAddr),
|
||||
)
|
||||
|
||||
// Serve in a goroutine
|
||||
go func() {
|
||||
if err := hg.server.Serve(listener); err != nil && err != http.ErrServerClosed {
|
||||
hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway server error", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for context cancellation
|
||||
<-ctx.Done()
|
||||
return hg.Stop()
|
||||
}
|
||||
|
||||
// Stop gracefully stops the HTTP gateway server
|
||||
func (hg *HTTPGateway) Stop() error {
|
||||
if hg == nil || hg.server == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutting down")
|
||||
|
||||
if err := hg.server.Shutdown(ctx); err != nil {
|
||||
hg.logger.ComponentError(logging.ComponentGeneral, "HTTP Gateway shutdown error", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
hg.logger.ComponentInfo(logging.ComponentGeneral, "HTTP Gateway shutdown complete")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Router returns the chi router for testing or extension
|
||||
func (hg *HTTPGateway) Router() chi.Router {
|
||||
return hg.router
|
||||
}
|
||||
|
||||
// isWebSocketRequest checks if a request is a WebSocket upgrade request
|
||||
func isWebSocketRequest(r *http.Request) bool {
|
||||
return r.Header.Get("Connection") == "Upgrade" &&
|
||||
r.Header.Get("Upgrade") == "websocket"
|
||||
}
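For orientation, a minimal sketch of standing the proxy up on its own (the route name, ports and backend URL are illustrative, and the Routes map shape is inferred from how initializeRoutes iterates it):

	// Sketch only: illustrative config, assumed to live in the same package as the gateway.
	func exampleStartHTTPGateway(ctx context.Context) error {
		logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true)
		if err != nil {
			return err
		}
		cfg := &config.HTTPGatewayConfig{
			Enabled:    true,
			ListenAddr: ":6001",
			NodeName:   "node-1",
			Routes: map[string]config.RouteConfig{
				"ipfs-api": {
					PathPrefix: "/ipfs/",
					BackendURL: "http://localhost:4501",
					Timeout:    30 * time.Second,
				},
			},
		}
		hg, err := NewHTTPGateway(logger, cfg)
		if err != nil || hg == nil { // hg is nil when the gateway is disabled
			return err
		}
		// Start blocks until ctx is cancelled, then shuts down gracefully.
		return hg.Start(ctx)
	}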
|
||||
@ -1,11 +1,11 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type statusResponseWriter struct {
|
||||
@ -28,23 +28,23 @@ func (w *statusResponseWriter) Write(b []byte) (int, error) {
|
||||
// Ensure websocket upgrades work by preserving Hijacker/Flusher/Pusher
|
||||
// interfaces when the underlying ResponseWriter supports them.
|
||||
func (w *statusResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
if h, ok := w.ResponseWriter.(http.Hijacker); ok {
|
||||
return h.Hijack()
|
||||
}
|
||||
return nil, nil, fmt.Errorf("hijacker not supported")
|
||||
if h, ok := w.ResponseWriter.(http.Hijacker); ok {
|
||||
return h.Hijack()
|
||||
}
|
||||
return nil, nil, fmt.Errorf("hijacker not supported")
|
||||
}
|
||||
|
||||
func (w *statusResponseWriter) Flush() {
|
||||
if f, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
}
|
||||
if f, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *statusResponseWriter) Push(target string, opts *http.PushOptions) error {
|
||||
if p, ok := w.ResponseWriter.(http.Pusher); ok {
|
||||
return p.Push(target, opts)
|
||||
}
|
||||
return http.ErrNotSupported
|
||||
if p, ok := w.ResponseWriter.(http.Pusher); ok {
|
||||
return p.Push(target, opts)
|
||||
}
|
||||
return http.ErrNotSupported
|
||||
}
|
||||
|
||||
// writeJSON writes JSON with status code
|
||||
|
||||
237
pkg/gateway/https.go
Normal file
@ -0,0 +1,237 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
)
|
||||
|
||||
// HTTPSGateway extends HTTPGateway with HTTPS/TLS support
|
||||
type HTTPSGateway struct {
|
||||
*HTTPGateway
|
||||
httpsConfig *config.HTTPSConfig
|
||||
certManager *autocert.Manager
|
||||
httpsServer *http.Server
|
||||
httpServer *http.Server // For ACME challenge and redirect
|
||||
}
|
||||
|
||||
// NewHTTPSGateway creates a new HTTPS gateway with Let's Encrypt autocert
|
||||
func NewHTTPSGateway(logger *logging.ColoredLogger, cfg *config.HTTPGatewayConfig) (*HTTPSGateway, error) {
|
||||
// First create the base HTTP gateway
|
||||
base, err := NewHTTPGateway(logger, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if base == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if !cfg.HTTPS.Enabled {
|
||||
// Return base gateway wrapped in HTTPSGateway for consistent interface
|
||||
return &HTTPSGateway{HTTPGateway: base}, nil
|
||||
}
|
||||
|
||||
gateway := &HTTPSGateway{
|
||||
HTTPGateway: base,
|
||||
httpsConfig: &cfg.HTTPS,
|
||||
}
|
||||
|
||||
// Check if using self-signed certificates or Let's Encrypt
|
||||
if cfg.HTTPS.UseSelfSigned || (cfg.HTTPS.CertFile != "" && cfg.HTTPS.KeyFile != "") {
|
||||
// Using self-signed or pre-existing certificates
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "Using self-signed or pre-configured certificates for HTTPS",
|
||||
zap.String("domain", cfg.HTTPS.Domain),
|
||||
zap.String("cert_file", cfg.HTTPS.CertFile),
|
||||
zap.String("key_file", cfg.HTTPS.KeyFile),
|
||||
)
|
||||
// Don't set certManager - will use CertFile/KeyFile from config
|
||||
} else if cfg.HTTPS.AutoCert {
|
||||
// Use Let's Encrypt STAGING (consistent with SNI gateway)
|
||||
cacheDir := cfg.HTTPS.CacheDir
|
||||
if cacheDir == "" {
|
||||
cacheDir = "/home/debros/.orama/tls-cache"
|
||||
}
|
||||
|
||||
// Use Let's Encrypt STAGING - provides higher rate limits for testing/development
|
||||
directoryURL := "https://acme-staging-v02.api.letsencrypt.org/directory"
|
||||
logger.ComponentWarn(logging.ComponentGeneral,
|
||||
"Using Let's Encrypt STAGING - certificates will not be trusted by production clients",
|
||||
zap.String("domain", cfg.HTTPS.Domain),
|
||||
)
|
||||
|
||||
gateway.certManager = &autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: autocert.HostWhitelist(cfg.HTTPS.Domain),
|
||||
Cache: autocert.DirCache(cacheDir),
|
||||
Email: cfg.HTTPS.Email,
|
||||
Client: &acme.Client{
|
||||
DirectoryURL: directoryURL,
|
||||
},
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "Let's Encrypt autocert configured",
|
||||
zap.String("domain", cfg.HTTPS.Domain),
|
||||
zap.String("cache_dir", cacheDir),
|
||||
zap.String("acme_environment", "staging"),
|
||||
)
|
||||
}
|
||||
|
||||
return gateway, nil
|
||||
}
|
||||
|
||||
// Start starts both HTTP (for ACME) and HTTPS servers
|
||||
func (g *HTTPSGateway) Start(ctx context.Context) error {
|
||||
if g == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If HTTPS is not enabled, just start the base HTTP gateway
|
||||
if !g.httpsConfig.Enabled {
|
||||
return g.HTTPGateway.Start(ctx)
|
||||
}
|
||||
|
||||
httpPort := g.httpsConfig.HTTPPort
|
||||
if httpPort == 0 {
|
||||
httpPort = 80
|
||||
}
|
||||
httpsPort := g.httpsConfig.HTTPSPort
|
||||
if httpsPort == 0 {
|
||||
httpsPort = 443
|
||||
}
|
||||
|
||||
// Start HTTP server for ACME challenge and redirect
|
||||
g.httpServer = &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", httpPort),
|
||||
Handler: g.httpHandler(),
|
||||
}
|
||||
|
||||
go func() {
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTP server starting (ACME/redirect)",
|
||||
zap.Int("port", httpPort),
|
||||
)
|
||||
if err := g.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "HTTP server error", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
// Set up TLS config
|
||||
tlsConfig := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
|
||||
if g.certManager != nil {
|
||||
tlsConfig.GetCertificate = g.certManager.GetCertificate
|
||||
} else if g.httpsConfig.CertFile != "" && g.httpsConfig.KeyFile != "" {
|
||||
cert, err := tls.LoadX509KeyPair(g.httpsConfig.CertFile, g.httpsConfig.KeyFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load TLS certificate: %w", err)
|
||||
}
|
||||
tlsConfig.Certificates = []tls.Certificate{cert}
|
||||
} else {
|
||||
return fmt.Errorf("HTTPS enabled but no certificate source configured")
|
||||
}
|
||||
|
||||
// Start HTTPS server
|
||||
g.httpsServer = &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", httpsPort),
|
||||
Handler: g.router,
|
||||
TLSConfig: tlsConfig,
|
||||
}
|
||||
|
||||
listener, err := tls.Listen("tcp", g.httpsServer.Addr, tlsConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create TLS listener: %w", err)
|
||||
}
|
||||
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway starting",
|
||||
zap.String("domain", g.httpsConfig.Domain),
|
||||
zap.Int("port", httpsPort),
|
||||
)
|
||||
|
||||
go func() {
|
||||
if err := g.httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "HTTPS server error", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for context cancellation
|
||||
<-ctx.Done()
|
||||
return g.Stop()
|
||||
}
|
||||
|
||||
// httpHandler returns a handler for the HTTP server (ACME challenge + redirect)
|
||||
func (g *HTTPSGateway) httpHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Handle ACME challenge
|
||||
if g.certManager != nil && strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
|
||||
g.certManager.HTTPHandler(nil).ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Redirect HTTP to HTTPS
|
||||
httpsPort := g.httpsConfig.HTTPSPort
|
||||
if httpsPort == 0 {
|
||||
httpsPort = 443
|
||||
}
|
||||
|
||||
target := "https://" + r.Host + r.URL.RequestURI()
|
||||
if httpsPort != 443 {
|
||||
host := r.Host
|
||||
if idx := strings.LastIndex(host, ":"); idx > 0 {
|
||||
host = host[:idx]
|
||||
}
|
||||
target = fmt.Sprintf("https://%s:%d%s", host, httpsPort, r.URL.RequestURI())
|
||||
}
|
||||
|
||||
http.Redirect(w, r, target, http.StatusMovedPermanently)
|
||||
})
|
||||
}
|
||||
|
||||
// Stop gracefully stops both HTTP and HTTPS servers
|
||||
func (g *HTTPSGateway) Stop() error {
|
||||
if g == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutting down")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var errs []error
|
||||
|
||||
if g.httpServer != nil {
|
||||
if err := g.httpServer.Shutdown(ctx); err != nil {
|
||||
errs = append(errs, fmt.Errorf("HTTP server shutdown: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if g.httpsServer != nil {
|
||||
if err := g.httpsServer.Shutdown(ctx); err != nil {
|
||||
errs = append(errs, fmt.Errorf("HTTPS server shutdown: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if g.HTTPGateway.server != nil {
|
||||
if err := g.HTTPGateway.Stop(); err != nil {
|
||||
errs = append(errs, fmt.Errorf("base gateway shutdown: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("shutdown errors: %v", errs)
|
||||
}
|
||||
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "HTTPS Gateway shutdown complete")
|
||||
return nil
|
||||
}
|
||||
@ -178,8 +178,13 @@ func extractAPIKey(r *http.Request) string {
|
||||
|
||||
// isPublicPath returns true for routes that should be accessible without API key auth
|
||||
func isPublicPath(p string) bool {
|
||||
// Allow ACME challenges for Let's Encrypt certificate provisioning
|
||||
if strings.HasPrefix(p, "/.well-known/acme-challenge/") {
|
||||
return true
|
||||
}
|
||||
|
||||
switch p {
|
||||
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key":
|
||||
case "/health", "/v1/health", "/status", "/v1/status", "/v1/auth/jwks", "/.well-known/jwks.json", "/v1/version", "/v1/auth/login", "/v1/auth/challenge", "/v1/auth/verify", "/v1/auth/register", "/v1/auth/refresh", "/v1/auth/logout", "/v1/auth/api-key", "/v1/auth/simple-key", "/v1/network/status", "/v1/network/peers":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
||||
@ -60,24 +60,24 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
|
||||
// Channel to deliver PubSub messages to WS writer
|
||||
msgs := make(chan []byte, 128)
|
||||
|
||||
|
||||
// NEW: Register as local subscriber for direct message delivery
|
||||
localSub := &localSubscriber{
|
||||
msgChan: msgs,
|
||||
namespace: ns,
|
||||
}
|
||||
topicKey := fmt.Sprintf("%s.%s", ns, topic)
|
||||
|
||||
|
||||
g.mu.Lock()
|
||||
g.localSubscribers[topicKey] = append(g.localSubscribers[topicKey], localSub)
|
||||
subscriberCount := len(g.localSubscribers[topicKey])
|
||||
g.mu.Unlock()
|
||||
|
||||
|
||||
g.logger.ComponentInfo("gateway", "pubsub ws: registered local subscriber",
|
||||
zap.String("topic", topic),
|
||||
zap.String("namespace", ns),
|
||||
zap.Int("total_subscribers", subscriberCount))
|
||||
|
||||
|
||||
// Unregister on close
|
||||
defer func() {
|
||||
g.mu.Lock()
|
||||
@ -97,12 +97,12 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
zap.String("topic", topic),
|
||||
zap.Int("remaining_subscribers", remainingCount))
|
||||
}()
|
||||
|
||||
|
||||
// Use internal auth context when interacting with client to avoid circular auth requirements
|
||||
ctx := client.WithInternalAuth(r.Context())
|
||||
// Apply namespace isolation
|
||||
ctx = pubsub.WithNamespace(ctx, ns)
|
||||
|
||||
|
||||
// Writer loop - START THIS FIRST before libp2p subscription
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
@ -122,11 +122,11 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
g.logger.ComponentInfo("gateway", "pubsub ws: sending message to client",
|
||||
zap.String("topic", topic),
|
||||
zap.Int("data_len", len(b)))
|
||||
|
||||
|
||||
// Format message as JSON envelope with data (base64 encoded), timestamp, and topic
|
||||
// This matches the SDK's Message interface: {data: string, timestamp: number, topic: string}
|
||||
envelope := map[string]interface{}{
|
||||
@ -141,11 +141,11 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
g.logger.ComponentDebug("gateway", "pubsub ws: envelope created",
|
||||
zap.String("topic", topic),
|
||||
zap.Int("envelope_len", len(envelopeJSON)))
|
||||
|
||||
|
||||
conn.SetWriteDeadline(time.Now().Add(30 * time.Second))
|
||||
if err := conn.WriteMessage(websocket.TextMessage, envelopeJSON); err != nil {
|
||||
g.logger.ComponentWarn("gateway", "pubsub ws: failed to write to websocket",
|
||||
@ -154,7 +154,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
g.logger.ComponentInfo("gateway", "pubsub ws: message sent successfully",
|
||||
zap.String("topic", topic))
|
||||
case <-ticker.C:
|
||||
@ -173,7 +173,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
g.logger.ComponentInfo("gateway", "pubsub ws: received message from libp2p",
|
||||
zap.String("topic", topic),
|
||||
zap.Int("data_len", len(data)))
|
||||
|
||||
|
||||
select {
|
||||
case msgs <- data:
|
||||
g.logger.ComponentInfo("gateway", "pubsub ws: forwarded to client",
|
||||
@ -195,7 +195,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
g.logger.ComponentInfo("gateway", "pubsub ws: libp2p subscription established",
|
||||
zap.String("topic", topic))
|
||||
|
||||
|
||||
// Keep subscription alive until done
|
||||
<-done
|
||||
_ = g.client.PubSub().Unsubscribe(ctx, topic)
|
||||
@ -212,7 +212,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
if mt != websocket.TextMessage && mt != websocket.BinaryMessage {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
// Filter out WebSocket heartbeat messages
|
||||
// Don't publish them to the topic
|
||||
var msg map[string]interface{}
|
||||
@ -222,7 +222,7 @@ func (g *Gateway) pubsubWebsocketHandler(w http.ResponseWriter, r *http.Request)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if err := g.client.PubSub().Publish(ctx, topic, data); err != nil {
|
||||
// Best-effort notify client
|
||||
_ = conn.WriteMessage(websocket.TextMessage, []byte("publish_error"))
|
||||
@ -259,12 +259,12 @@ func (g *Gateway) pubsubPublishHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, http.StatusBadRequest, "invalid base64 data")
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
// NEW: Check for local websocket subscribers FIRST and deliver directly
|
||||
g.mu.RLock()
|
||||
localSubs := g.getLocalSubscribers(body.Topic, ns)
|
||||
g.mu.RUnlock()
|
||||
|
||||
|
||||
localDeliveryCount := 0
|
||||
if len(localSubs) > 0 {
|
||||
for _, sub := range localSubs {
|
||||
@ -280,20 +280,20 @@ func (g *Gateway) pubsubPublishHandler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
g.logger.ComponentInfo("gateway", "pubsub publish: processing message",
|
||||
zap.String("topic", body.Topic),
|
||||
zap.String("namespace", ns),
|
||||
zap.Int("data_len", len(data)),
|
||||
zap.Int("local_subscribers", len(localSubs)),
|
||||
zap.Int("local_delivered", localDeliveryCount))
|
||||
|
||||
|
||||
// Publish to libp2p asynchronously for cross-node delivery
|
||||
// This prevents blocking the HTTP response if libp2p network is slow
|
||||
go func() {
|
||||
publishCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
|
||||
ctx := pubsub.WithNamespace(client.WithInternalAuth(publishCtx), ns)
|
||||
if err := g.client.PubSub().Publish(ctx, body.Topic, data); err != nil {
|
||||
g.logger.ComponentWarn("gateway", "async libp2p publish failed",
|
||||
@ -304,7 +304,7 @@ func (g *Gateway) pubsubPublishHandler(w http.ResponseWriter, r *http.Request) {
|
||||
zap.String("topic", body.Topic))
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
// Return immediately after local delivery
|
||||
// Local WebSocket subscribers already received the message
|
||||
writeJSON(w, http.StatusOK, map[string]any{"status": "ok"})
|
||||
|
||||
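For consumers of the WebSocket feed above, a minimal sketch of decoding the {data, timestamp, topic} envelope the writer loop emits. The field names come from the handler's comment; the sample frame, the millisecond timestamp, and the use of standard base64 are assumptions for illustration.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
)

// wsEnvelope matches the JSON envelope described in the handler:
// base64-encoded payload, a numeric timestamp, and the topic name.
type wsEnvelope struct {
	Data      string `json:"data"`
	Timestamp int64  `json:"timestamp"`
	Topic     string `json:"topic"`
}

func main() {
	// Illustrative frame; real frames carry whatever bytes were published.
	raw := []byte(`{"data":"aGVsbG8=","timestamp":1700000000000,"topic":"chat"}`)

	var env wsEnvelope
	if err := json.Unmarshal(raw, &env); err != nil {
		log.Fatal(err)
	}
	payload, err := base64.StdEncoding.DecodeString(env.Data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("topic=%s payload=%q\n", env.Topic, payload)
}
```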
183
pkg/gateway/push_notifications.go
Normal file
@ -0,0 +1,183 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// PushNotificationService handles sending push notifications via Expo
|
||||
type PushNotificationService struct {
|
||||
logger *zap.Logger
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// ExpoTicket represents the response from Expo API
|
||||
type ExpoTicket struct {
|
||||
ID string `json:"id"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// ExpoPushMessage represents a message to send via Expo
|
||||
type ExpoPushMessage struct {
|
||||
To string `json:"to"`
|
||||
Title string `json:"title"`
|
||||
Body string `json:"body"`
|
||||
Data map[string]interface{} `json:"data,omitempty"`
|
||||
Sound string `json:"sound,omitempty"`
|
||||
Badge int `json:"badge,omitempty"`
|
||||
Priority string `json:"priority,omitempty"`
|
||||
// iOS specific
|
||||
MutableContent bool `json:"mutableContent,omitempty"`
|
||||
IosIcon string `json:"iosIcon,omitempty"`
|
||||
// Android specific
|
||||
AndroidBigLargeIcon string `json:"androidBigLargeIcon,omitempty"`
|
||||
ChannelID string `json:"channelId,omitempty"`
|
||||
}
|
||||
|
||||
// NewPushNotificationService creates a new push notification service
|
||||
func NewPushNotificationService(logger *zap.Logger) *PushNotificationService {
|
||||
return &PushNotificationService{
|
||||
logger: logger,
|
||||
client: &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// SendNotification sends a push notification via Expo
|
||||
func (pns *PushNotificationService) SendNotification(
|
||||
ctx context.Context,
|
||||
expoPushToken string,
|
||||
title string,
|
||||
body string,
|
||||
data map[string]interface{},
|
||||
avatarURL string,
|
||||
) error {
|
||||
if expoPushToken == "" {
|
||||
return fmt.Errorf("empty expo push token")
|
||||
}
|
||||
|
||||
message := ExpoPushMessage{
|
||||
To: expoPushToken,
|
||||
Title: title,
|
||||
Body: body,
|
||||
Data: data,
|
||||
Sound: "default",
|
||||
Priority: "high",
|
||||
// Enable mutable content for iOS to allow Notification Service Extension
|
||||
MutableContent: true,
|
||||
ChannelID: "messages",
|
||||
AndroidBigLargeIcon: avatarURL,
|
||||
}
|
||||
|
||||
// For iOS, include avatar in data so Notification Service Extension can fetch it
|
||||
if avatarURL != "" {
|
||||
if message.Data == nil {
|
||||
message.Data = make(map[string]interface{})
|
||||
}
|
||||
message.Data["avatar_url"] = avatarURL
|
||||
}
|
||||
|
||||
return pns.sendExpoRequest(ctx, message)
|
||||
}
|
||||
|
||||
// SendBulkNotifications sends notifications to multiple users
|
||||
func (pns *PushNotificationService) SendBulkNotifications(
|
||||
ctx context.Context,
|
||||
expoPushTokens []string,
|
||||
title string,
|
||||
body string,
|
||||
data map[string]interface{},
|
||||
avatarURL string,
|
||||
) []error {
|
||||
errors := make([]error, 0)
|
||||
|
||||
for _, token := range expoPushTokens {
|
||||
if err := pns.SendNotification(ctx, token, title, body, data, avatarURL); err != nil {
|
||||
errors = append(errors, fmt.Errorf("failed to send to token %s: %w", token, err))
|
||||
}
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// sendExpoRequest sends a request to the Expo push notification API
|
||||
func (pns *PushNotificationService) sendExpoRequest(ctx context.Context, message ExpoPushMessage) error {
|
||||
const expoAPIURL = "https://exp.host/--/api/v2/push/send"
|
||||
|
||||
body, err := json.Marshal(message)
|
||||
if err != nil {
|
||||
pns.logger.Error("failed to marshal push notification",
|
||||
zap.Error(err),
|
||||
zap.String("to", message.To))
|
||||
return fmt.Errorf("marshal error: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, expoAPIURL, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
pns.logger.Error("failed to create push notification request",
|
||||
zap.Error(err),
|
||||
zap.String("to", message.To))
|
||||
return fmt.Errorf("request creation error: %w", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := pns.client.Do(req)
|
||||
if err != nil {
|
||||
pns.logger.Error("failed to send push notification",
|
||||
zap.Error(err),
|
||||
zap.String("to", message.To))
|
||||
return fmt.Errorf("send error: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
respBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
pns.logger.Error("failed to read push notification response",
|
||||
zap.Error(err),
|
||||
zap.String("to", message.To))
|
||||
return fmt.Errorf("response read error: %w", err)
|
||||
}
|
||||
|
||||
// Check for API errors
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
pns.logger.Warn("push notification API error",
|
||||
zap.Int("status_code", resp.StatusCode),
|
||||
zap.String("response", string(respBody)),
|
||||
zap.String("to", message.To))
|
||||
return fmt.Errorf("API returned status %d: %s", resp.StatusCode, string(respBody))
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var tickets []ExpoTicket
|
||||
if err := json.Unmarshal(respBody, &tickets); err != nil {
|
||||
pns.logger.Error("failed to parse push notification response",
|
||||
zap.Error(err),
|
||||
zap.String("response", string(respBody)))
|
||||
return fmt.Errorf("parse error: %w", err)
|
||||
}
|
||||
|
||||
// Check for errors in tickets
|
||||
for _, ticket := range tickets {
|
||||
if ticket.Error != "" {
|
||||
pns.logger.Warn("push notification error in ticket",
|
||||
zap.String("error", ticket.Error),
|
||||
zap.String("to", message.To))
|
||||
return fmt.Errorf("ticket error: %s", ticket.Error)
|
||||
}
|
||||
}
|
||||
|
||||
pns.logger.Info("push notification sent successfully",
|
||||
zap.String("to", message.To),
|
||||
zap.String("title", message.Title))
|
||||
|
||||
return nil
|
||||
}
|
||||
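A minimal usage sketch for the new push notification service, assuming a zap logger is already available. The push token, titles, and data map are placeholders; the constructor and SendNotification signature are taken from the file above.

```go
package main

import (
	"context"
	"log"
	"time"

	"go.uber.org/zap"

	"github.com/DeBrosOfficial/network/pkg/gateway"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	svc := gateway.NewPushNotificationService(logger)

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// "ExponentPushToken[...]" is a placeholder; real tokens come from the Expo SDK on the device.
	err := svc.SendNotification(ctx,
		"ExponentPushToken[xxxxxxxx]",
		"New message",
		"You have a new message",
		map[string]interface{}{"conversation_id": "abc123"},
		"", // no avatar URL
	)
	if err != nil {
		log.Printf("push failed: %v", err)
	}
}
```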
@ -22,6 +22,7 @@ func (g *Gateway) Routes() http.Handler {
|
||||
// New: issue JWT from API key; new: create or return API key for a wallet after verification
|
||||
mux.HandleFunc("/v1/auth/token", g.apiKeyToJWTHandler)
|
||||
mux.HandleFunc("/v1/auth/api-key", g.issueAPIKeyHandler)
|
||||
mux.HandleFunc("/v1/auth/simple-key", g.simpleAPIKeyHandler)
|
||||
mux.HandleFunc("/v1/auth/register", g.registerHandler)
|
||||
mux.HandleFunc("/v1/auth/refresh", g.refreshHandler)
|
||||
mux.HandleFunc("/v1/auth/logout", g.logoutHandler)
|
||||
|
||||
@ -386,6 +386,11 @@ func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
// Override with the node's actual peer ID if available
|
||||
// (the client's embedded host has a different temporary peer ID)
|
||||
if g.nodePeerID != "" {
|
||||
status.PeerID = g.nodePeerID
|
||||
}
|
||||
writeJSON(w, http.StatusOK, status)
|
||||
}
|
||||
|
||||
@ -401,7 +406,19 @@ func (g *Gateway) networkPeersHandler(w http.ResponseWriter, r *http.Request) {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, http.StatusOK, peers)
|
||||
// Flatten peer addresses into a list of multiaddr strings
|
||||
// Each PeerInfo can have multiple addresses, so we collect all of them
|
||||
peerAddrs := make([]string, 0)
|
||||
for _, peer := range peers {
|
||||
// Add peer ID as /p2p/ multiaddr format
|
||||
if peer.ID != "" {
|
||||
peerAddrs = append(peerAddrs, "/p2p/"+peer.ID)
|
||||
}
|
||||
// Add all addresses for this peer
|
||||
peerAddrs = append(peerAddrs, peer.Addresses...)
|
||||
}
|
||||
// Return peers in expected format: {"peers": ["/p2p/...", "/ip4/...", ...]}
|
||||
writeJSON(w, http.StatusOK, map[string]any{"peers": peerAddrs})
|
||||
}
|
||||
|
||||
func (g *Gateway) networkConnectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
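For clients of the reshaped /v1/network/peers response (which the same change set also adds to the public path list), a short sketch of decoding the flattened payload. The gateway URL is a placeholder.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder URL; point this at a real gateway.
	resp, err := http.Get("https://node-1.example.com/v1/network/peers")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Matches the handler's output: a flat list of multiaddr strings,
	// mixing /p2p/<peer-id> entries with each peer's listen addresses.
	var out struct {
		Peers []string `json:"peers"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	for _, addr := range out.Peers {
		fmt.Println(addr)
	}
}
```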
211
pkg/gateway/tcp_sni_gateway.go
Normal file
@ -0,0 +1,211 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
)
|
||||
|
||||
// TCPSNIGateway handles SNI-based TCP routing for services like RQLite Raft, IPFS, etc.
|
||||
type TCPSNIGateway struct {
|
||||
logger *logging.ColoredLogger
|
||||
config *config.SNIConfig
|
||||
listener net.Listener
|
||||
routes map[string]string
|
||||
mu sync.RWMutex
|
||||
running bool
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
// NewTCPSNIGateway creates a new TCP SNI-based gateway
|
||||
func NewTCPSNIGateway(logger *logging.ColoredLogger, cfg *config.SNIConfig) (*TCPSNIGateway, error) {
|
||||
if !cfg.Enabled {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
var err error
|
||||
logger, err = logging.NewColoredLogger(logging.ComponentGeneral, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create logger: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load TLS certificate: %w", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
gateway := &TCPSNIGateway{
|
||||
logger: logger,
|
||||
config: cfg,
|
||||
routes: make(map[string]string),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
tlsConfig: &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
},
|
||||
}
|
||||
|
||||
for hostname, backend := range cfg.Routes {
|
||||
gateway.routes[strings.ToLower(hostname)] = backend
|
||||
}
|
||||
|
||||
logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway initialized",
|
||||
zap.String("listen_addr", cfg.ListenAddr),
|
||||
zap.Int("routes", len(cfg.Routes)),
|
||||
)
|
||||
|
||||
return gateway, nil
|
||||
}
|
||||
|
||||
// Start starts the TCP SNI gateway server
|
||||
func (g *TCPSNIGateway) Start(ctx context.Context) error {
|
||||
if g == nil || !g.config.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
listener, err := tls.Listen("tcp", g.config.ListenAddr, g.tlsConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to listen on %s: %w", g.config.ListenAddr, err)
|
||||
}
|
||||
g.listener = listener
|
||||
g.running = true
|
||||
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway starting",
|
||||
zap.String("listen_addr", g.config.ListenAddr),
|
||||
)
|
||||
|
||||
g.wg.Add(1)
|
||||
go func() {
|
||||
defer g.wg.Done()
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
default:
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "Accept error", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
g.wg.Add(1)
|
||||
go func(c net.Conn) {
|
||||
defer g.wg.Done()
|
||||
g.handleConnection(c)
|
||||
}(conn)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-g.ctx.Done():
|
||||
}
|
||||
|
||||
return g.Stop()
|
||||
}
|
||||
|
||||
// handleConnection routes a TCP connection based on SNI
|
||||
func (g *TCPSNIGateway) handleConnection(conn net.Conn) {
|
||||
defer conn.Close()
|
||||
|
||||
tlsConn, ok := conn.(*tls.Conn)
|
||||
if !ok {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "Expected TLS connection")
|
||||
return
|
||||
}
|
||||
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "TLS handshake failed", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
serverName := strings.ToLower(tlsConn.ConnectionState().ServerName)
|
||||
if serverName == "" {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "No SNI provided")
|
||||
return
|
||||
}
|
||||
|
||||
g.mu.RLock()
|
||||
backend, found := g.routes[serverName]
|
||||
if !found {
|
||||
for prefix, be := range g.routes {
|
||||
if strings.HasPrefix(serverName, prefix+".") {
|
||||
backend = be
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
g.mu.RUnlock()
|
||||
|
||||
if !found {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "No route for SNI",
|
||||
zap.String("server_name", serverName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "Routing connection",
|
||||
zap.String("server_name", serverName),
|
||||
zap.String("backend", backend),
|
||||
)
|
||||
|
||||
backendConn, err := net.DialTimeout("tcp", backend, 10*time.Second)
|
||||
if err != nil {
|
||||
g.logger.ComponentError(logging.ComponentGeneral, "Backend connect failed",
|
||||
zap.String("backend", backend),
|
||||
zap.Error(err),
|
||||
)
|
||||
return
|
||||
}
|
||||
defer backendConn.Close()
|
||||
|
||||
errc := make(chan error, 2)
|
||||
go func() { _, err := io.Copy(backendConn, tlsConn); errc <- err }()
|
||||
go func() { _, err := io.Copy(tlsConn, backendConn); errc <- err }()
|
||||
<-errc
|
||||
}
|
||||
|
||||
// Stop gracefully stops the TCP SNI gateway
|
||||
func (g *TCPSNIGateway) Stop() error {
|
||||
if g == nil || !g.running {
|
||||
return nil
|
||||
}
|
||||
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutting down")
|
||||
g.cancel()
|
||||
|
||||
if g.listener != nil {
|
||||
g.listener.Close()
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() { g.wg.Wait(); close(done) }()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(10 * time.Second):
|
||||
g.logger.ComponentWarn(logging.ComponentGeneral, "Shutdown timeout")
|
||||
}
|
||||
|
||||
g.running = false
|
||||
g.logger.ComponentInfo(logging.ComponentGeneral, "TCP SNI Gateway shutdown complete")
|
||||
return nil
|
||||
}
|
||||
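A hedged sketch of wiring up the SNI gateway. The SNIConfig field names and the NewTCPSNIGateway/Start calls come from the file above; the listen address, certificate paths, hostnames, and backend ports are illustrative assumptions and must match your deployment.

```go
package main

import (
	"context"
	"log"

	"github.com/DeBrosOfficial/network/pkg/config"
	"github.com/DeBrosOfficial/network/pkg/gateway"
)

func main() {
	// Illustrative values only: adjust paths, hostnames, and backends.
	cfg := &config.SNIConfig{
		Enabled:    true,
		ListenAddr: ":7001",
		CertFile:   "/home/debros/.orama/certs/node.crt",
		KeyFile:    "/home/debros/.orama/certs/node.key",
		Routes: map[string]string{
			"ipfs.node-1.example.com":         "127.0.0.1:4001",
			"ipfs-cluster.node-1.example.com": "127.0.0.1:9096",
			"olric.node-1.example.com":        "127.0.0.1:3320",
		},
	}

	// nil logger is fine: the constructor creates its own.
	sni, err := gateway.NewTCPSNIGateway(nil, cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Start blocks until the context is cancelled or Stop is called.
	if err := sni.Start(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```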
956
pkg/installer/installer.go
Normal file
@ -0,0 +1,956 @@
|
||||
// Package installer provides an interactive TUI installer for Orama Network
|
||||
package installer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/charmbracelet/bubbles/textinput"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/certutil"
|
||||
"github.com/DeBrosOfficial/network/pkg/tlsutil"
|
||||
)
|
||||
|
||||
// InstallerConfig holds the configuration gathered from the TUI
|
||||
type InstallerConfig struct {
|
||||
VpsIP string
|
||||
Domain string
|
||||
PeerDomain string // Domain of existing node to join
|
||||
PeerIP string // Resolved IP of peer domain (for Raft join)
|
||||
JoinAddress string // Auto-populated: {PeerIP}:7002 (direct RQLite TLS)
|
||||
Peers []string // Auto-populated: /dns4/{PeerDomain}/tcp/4001/p2p/{PeerID}
|
||||
ClusterSecret string
|
||||
SwarmKeyHex string // 64-hex IPFS swarm key (for joining private network)
|
||||
IPFSPeerID string // IPFS peer ID (auto-discovered from peer domain)
|
||||
IPFSSwarmAddrs []string // IPFS swarm addresses (auto-discovered from peer domain)
|
||||
// IPFS Cluster peer info for cluster discovery
|
||||
IPFSClusterPeerID string // IPFS Cluster peer ID (auto-discovered from peer domain)
|
||||
IPFSClusterAddrs []string // IPFS Cluster addresses (auto-discovered from peer domain)
|
||||
Branch string
|
||||
IsFirstNode bool
|
||||
NoPull bool
|
||||
}
|
||||
|
||||
// Step represents a step in the installation wizard
|
||||
type Step int
|
||||
|
||||
const (
|
||||
StepWelcome Step = iota
|
||||
StepNodeType
|
||||
StepVpsIP
|
||||
StepDomain
|
||||
StepPeerDomain // Domain of existing node to join (replaces StepJoinAddress)
|
||||
StepClusterSecret
|
||||
StepSwarmKey // 64-hex swarm key for IPFS private network
|
||||
StepBranch
|
||||
StepNoPull
|
||||
StepConfirm
|
||||
StepInstalling
|
||||
StepDone
|
||||
)
|
||||
|
||||
// Model is the bubbletea model for the installer
|
||||
type Model struct {
|
||||
step Step
|
||||
config InstallerConfig
|
||||
textInput textinput.Model
|
||||
err error
|
||||
width int
|
||||
height int
|
||||
installing bool
|
||||
installOutput []string
|
||||
cursor int // For selection menus
|
||||
discovering bool // Whether domain discovery is in progress
|
||||
discoveryInfo string // Info message during discovery
|
||||
discoveredPeer string // Discovered peer ID from domain
|
||||
sniWarning string // Warning about missing SNI DNS records (non-blocking)
|
||||
}
|
||||
|
||||
// Styles
|
||||
var (
|
||||
titleStyle = lipgloss.NewStyle().
|
||||
Bold(true).
|
||||
Foreground(lipgloss.Color("#00D4AA")).
|
||||
MarginBottom(1)
|
||||
|
||||
subtitleStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#888888")).
|
||||
MarginBottom(1)
|
||||
|
||||
focusedStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#00D4AA"))
|
||||
|
||||
blurredStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#666666"))
|
||||
|
||||
cursorStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#00D4AA"))
|
||||
|
||||
helpStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#626262")).
|
||||
MarginTop(1)
|
||||
|
||||
errorStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#FF6B6B")).
|
||||
Bold(true)
|
||||
|
||||
successStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#00D4AA")).
|
||||
Bold(true)
|
||||
|
||||
boxStyle = lipgloss.NewStyle().
|
||||
Border(lipgloss.RoundedBorder()).
|
||||
BorderForeground(lipgloss.Color("#00D4AA")).
|
||||
Padding(1, 2)
|
||||
)
|
||||
|
||||
// NewModel creates a new installer model
|
||||
func NewModel() Model {
|
||||
ti := textinput.New()
|
||||
ti.Focus()
|
||||
ti.CharLimit = 256
|
||||
ti.Width = 50
|
||||
|
||||
return Model{
|
||||
step: StepWelcome,
|
||||
textInput: ti,
|
||||
config: InstallerConfig{
|
||||
Branch: "main",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Init initializes the model
|
||||
func (m Model) Init() tea.Cmd {
|
||||
return textinput.Blink
|
||||
}
|
||||
|
||||
// Update handles messages
|
||||
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
switch msg := msg.(type) {
|
||||
case tea.WindowSizeMsg:
|
||||
m.width = msg.Width
|
||||
m.height = msg.Height
|
||||
return m, nil
|
||||
|
||||
case installCompleteMsg:
|
||||
m.step = StepDone
|
||||
return m, nil
|
||||
|
||||
case tea.KeyMsg:
|
||||
switch msg.String() {
|
||||
case "ctrl+c", "q":
|
||||
if m.step != StepInstalling {
|
||||
return m, tea.Quit
|
||||
}
|
||||
|
||||
case "enter":
|
||||
return m.handleEnter()
|
||||
|
||||
case "up", "k":
|
||||
if m.step == StepNodeType || m.step == StepBranch || m.step == StepNoPull {
|
||||
if m.cursor > 0 {
|
||||
m.cursor--
|
||||
}
|
||||
}
|
||||
|
||||
case "down", "j":
|
||||
if m.step == StepNodeType || m.step == StepBranch || m.step == StepNoPull {
|
||||
if m.cursor < 1 {
|
||||
m.cursor++
|
||||
}
|
||||
}
|
||||
|
||||
case "esc":
|
||||
if m.step > StepWelcome && m.step < StepInstalling {
|
||||
m.step--
|
||||
m.err = nil
|
||||
m.setupStepInput()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update text input for input steps
|
||||
if m.step == StepVpsIP || m.step == StepDomain || m.step == StepPeerDomain || m.step == StepClusterSecret || m.step == StepSwarmKey {
|
||||
var cmd tea.Cmd
|
||||
m.textInput, cmd = m.textInput.Update(msg)
|
||||
return m, cmd
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *Model) handleEnter() (tea.Model, tea.Cmd) {
|
||||
switch m.step {
|
||||
case StepWelcome:
|
||||
m.step = StepNodeType
|
||||
m.cursor = 0
|
||||
|
||||
case StepNodeType:
|
||||
m.config.IsFirstNode = m.cursor == 0
|
||||
m.step = StepVpsIP
|
||||
m.setupStepInput()
|
||||
|
||||
case StepVpsIP:
|
||||
ip := strings.TrimSpace(m.textInput.Value())
|
||||
if err := validateIP(ip); err != nil {
|
||||
m.err = err
|
||||
return m, nil
|
||||
}
|
||||
m.config.VpsIP = ip
|
||||
m.err = nil
|
||||
m.step = StepDomain
|
||||
m.setupStepInput()
|
||||
|
||||
case StepDomain:
|
||||
domain := strings.TrimSpace(m.textInput.Value())
|
||||
if err := validateDomain(domain); err != nil {
|
||||
m.err = err
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Check SNI DNS records for this domain (non-blocking warning)
|
||||
m.discovering = true
|
||||
m.discoveryInfo = "Checking SNI DNS records for " + domain + "..."
|
||||
|
||||
if warning := validateSNIDNSRecords(domain); warning != "" {
|
||||
// Log warning but continue - SNI DNS is optional for single-node setups
|
||||
m.sniWarning = warning
|
||||
}
|
||||
|
||||
m.discovering = false
|
||||
m.config.Domain = domain
|
||||
m.err = nil
|
||||
|
||||
// Auto-generate self-signed certificates for this domain
|
||||
m.discovering = true
|
||||
m.discoveryInfo = "Generating SSL certificates for " + domain + "..."
|
||||
|
||||
if err := ensureCertificatesForDomain(domain); err != nil {
|
||||
m.discovering = false
|
||||
m.err = fmt.Errorf("failed to generate certificates: %w", err)
|
||||
return m, nil
|
||||
}
|
||||
|
||||
m.discovering = false
|
||||
|
||||
if m.config.IsFirstNode {
|
||||
m.step = StepBranch
|
||||
m.cursor = 0
|
||||
} else {
|
||||
m.step = StepPeerDomain
|
||||
m.setupStepInput()
|
||||
}
|
||||
|
||||
case StepPeerDomain:
|
||||
peerDomain := strings.TrimSpace(m.textInput.Value())
|
||||
if err := validateDomain(peerDomain); err != nil {
|
||||
m.err = err
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Check SNI DNS records for peer domain (non-blocking warning)
|
||||
m.discovering = true
|
||||
m.discoveryInfo = "Checking SNI DNS records for " + peerDomain + "..."
|
||||
|
||||
if warning := validateSNIDNSRecords(peerDomain); warning != "" {
|
||||
// Log warning but continue - peer might have different DNS setup
|
||||
m.sniWarning = warning
|
||||
}
|
||||
|
||||
// Discover peer info from domain (try HTTPS first, then HTTP)
|
||||
m.discovering = true
|
||||
m.discoveryInfo = "Discovering peer from " + peerDomain + "..."
|
||||
|
||||
discovery, err := discoverPeerFromDomain(peerDomain)
|
||||
m.discovering = false
|
||||
|
||||
if err != nil {
|
||||
m.err = fmt.Errorf("failed to discover peer: %w", err)
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Store discovered info
|
||||
m.config.PeerDomain = peerDomain
|
||||
m.discoveredPeer = discovery.PeerID
|
||||
|
||||
// Resolve peer domain to IP for direct RQLite TLS connection
|
||||
// RQLite uses native TLS on port 7002 (not SNI gateway on 7001)
|
||||
peerIPs, err := net.LookupIP(peerDomain)
|
||||
if err != nil || len(peerIPs) == 0 {
|
||||
m.err = fmt.Errorf("failed to resolve peer domain %s to IP: %w", peerDomain, err)
|
||||
return m, nil
|
||||
}
|
||||
// Prefer IPv4
|
||||
var peerIP string
|
||||
for _, ip := range peerIPs {
|
||||
if ip.To4() != nil {
|
||||
peerIP = ip.String()
|
||||
break
|
||||
}
|
||||
}
|
||||
if peerIP == "" {
|
||||
peerIP = peerIPs[0].String()
|
||||
}
|
||||
m.config.PeerIP = peerIP
|
||||
|
||||
// Auto-populate join address (direct RQLite TLS on port 7002) and bootstrap peers
|
||||
m.config.JoinAddress = fmt.Sprintf("%s:7002", peerIP)
|
||||
m.config.Peers = []string{
|
||||
fmt.Sprintf("/dns4/%s/tcp/4001/p2p/%s", peerDomain, discovery.PeerID),
|
||||
}
|
||||
|
||||
// Store IPFS peer info for Peering.Peers configuration
|
||||
if discovery.IPFSPeerID != "" {
|
||||
m.config.IPFSPeerID = discovery.IPFSPeerID
|
||||
m.config.IPFSSwarmAddrs = discovery.IPFSSwarmAddrs
|
||||
}
|
||||
|
||||
// Store IPFS Cluster peer info for cluster peer_addresses configuration
|
||||
if discovery.IPFSClusterPeerID != "" {
|
||||
m.config.IPFSClusterPeerID = discovery.IPFSClusterPeerID
|
||||
m.config.IPFSClusterAddrs = discovery.IPFSClusterAddrs
|
||||
}
|
||||
|
||||
m.err = nil
|
||||
m.step = StepClusterSecret
|
||||
m.setupStepInput()
|
||||
|
||||
case StepClusterSecret:
|
||||
secret := strings.TrimSpace(m.textInput.Value())
|
||||
if err := validateClusterSecret(secret); err != nil {
|
||||
m.err = err
|
||||
return m, nil
|
||||
}
|
||||
m.config.ClusterSecret = secret
|
||||
m.err = nil
|
||||
m.step = StepSwarmKey
|
||||
m.setupStepInput()
|
||||
|
||||
case StepSwarmKey:
|
||||
swarmKey := strings.TrimSpace(m.textInput.Value())
|
||||
if err := validateSwarmKey(swarmKey); err != nil {
|
||||
m.err = err
|
||||
return m, nil
|
||||
}
|
||||
m.config.SwarmKeyHex = swarmKey
|
||||
m.err = nil
|
||||
m.step = StepBranch
|
||||
m.cursor = 0
|
||||
|
||||
case StepBranch:
|
||||
if m.cursor == 0 {
|
||||
m.config.Branch = "main"
|
||||
} else {
|
||||
m.config.Branch = "nightly"
|
||||
}
|
||||
m.cursor = 0 // Reset cursor for next step
|
||||
m.step = StepNoPull
|
||||
|
||||
case StepNoPull:
|
||||
if m.cursor == 0 {
|
||||
m.config.NoPull = false
|
||||
} else {
|
||||
m.config.NoPull = true
|
||||
}
|
||||
m.step = StepConfirm
|
||||
|
||||
case StepConfirm:
|
||||
m.step = StepInstalling
|
||||
return m, m.startInstallation()
|
||||
|
||||
case StepDone:
|
||||
return m, tea.Quit
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *Model) setupStepInput() {
|
||||
m.textInput.Reset()
|
||||
m.textInput.Focus()
|
||||
m.textInput.EchoMode = textinput.EchoNormal // Reset echo mode
|
||||
|
||||
switch m.step {
|
||||
case StepVpsIP:
|
||||
m.textInput.Placeholder = "e.g., 203.0.113.1"
|
||||
// Try to auto-detect public IP
|
||||
if ip := detectPublicIP(); ip != "" {
|
||||
m.textInput.SetValue(ip)
|
||||
}
|
||||
case StepDomain:
|
||||
m.textInput.Placeholder = "e.g., node-1.orama.network"
|
||||
case StepPeerDomain:
|
||||
m.textInput.Placeholder = "e.g., node-123.orama.network"
|
||||
case StepClusterSecret:
|
||||
m.textInput.Placeholder = "64 hex characters"
|
||||
m.textInput.EchoMode = textinput.EchoPassword
|
||||
case StepSwarmKey:
|
||||
m.textInput.Placeholder = "64 hex characters"
|
||||
m.textInput.EchoMode = textinput.EchoPassword
|
||||
}
|
||||
}
|
||||
|
||||
func (m Model) startInstallation() tea.Cmd {
|
||||
return func() tea.Msg {
|
||||
// This would trigger the actual installation
|
||||
// For now, we return the config for the CLI to handle
|
||||
return installCompleteMsg{config: m.config}
|
||||
}
|
||||
}
|
||||
|
||||
type installCompleteMsg struct {
|
||||
config InstallerConfig
|
||||
}
|
||||
|
||||
// View renders the UI
|
||||
func (m Model) View() string {
|
||||
var s strings.Builder
|
||||
|
||||
// Header
|
||||
s.WriteString(renderHeader())
|
||||
s.WriteString("\n\n")
|
||||
|
||||
switch m.step {
|
||||
case StepWelcome:
|
||||
s.WriteString(m.viewWelcome())
|
||||
case StepNodeType:
|
||||
s.WriteString(m.viewNodeType())
|
||||
case StepVpsIP:
|
||||
s.WriteString(m.viewVpsIP())
|
||||
case StepDomain:
|
||||
s.WriteString(m.viewDomain())
|
||||
case StepPeerDomain:
|
||||
s.WriteString(m.viewPeerDomain())
|
||||
case StepClusterSecret:
|
||||
s.WriteString(m.viewClusterSecret())
|
||||
case StepSwarmKey:
|
||||
s.WriteString(m.viewSwarmKey())
|
||||
case StepBranch:
|
||||
s.WriteString(m.viewBranch())
|
||||
case StepNoPull:
|
||||
s.WriteString(m.viewNoPull())
|
||||
case StepConfirm:
|
||||
s.WriteString(m.viewConfirm())
|
||||
case StepInstalling:
|
||||
s.WriteString(m.viewInstalling())
|
||||
case StepDone:
|
||||
s.WriteString(m.viewDone())
|
||||
}
|
||||
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func renderHeader() string {
|
||||
logo := `
|
||||
  ___  ____      _    __  __    _
 / _ \|  _ \    / \  |  \/  |  / \
| | | | |_) |  / _ \ | |\/| | / _ \
| |_| |  _ <  / ___ \| |  | |/ ___ \
 \___/|_| \_\/_/   \_\_|  |_/_/   \_\
|
||||
`
|
||||
return titleStyle.Render(logo) + "\n" + subtitleStyle.Render("Network Installation Wizard")
|
||||
}
|
||||
|
||||
func (m Model) viewWelcome() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(boxStyle.Render(
|
||||
titleStyle.Render("Welcome to Orama Network!") + "\n\n" +
|
||||
"This wizard will guide you through setting up your node.\n\n" +
|
||||
"You'll need:\n" +
|
||||
" • A public IP address for your server\n" +
|
||||
" • A domain name (e.g., node-1.orama.network)\n" +
|
||||
" • For joining: cluster secret from existing node\n",
|
||||
))
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Press Enter to continue • q to quit"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewNodeType() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Node Type") + "\n\n")
|
||||
s.WriteString("Is this the first node in a new cluster?\n\n")
|
||||
|
||||
options := []string{"Yes, create new cluster", "No, join existing cluster"}
|
||||
for i, opt := range options {
|
||||
if i == m.cursor {
|
||||
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
|
||||
} else {
|
||||
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
|
||||
}
|
||||
}
|
||||
|
||||
s.WriteString("\n")
|
||||
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewVpsIP() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Server IP Address") + "\n\n")
|
||||
s.WriteString("Enter your server's public IP address:\n\n")
|
||||
s.WriteString(m.textInput.View())
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
|
||||
}
|
||||
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewDomain() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Domain Name") + "\n\n")
|
||||
s.WriteString("Enter the domain for this node:\n\n")
|
||||
s.WriteString(m.textInput.View())
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
|
||||
}
|
||||
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewPeerDomain() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Existing Node Domain") + "\n\n")
|
||||
s.WriteString("Enter the domain of an existing node to join:\n")
|
||||
s.WriteString(subtitleStyle.Render("The installer will auto-discover peer info via HTTPS/HTTP") + "\n\n")
|
||||
s.WriteString(m.textInput.View())
|
||||
|
||||
if m.discovering {
|
||||
s.WriteString("\n\n" + subtitleStyle.Render("🔍 "+m.discoveryInfo))
|
||||
}
|
||||
|
||||
if m.discoveredPeer != "" && m.err == nil {
|
||||
s.WriteString("\n\n" + successStyle.Render("✓ Discovered peer: "+m.discoveredPeer[:12]+"..."))
|
||||
}
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
|
||||
}
|
||||
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Enter to discover & continue • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewClusterSecret() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Cluster Secret") + "\n\n")
|
||||
s.WriteString("Enter the cluster secret from an existing node:\n")
|
||||
s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/cluster-secret") + "\n\n")
|
||||
s.WriteString(m.textInput.View())
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
|
||||
}
|
||||
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewSwarmKey() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("IPFS Swarm Key") + "\n\n")
|
||||
s.WriteString("Enter the swarm key from an existing node:\n")
|
||||
s.WriteString(subtitleStyle.Render("Get it with: cat ~/.orama/secrets/swarm.key | tail -1") + "\n\n")
|
||||
s.WriteString(m.textInput.View())
|
||||
|
||||
if m.err != nil {
|
||||
s.WriteString("\n\n" + errorStyle.Render("✗ " + m.err.Error()))
|
||||
}
|
||||
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewBranch() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Release Channel") + "\n\n")
|
||||
s.WriteString("Select the release channel:\n\n")
|
||||
|
||||
options := []string{"main (stable)", "nightly (latest features)"}
|
||||
for i, opt := range options {
|
||||
if i == m.cursor {
|
||||
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
|
||||
} else {
|
||||
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
|
||||
}
|
||||
}
|
||||
|
||||
s.WriteString("\n")
|
||||
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewNoPull() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Git Repository") + "\n\n")
|
||||
s.WriteString("Pull latest changes from repository?\n\n")
|
||||
|
||||
options := []string{"Pull latest (recommended)", "Skip git pull (use existing source)"}
|
||||
for i, opt := range options {
|
||||
if i == m.cursor {
|
||||
s.WriteString(cursorStyle.Render("→ ") + focusedStyle.Render(opt) + "\n")
|
||||
} else {
|
||||
s.WriteString(" " + blurredStyle.Render(opt) + "\n")
|
||||
}
|
||||
}
|
||||
|
||||
s.WriteString("\n")
|
||||
s.WriteString(helpStyle.Render("↑/↓ to select • Enter to confirm • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewConfirm() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Confirm Installation") + "\n\n")
|
||||
|
||||
noPullStr := "Pull latest"
|
||||
if m.config.NoPull {
|
||||
noPullStr = "Skip git pull"
|
||||
}
|
||||
|
||||
config := fmt.Sprintf(
|
||||
" VPS IP: %s\n"+
|
||||
" Domain: %s\n"+
|
||||
" Branch: %s\n"+
|
||||
" Git Pull: %s\n"+
|
||||
" Node Type: %s\n",
|
||||
m.config.VpsIP,
|
||||
m.config.Domain,
|
||||
m.config.Branch,
|
||||
noPullStr,
|
||||
map[bool]string{true: "First node (new cluster)", false: "Join existing cluster"}[m.config.IsFirstNode],
|
||||
)
|
||||
|
||||
if !m.config.IsFirstNode {
|
||||
config += fmt.Sprintf(" Peer Node: %s\n", m.config.PeerDomain)
|
||||
config += fmt.Sprintf(" Join Addr: %s\n", m.config.JoinAddress)
|
||||
if len(m.config.Peers) > 0 {
|
||||
config += fmt.Sprintf(" Bootstrap: %s...\n", m.config.Peers[0][:40])
|
||||
}
|
||||
if len(m.config.ClusterSecret) >= 8 {
|
||||
config += fmt.Sprintf(" Secret: %s...\n", m.config.ClusterSecret[:8])
|
||||
}
|
||||
if len(m.config.SwarmKeyHex) >= 8 {
|
||||
config += fmt.Sprintf(" Swarm Key: %s...\n", m.config.SwarmKeyHex[:8])
|
||||
}
|
||||
if m.config.IPFSPeerID != "" {
|
||||
config += fmt.Sprintf(" IPFS Peer: %s...\n", m.config.IPFSPeerID[:16])
|
||||
}
|
||||
}
|
||||
|
||||
s.WriteString(boxStyle.Render(config))
|
||||
|
||||
// Show SNI DNS warning if present
|
||||
if m.sniWarning != "" {
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#FFA500")).Render(m.sniWarning))
|
||||
}
|
||||
|
||||
s.WriteString("\n\n")
|
||||
s.WriteString(helpStyle.Render("Press Enter to install • Esc to go back"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewInstalling() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(titleStyle.Render("Installing...") + "\n\n")
|
||||
s.WriteString("Please wait while the node is being configured.\n\n")
|
||||
for _, line := range m.installOutput {
|
||||
s.WriteString(line + "\n")
|
||||
}
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func (m Model) viewDone() string {
|
||||
var s strings.Builder
|
||||
s.WriteString(successStyle.Render("✓ Installation Complete!") + "\n\n")
|
||||
s.WriteString("Your node is now running.\n\n")
|
||||
s.WriteString("Useful commands:\n")
|
||||
s.WriteString(" orama status - Check service status\n")
|
||||
s.WriteString(" orama logs node - View node logs\n")
|
||||
s.WriteString(" orama logs gateway - View gateway logs\n")
|
||||
s.WriteString("\n")
|
||||
s.WriteString(helpStyle.Render("Press Enter or q to exit"))
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// GetConfig returns the installer configuration after the TUI completes
|
||||
func (m Model) GetConfig() InstallerConfig {
|
||||
return m.config
|
||||
}
|
||||
|
||||
// Validation helpers
|
||||
|
||||
func validateIP(ip string) error {
|
||||
if ip == "" {
|
||||
return fmt.Errorf("IP address is required")
|
||||
}
|
||||
if net.ParseIP(ip) == nil {
|
||||
return fmt.Errorf("invalid IP address format")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDomain(domain string) error {
|
||||
if domain == "" {
|
||||
return fmt.Errorf("domain is required")
|
||||
}
|
||||
// Basic domain validation
|
||||
domainRegex := regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?)*$`)
|
||||
if !domainRegex.MatchString(domain) {
|
||||
return fmt.Errorf("invalid domain format")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiscoveryResult contains all information discovered from a peer node
|
||||
type DiscoveryResult struct {
|
||||
PeerID string // LibP2P peer ID
|
||||
IPFSPeerID string // IPFS peer ID
|
||||
IPFSSwarmAddrs []string // IPFS swarm addresses
|
||||
// IPFS Cluster info for cluster peer discovery
|
||||
IPFSClusterPeerID string // IPFS Cluster peer ID
|
||||
IPFSClusterAddrs []string // IPFS Cluster multiaddresses
|
||||
}
|
||||
|
||||
// discoverPeerFromDomain queries an existing node to get its peer ID and IPFS info
|
||||
// Tries HTTPS first, then falls back to HTTP
|
||||
// Respects DEBROS_TRUSTED_TLS_DOMAINS and DEBROS_CA_CERT_PATH environment variables for certificate verification
|
||||
func discoverPeerFromDomain(domain string) (*DiscoveryResult, error) {
|
||||
// Use centralized TLS configuration that respects CA certificates and trusted domains
|
||||
client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)
|
||||
|
||||
// Try HTTPS first
|
||||
url := fmt.Sprintf("https://%s/v1/network/status", domain)
|
||||
resp, err := client.Get(url)
|
||||
|
||||
// If HTTPS fails, try HTTP
|
||||
if err != nil {
|
||||
// Finally try plain HTTP
|
||||
url = fmt.Sprintf("http://%s/v1/network/status", domain)
|
||||
resp, err = client.Get(url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not connect to %s (tried HTTPS and HTTP): %w", domain, err)
|
||||
}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("unexpected status from %s: %s", domain, resp.Status)
|
||||
}
|
||||
|
||||
// Parse response including IPFS and IPFS Cluster info
|
||||
var status struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
NodeID string `json:"node_id"` // fallback for backward compatibility
|
||||
IPFS *struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
SwarmAddresses []string `json:"swarm_addresses"`
|
||||
} `json:"ipfs,omitempty"`
|
||||
IPFSCluster *struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
Addresses []string `json:"addresses"`
|
||||
} `json:"ipfs_cluster,omitempty"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse response from %s: %w", domain, err)
|
||||
}
|
||||
|
||||
// Use peer_id if available, otherwise fall back to node_id for backward compatibility
|
||||
peerID := status.PeerID
|
||||
if peerID == "" {
|
||||
peerID = status.NodeID
|
||||
}
|
||||
|
||||
if peerID == "" {
|
||||
return nil, fmt.Errorf("no peer_id or node_id in response from %s", domain)
|
||||
}
|
||||
|
||||
result := &DiscoveryResult{
|
||||
PeerID: peerID,
|
||||
}
|
||||
|
||||
// Include IPFS info if available
|
||||
if status.IPFS != nil {
|
||||
result.IPFSPeerID = status.IPFS.PeerID
|
||||
result.IPFSSwarmAddrs = status.IPFS.SwarmAddresses
|
||||
}
|
||||
|
||||
// Include IPFS Cluster info if available
|
||||
if status.IPFSCluster != nil {
|
||||
result.IPFSClusterPeerID = status.IPFSCluster.PeerID
|
||||
result.IPFSClusterAddrs = status.IPFSCluster.Addresses
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func validateClusterSecret(secret string) error {
|
||||
if len(secret) != 64 {
|
||||
return fmt.Errorf("cluster secret must be 64 hex characters")
|
||||
}
|
||||
secretRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
|
||||
if !secretRegex.MatchString(secret) {
|
||||
return fmt.Errorf("cluster secret must be valid hexadecimal")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateSwarmKey(key string) error {
|
||||
if len(key) != 64 {
|
||||
return fmt.Errorf("swarm key must be 64 hex characters")
|
||||
}
|
||||
keyRegex := regexp.MustCompile(`^[a-fA-F0-9]{64}$`)
|
||||
if !keyRegex.MatchString(key) {
|
||||
return fmt.Errorf("swarm key must be valid hexadecimal")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureCertificatesForDomain generates self-signed certificates for the domain
|
||||
func ensureCertificatesForDomain(domain string) error {
|
||||
// Get home directory
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get home directory: %w", err)
|
||||
}
|
||||
|
||||
// Create cert directory
|
||||
certDir := filepath.Join(home, ".orama", "certs")
|
||||
if err := os.MkdirAll(certDir, 0700); err != nil {
|
||||
return fmt.Errorf("failed to create cert directory: %w", err)
|
||||
}
|
||||
|
||||
// Create certificate manager
|
||||
cm := certutil.NewCertificateManager(certDir)
|
||||
|
||||
// Ensure CA certificate exists
|
||||
caCertPEM, caKeyPEM, err := cm.EnsureCACertificate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure CA certificate: %w", err)
|
||||
}
|
||||
|
||||
// Ensure node certificate exists for the domain
|
||||
_, _, err = cm.EnsureNodeCertificate(domain, caCertPEM, caKeyPEM)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure node certificate: %w", err)
|
||||
}
|
||||
|
||||
// Also create wildcard certificate if domain is not already wildcard
|
||||
if !strings.HasPrefix(domain, "*.") {
|
||||
wildcardDomain := "*." + domain
|
||||
_, _, err = cm.EnsureNodeCertificate(wildcardDomain, caCertPEM, caKeyPEM)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to ensure wildcard certificate: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func detectPublicIP() string {
|
||||
// Try to detect public IP from common interfaces
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
|
||||
if ipnet.IP.To4() != nil && !ipnet.IP.IsPrivate() {
|
||||
return ipnet.IP.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// validateSNIDNSRecords checks if the required SNI DNS records exist
|
||||
// It tries to resolve the key SNI hostnames for IPFS, IPFS Cluster, and Olric
|
||||
// Note: Raft no longer uses SNI - it uses direct RQLite TLS on port 7002
|
||||
// All should resolve to the same IP (the node's public IP or domain)
|
||||
// Returns a warning string if records are missing (empty string if all OK)
|
||||
func validateSNIDNSRecords(domain string) string {
|
||||
// List of SNI services that need DNS records
|
||||
// Note: raft.domain is NOT included - RQLite uses direct TLS on port 7002
|
||||
sniServices := []string{
|
||||
fmt.Sprintf("ipfs.%s", domain),
|
||||
fmt.Sprintf("ipfs-cluster.%s", domain),
|
||||
fmt.Sprintf("olric.%s", domain),
|
||||
}
|
||||
|
||||
// Try to resolve the main domain first to get baseline
|
||||
mainIPs, err := net.LookupHost(domain)
|
||||
if err != nil {
|
||||
// Main domain doesn't resolve - this is just a warning now
|
||||
return fmt.Sprintf("Warning: could not resolve main domain %s: %v", domain, err)
|
||||
}
|
||||
|
||||
if len(mainIPs) == 0 {
|
||||
return fmt.Sprintf("Warning: main domain %s resolved to no IP addresses", domain)
|
||||
}
|
||||
|
||||
// Check each SNI service
|
||||
var unresolvedServices []string
|
||||
for _, service := range sniServices {
|
||||
ips, err := net.LookupHost(service)
|
||||
if err != nil || len(ips) == 0 {
|
||||
unresolvedServices = append(unresolvedServices, service)
|
||||
}
|
||||
}
|
||||
|
||||
if len(unresolvedServices) > 0 {
|
||||
serviceList := strings.Join(unresolvedServices, ", ")
|
||||
return fmt.Sprintf(
|
||||
"⚠️ SNI DNS records not found for: %s\n"+
|
||||
" For multi-node clustering, add wildcard CNAME: *.%s -> %s\n"+
|
||||
" (Continuing anyway - single-node setup will work)",
|
||||
serviceList, domain, domain,
|
||||
)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// Run starts the TUI installer and returns the configuration
|
||||
func Run() (*InstallerConfig, error) {
|
||||
// Check if running as root
|
||||
if os.Geteuid() != 0 {
|
||||
return nil, fmt.Errorf("installer must be run as root (use sudo)")
|
||||
}
|
||||
|
||||
model := NewModel()
|
||||
p := tea.NewProgram(&model, tea.WithAltScreen())
|
||||
finalModel, err := p.Run()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := finalModel.(*Model)
|
||||
if m.step == StepInstalling || m.step == StepDone {
|
||||
config := m.GetConfig()
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("installation cancelled")
|
||||
}
|
||||
|
||||
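A small sketch of how a caller might consume the wizard's output; the actual CLI wiring is not shown in this diff, so the printing below is only illustrative. installer.Run refuses to run without root, so this assumes sudo.

```go
package main

import (
	"fmt"
	"log"

	"github.com/DeBrosOfficial/network/pkg/installer"
)

func main() {
	// Run launches the TUI wizard and returns the gathered configuration.
	cfg, err := installer.Run()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("installing %s (%s), first node: %v\n", cfg.Domain, cfg.VpsIP, cfg.IsFirstNode)
	// A real CLI would now drive the installation using cfg
	// (branch, peers, join address, cluster secret, swarm key, ...).
}
```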
@ -130,18 +130,35 @@ func (c *Client) GetPeerCount(ctx context.Context) (int, error) {
|
||||
return 0, fmt.Errorf("peers request failed with status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var peers []struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&peers); err != nil {
|
||||
return 0, fmt.Errorf("failed to decode peers response: %w", err)
|
||||
// The /peers endpoint returns NDJSON (newline-delimited JSON), not a JSON array
|
||||
// We need to stream-read each peer object
|
||||
dec := json.NewDecoder(resp.Body)
|
||||
peerCount := 0
|
||||
for {
|
||||
var peer map[string]interface{}
|
||||
err := dec.Decode(&peer)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return 0, fmt.Errorf("failed to decode peers response: %w", err)
|
||||
}
|
||||
peerCount++
|
||||
}
|
||||
|
||||
return len(peers), nil
|
||||
return peerCount, nil
|
||||
}
|
||||
|
||||
// Add adds content to IPFS and returns the CID
|
||||
func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) {
|
||||
// Track original size by reading into memory first
|
||||
// This allows us to return the actual byte count, not the DAG size
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read data: %w", err)
|
||||
}
|
||||
originalSize := int64(len(data))
|
||||
|
||||
// Create multipart form request for IPFS Cluster API
|
||||
var buf bytes.Buffer
|
||||
writer := multipart.NewWriter(&buf)
|
||||
@ -152,7 +169,7 @@ func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddRe
|
||||
return nil, fmt.Errorf("failed to create form file: %w", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, reader); err != nil {
|
||||
if _, err := io.Copy(part, bytes.NewReader(data)); err != nil {
|
||||
return nil, fmt.Errorf("failed to copy data: %w", err)
|
||||
}
|
||||
|
||||
@ -206,6 +223,9 @@ func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddRe
|
||||
last.Name = name
|
||||
}
|
||||
|
||||
// Override size with original byte count (not DAG size)
|
||||
last.Size = originalSize
|
||||
|
||||
return &last, nil
|
||||
}
|
||||
|
||||
|
||||
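The GetPeerCount change above switches from decoding a JSON array to streaming NDJSON, one object per line. A self-contained illustration of that decoding pattern follows; the two sample lines are made up, and real /peers objects carry more fields.

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

func main() {
	// NDJSON: one JSON object per line, not a JSON array.
	body := strings.NewReader(
		`{"id":"12D3KooWPeerOne"}` + "\n" + `{"id":"12D3KooWPeerTwo"}` + "\n")

	dec := json.NewDecoder(body)
	count := 0
	for {
		var peer map[string]interface{}
		if err := dec.Decode(&peer); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			panic(err)
		}
		count++
	}
	fmt.Println("peers:", count) // peers: 2
}
```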
@ -59,7 +59,8 @@ func TestClient_Add(t *testing.T) {
|
||||
t.Run("success", func(t *testing.T) {
|
||||
expectedCID := "QmTest123"
|
||||
expectedName := "test.txt"
|
||||
expectedSize := int64(100)
|
||||
testContent := "test content"
|
||||
expectedSize := int64(len(testContent)) // Client overrides server size with actual content length
|
||||
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/add" {
|
||||
@ -89,10 +90,11 @@ func TestClient_Add(t *testing.T) {
|
||||
// Read file content
|
||||
_, _ = io.ReadAll(file)
|
||||
|
||||
// Return a different size to verify the client correctly overrides it
|
||||
response := AddResponse{
|
||||
Cid: expectedCID,
|
||||
Name: expectedName,
|
||||
Size: expectedSize,
|
||||
Size: 999, // Client will override this with actual content size
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(response)
|
||||
@ -105,7 +107,7 @@ func TestClient_Add(t *testing.T) {
|
||||
t.Fatalf("Failed to create client: %v", err)
|
||||
}
|
||||
|
||||
reader := strings.NewReader("test content")
|
||||
reader := strings.NewReader(testContent)
|
||||
resp, err := client.Add(context.Background(), reader, expectedName)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add content: %v", err)
|
||||
|
||||
@ -1,11 +1,13 @@
|
||||
package ipfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
@ -17,6 +19,9 @@ import (
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/tlsutil"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// ClusterConfigManager manages IPFS Cluster configuration files
|
||||
@ -82,36 +87,33 @@ func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterCo
|
||||
}
|
||||
|
||||
// Determine cluster path based on data directory structure
|
||||
// Check if dataDir contains specific node names (e.g., ~/.debros/bootstrap, ~/.debros/node2)
|
||||
// Check if dataDir contains specific node names (e.g., ~/.orama/node-1, ~/.orama/node-2, etc.)
|
||||
clusterPath := filepath.Join(dataDir, "ipfs-cluster")
|
||||
if strings.Contains(dataDir, "bootstrap") {
|
||||
// Check if bootstrap is a direct child
|
||||
if filepath.Base(filepath.Dir(dataDir)) == "bootstrap" || filepath.Base(dataDir) == "bootstrap" {
|
||||
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
|
||||
} else {
|
||||
clusterPath = filepath.Join(dataDir, "bootstrap", "ipfs-cluster")
|
||||
}
|
||||
} else if strings.Contains(dataDir, "node2") {
|
||||
if filepath.Base(filepath.Dir(dataDir)) == "node2" || filepath.Base(dataDir) == "node2" {
|
||||
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
|
||||
} else {
|
||||
clusterPath = filepath.Join(dataDir, "node2", "ipfs-cluster")
|
||||
}
|
||||
} else if strings.Contains(dataDir, "node3") {
|
||||
if filepath.Base(filepath.Dir(dataDir)) == "node3" || filepath.Base(dataDir) == "node3" {
|
||||
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
|
||||
} else {
|
||||
clusterPath = filepath.Join(dataDir, "node3", "ipfs-cluster")
|
||||
nodeNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
|
||||
for _, nodeName := range nodeNames {
|
||||
if strings.Contains(dataDir, nodeName) {
|
||||
// Check if this is a direct child
|
||||
if filepath.Base(filepath.Dir(dataDir)) == nodeName || filepath.Base(dataDir) == nodeName {
|
||||
clusterPath = filepath.Join(dataDir, "ipfs-cluster")
|
||||
} else {
|
||||
clusterPath = filepath.Join(dataDir, nodeName, "ipfs-cluster")
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Load or generate cluster secret
|
||||
// Always use ~/.orama/secrets/cluster-secret (new standard location)
|
||||
secretPath := filepath.Join(dataDir, "..", "cluster-secret")
|
||||
if strings.Contains(dataDir, ".debros") {
|
||||
// Try to find cluster-secret in ~/.debros
|
||||
if strings.Contains(dataDir, ".orama") {
|
||||
// Use the secrets directory for proper file organization
|
||||
home, err := os.UserHomeDir()
|
||||
if err == nil {
|
||||
secretPath = filepath.Join(home, ".debros", "cluster-secret")
|
||||
secretsDir := filepath.Join(home, ".orama", "secrets")
|
||||
// Ensure secrets directory exists
|
||||
if err := os.MkdirAll(secretsDir, 0700); err == nil {
|
||||
secretPath = filepath.Join(secretsDir, "cluster-secret")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -148,23 +150,35 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
|
||||
return fmt.Errorf("failed to parse IPFS API URL: %w", err)
|
||||
}
|
||||
|
||||
// Determine node name
|
||||
nodeName := cm.cfg.Node.Type
|
||||
if nodeName == "node" {
|
||||
// Try to extract from data dir or ID
|
||||
if strings.Contains(cm.cfg.Node.DataDir, "node2") || strings.Contains(cm.cfg.Node.ID, "node2") {
|
||||
nodeName = "node2"
|
||||
} else if strings.Contains(cm.cfg.Node.DataDir, "node3") || strings.Contains(cm.cfg.Node.ID, "node3") {
|
||||
nodeName = "node3"
|
||||
} else {
|
||||
nodeName = "node"
|
||||
// Determine node name from ID or DataDir
|
||||
nodeName := "node-1" // Default fallback
|
||||
possibleNames := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
|
||||
for _, name := range possibleNames {
|
||||
if strings.Contains(cm.cfg.Node.DataDir, name) || strings.Contains(cm.cfg.Node.ID, name) {
|
||||
nodeName = name
|
||||
break
|
||||
}
|
||||
}
|
||||
// If ID contains a node identifier, use it
|
||||
if cm.cfg.Node.ID != "" {
|
||||
for _, name := range possibleNames {
|
||||
if strings.Contains(cm.cfg.Node.ID, name) {
|
||||
nodeName = name
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate ports based on pattern
|
||||
proxyPort := clusterPort - 1
|
||||
pinSvcPort := clusterPort + 1
|
||||
clusterListenPort := clusterPort + 2
|
||||
// REST API: 9094
|
||||
// Proxy: 9094 - 1 = 9093 (NOT USED - keeping for reference)
|
||||
// PinSvc: 9094 + 1 = 9095
|
||||
// Proxy API: 9094 + 1 = 9095 (actual proxy port)
|
||||
// PinSvc API: 9094 + 3 = 9097
|
||||
// Cluster LibP2P: 9094 + 4 = 9098
|
||||
proxyPort := clusterPort + 1 // 9095 (IPFSProxy API)
|
||||
pinSvcPort := clusterPort + 3 // 9097 (PinSvc API)
|
||||
clusterListenPort := clusterPort + 4 // 9098 (Cluster LibP2P)
|
||||
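For illustration, the port offsets described in the comments above work out as follows; this is a sketch only, and the numbers are taken directly from those comments.

package main

import "fmt"

func main() {
	restAPIPort := 9094                   // IPFS Cluster REST API
	proxyPort := restAPIPort + 1          // 9095: IPFS proxy API
	pinSvcPort := restAPIPort + 3         // 9097: pinning service API
	clusterListenPort := restAPIPort + 4  // 9098: cluster LibP2P listener
	fmt.Println(proxyPort, pinSvcPort, clusterListenPort) // 9095 9097 9098
}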
|
||||
// If config doesn't exist, initialize it with ipfs-cluster-service init
|
||||
// This ensures we have all required sections (datastore, informer, etc.)
|
||||
@ -214,64 +228,570 @@ func (cm *ClusterConfigManager) EnsureConfig() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBootstrapPeers updates peer_addresses and peerstore with bootstrap peer information
|
||||
func (cm *ClusterConfigManager) UpdateBootstrapPeers(bootstrapAPIURL string) error {
|
||||
// UpdatePeerAddresses updates peer_addresses and peerstore with peer information
|
||||
// Returns true if update was successful, false if peer is not available yet (non-fatal)
|
||||
func (cm *ClusterConfigManager) UpdatePeerAddresses(peerAPIURL string) (bool, error) {
|
||||
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
|
||||
return nil // IPFS not configured
|
||||
return false, nil // IPFS not configured
|
||||
}
|
||||
|
||||
// Skip if this is the bootstrap node itself
|
||||
if cm.cfg.Node.Type == "bootstrap" {
|
||||
return nil
|
||||
// Skip if this is the first node (creates the cluster, no join address)
|
||||
if cm.cfg.Database.RQLiteJoinAddress == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Query bootstrap cluster API to get peer ID
|
||||
peerID, err := getBootstrapPeerID(bootstrapAPIURL)
|
||||
// Query peer cluster API to get peer ID
|
||||
peerID, err := getPeerID(peerAPIURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get bootstrap peer ID: %w", err)
|
||||
// Non-fatal: peer might not be available yet
|
||||
cm.logger.Debug("Peer not available yet, will retry",
|
||||
zap.String("peer_api", peerAPIURL),
|
||||
zap.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if peerID == "" {
|
||||
cm.logger.Warn("Bootstrap peer ID not available yet")
|
||||
return nil
|
||||
cm.logger.Debug("Peer ID not available yet")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Extract bootstrap cluster port from URL
|
||||
_, clusterPort, err := parseClusterPorts(bootstrapAPIURL)
|
||||
// Extract peer host and cluster port from URL
|
||||
peerHost, clusterPort, err := parsePeerHostAndPort(peerAPIURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse bootstrap cluster API URL: %w", err)
|
||||
return false, fmt.Errorf("failed to parse peer cluster API URL: %w", err)
|
||||
}
|
||||
|
||||
// Bootstrap listens on clusterPort + 2 (same pattern)
|
||||
bootstrapClusterPort := clusterPort + 2
|
||||
bootstrapPeerAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", bootstrapClusterPort, peerID)
|
||||
// Peer cluster LibP2P listens on clusterPort + 4
|
||||
// (REST API is 9094, LibP2P is 9098 = 9094 + 4)
|
||||
peerClusterPort := clusterPort + 4
|
||||
|
||||
// Determine IP protocol (ip4 or ip6) based on the host
|
||||
var ipProtocol string
|
||||
if net.ParseIP(peerHost).To4() != nil {
|
||||
ipProtocol = "ip4"
|
||||
} else {
|
||||
ipProtocol = "ip6"
|
||||
}
|
||||
|
||||
peerAddr := fmt.Sprintf("/%s/%s/tcp/%d/p2p/%s", ipProtocol, peerHost, peerClusterPort, peerID)
|
||||
|
||||
// Load current config
|
||||
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
|
||||
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load config: %w", err)
|
||||
return false, fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
// Update peer_addresses
|
||||
cfg.Cluster.PeerAddresses = []string{bootstrapPeerAddr}
|
||||
// CRITICAL: Always update peerstore file to ensure no stale addresses remain
|
||||
// Stale addresses (e.g., from old port configurations) cause LibP2P dial backoff,
|
||||
// preventing cluster peers from connecting even if the correct address is present.
|
||||
// We must clean and rewrite the peerstore on every update to avoid this.
|
||||
peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
|
||||
|
||||
// Check if peerstore needs updating (avoid unnecessary writes but always clean stale entries)
|
||||
needsUpdate := true
|
||||
if peerstoreData, err := os.ReadFile(peerstorePath); err == nil {
|
||||
// Only skip update if peerstore contains EXACTLY the correct address and nothing else
|
||||
existingAddrs := strings.Split(strings.TrimSpace(string(peerstoreData)), "\n")
|
||||
if len(existingAddrs) == 1 && strings.TrimSpace(existingAddrs[0]) == peerAddr {
|
||||
cm.logger.Debug("Peer address already correct in peerstore", zap.String("addr", peerAddr))
|
||||
needsUpdate = false
|
||||
}
|
||||
}
|
||||
|
||||
if needsUpdate {
|
||||
// Write ONLY the correct peer address, removing any stale entries
|
||||
if err := os.WriteFile(peerstorePath, []byte(peerAddr+"\n"), 0644); err != nil {
|
||||
return false, fmt.Errorf("failed to write peerstore: %w", err)
|
||||
}
|
||||
cm.logger.Info("Updated peerstore with peer (cleaned stale entries)",
|
||||
zap.String("addr", peerAddr),
|
||||
zap.String("peerstore_path", peerstorePath))
|
||||
}
|
||||
|
||||
// Then sync service.json from peerstore to keep them in sync
|
||||
cfg.Cluster.PeerAddresses = []string{peerAddr}
|
||||
|
||||
// Save config
|
||||
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
|
||||
return fmt.Errorf("failed to save config: %w", err)
|
||||
return false, fmt.Errorf("failed to save config: %w", err)
|
||||
}
|
||||
|
||||
// Write to peerstore file
|
||||
peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
|
||||
if err := os.WriteFile(peerstorePath, []byte(bootstrapPeerAddr+"\n"), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write peerstore: %w", err)
|
||||
}
|
||||
|
||||
cm.logger.Info("Updated bootstrap peer configuration",
|
||||
zap.String("bootstrap_peer_addr", bootstrapPeerAddr),
|
||||
cm.logger.Info("Updated peer configuration",
|
||||
zap.String("peer_addr", peerAddr),
|
||||
zap.String("peerstore_path", peerstorePath))
|
||||
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
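A small standalone sketch of the clean-and-rewrite behaviour described above. The rewritePeerstore helper below is hypothetical; the point it illustrates is that the peerstore ends up containing exactly one known-good multiaddr, so stale entries cannot trigger LibP2P dial backoff.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// rewritePeerstore replaces the peerstore contents with exactly one address,
// skipping the write only when the file already contains just that address.
func rewritePeerstore(clusterPath, peerAddr string) (bool, error) {
	peerstorePath := filepath.Join(clusterPath, "peerstore")

	if data, err := os.ReadFile(peerstorePath); err == nil {
		lines := strings.Split(strings.TrimSpace(string(data)), "\n")
		if len(lines) == 1 && strings.TrimSpace(lines[0]) == peerAddr {
			return false, nil // already correct, nothing to clean
		}
	}
	if err := os.WriteFile(peerstorePath, []byte(peerAddr+"\n"), 0644); err != nil {
		return false, fmt.Errorf("write peerstore: %w", err)
	}
	return true, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "cluster")
	defer os.RemoveAll(dir)
	addr := "/ip4/203.0.113.10/tcp/9098/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN" // example address
	updated, err := rewritePeerstore(dir, addr)
	fmt.Println(updated, err) // true <nil> on the first run against an empty directory
}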
|
||||
// UpdateAllClusterPeers discovers all cluster peers from the local cluster API
|
||||
// and updates peer_addresses in service.json. This allows IPFS Cluster to automatically
|
||||
// connect to all discovered peers in the cluster.
|
||||
// Returns true if update was successful, false if cluster is not available yet (non-fatal)
|
||||
func (cm *ClusterConfigManager) UpdateAllClusterPeers() (bool, error) {
|
||||
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
|
||||
return false, nil // IPFS not configured
|
||||
}
|
||||
|
||||
// Query local cluster API to get all peers
|
||||
client := newStandardHTTPClient()
|
||||
peersURL := fmt.Sprintf("%s/peers", cm.cfg.Database.IPFS.ClusterAPIURL)
|
||||
resp, err := client.Get(peersURL)
|
||||
if err != nil {
|
||||
// Non-fatal: cluster might not be available yet
|
||||
cm.logger.Debug("Cluster API not available yet, will retry",
|
||||
zap.String("peers_url", peersURL),
|
||||
zap.Error(err))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Parse NDJSON response
|
||||
dec := json.NewDecoder(bytes.NewReader(resp))
|
||||
var allPeerAddresses []string
|
||||
seenPeers := make(map[string]bool)
|
||||
peerIDToAddresses := make(map[string][]string)
|
||||
|
||||
// First pass: collect all peer IDs and their addresses
|
||||
for {
|
||||
var peerInfo struct {
|
||||
ID string `json:"id"`
|
||||
Addresses []string `json:"addresses"`
|
||||
ClusterPeers []string `json:"cluster_peers"`
|
||||
ClusterPeersAddresses []string `json:"cluster_peers_addresses"`
|
||||
}
|
||||
|
||||
err := dec.Decode(&peerInfo)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
cm.logger.Debug("Failed to decode peer info", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
// Store this peer's addresses
|
||||
if peerInfo.ID != "" {
|
||||
peerIDToAddresses[peerInfo.ID] = peerInfo.Addresses
|
||||
}
|
||||
|
||||
// Also collect cluster peers addresses if available
|
||||
// These are addresses of all peers in the cluster
|
||||
for _, addr := range peerInfo.ClusterPeersAddresses {
|
||||
if ma, err := multiaddr.NewMultiaddr(addr); err == nil {
|
||||
// Validate it has p2p component (peer ID)
|
||||
if _, err := ma.ValueForProtocol(multiaddr.P_P2P); err == nil {
|
||||
addrStr := ma.String()
|
||||
if !seenPeers[addrStr] {
|
||||
allPeerAddresses = append(allPeerAddresses, addrStr)
|
||||
seenPeers[addrStr] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't get cluster_peers_addresses, try to construct them from peer IDs and addresses
|
||||
if len(allPeerAddresses) == 0 && len(peerIDToAddresses) > 0 {
|
||||
// Get cluster listen port from config
|
||||
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
|
||||
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
|
||||
if err == nil && len(cfg.Cluster.ListenMultiaddress) > 0 {
|
||||
// Extract port from listen_multiaddress (e.g., "/ip4/0.0.0.0/tcp/9098")
|
||||
listenAddr := cfg.Cluster.ListenMultiaddress[0]
|
||||
if ma, err := multiaddr.NewMultiaddr(listenAddr); err == nil {
|
||||
if port, err := ma.ValueForProtocol(multiaddr.P_TCP); err == nil {
|
||||
// For each peer ID, try to find its IP address and construct cluster multiaddr
|
||||
for peerID, addresses := range peerIDToAddresses {
|
||||
// Try to find an IP address in the peer's addresses
|
||||
for _, addrStr := range addresses {
|
||||
if ma, err := multiaddr.NewMultiaddr(addrStr); err == nil {
|
||||
// Extract IP address (IPv4 or IPv6)
|
||||
if ip, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ip != "" {
|
||||
clusterAddr := fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", ip, port, peerID)
|
||||
if !seenPeers[clusterAddr] {
|
||||
allPeerAddresses = append(allPeerAddresses, clusterAddr)
|
||||
seenPeers[clusterAddr] = true
|
||||
}
|
||||
break
|
||||
} else if ip, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ip != "" {
|
||||
clusterAddr := fmt.Sprintf("/ip6/%s/tcp/%s/p2p/%s", ip, port, peerID)
|
||||
if !seenPeers[clusterAddr] {
|
||||
allPeerAddresses = append(allPeerAddresses, clusterAddr)
|
||||
seenPeers[clusterAddr] = true
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(allPeerAddresses) == 0 {
|
||||
cm.logger.Debug("No cluster peer addresses found in API response")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Load current config
|
||||
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
|
||||
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
// Check if peer addresses have changed
|
||||
addressesChanged := false
|
||||
if len(cfg.Cluster.PeerAddresses) != len(allPeerAddresses) {
|
||||
addressesChanged = true
|
||||
} else {
|
||||
// Check if addresses are different
|
||||
currentAddrs := make(map[string]bool)
|
||||
for _, addr := range cfg.Cluster.PeerAddresses {
|
||||
currentAddrs[addr] = true
|
||||
}
|
||||
for _, addr := range allPeerAddresses {
|
||||
if !currentAddrs[addr] {
|
||||
addressesChanged = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !addressesChanged {
|
||||
cm.logger.Debug("Cluster peer addresses already up to date",
|
||||
zap.Int("peer_count", len(allPeerAddresses)))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Update peerstore file FIRST - this is what IPFS Cluster reads for bootstrapping
|
||||
// Peerstore is the source of truth, service.json is just for our tracking
|
||||
peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
|
||||
peerstoreContent := strings.Join(allPeerAddresses, "\n") + "\n"
|
||||
if err := os.WriteFile(peerstorePath, []byte(peerstoreContent), 0644); err != nil {
|
||||
cm.logger.Warn("Failed to update peerstore file", zap.Error(err))
|
||||
// Non-fatal, continue
|
||||
}
|
||||
|
||||
// Then sync service.json from peerstore to keep them in sync
|
||||
cfg.Cluster.PeerAddresses = allPeerAddresses
|
||||
|
||||
// Save config
|
||||
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
|
||||
return false, fmt.Errorf("failed to save config: %w", err)
|
||||
}
|
||||
|
||||
cm.logger.Info("Updated cluster peer addresses",
|
||||
zap.Int("peer_count", len(allPeerAddresses)),
|
||||
zap.Strings("peer_addresses", allPeerAddresses))
|
||||
|
||||
return true, nil
|
||||
}
|
||||
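The /peers endpoint is consumed as NDJSON above. A reduced sketch of that decoding loop, with the response struct trimmed to the two fields the code relies on (field names assumed to match the real endpoint):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// peerInfo holds just the fields used here; the real endpoint returns more.
type peerInfo struct {
	ID                    string   `json:"id"`
	ClusterPeersAddresses []string `json:"cluster_peers_addresses"`
}

// decodePeers reads newline-delimited JSON objects until the stream ends.
func decodePeers(r io.Reader) []peerInfo {
	dec := json.NewDecoder(r)
	var peers []peerInfo
	for {
		var p peerInfo
		if err := dec.Decode(&p); err != nil {
			break // io.EOF ends the stream; any other error also stops this sketch
		}
		peers = append(peers, p)
	}
	return peers
}

func main() {
	ndjson := `{"id":"12D3KooWAaa","cluster_peers_addresses":["/ip4/10.0.0.2/tcp/9098/p2p/12D3KooWBbb"]}
{"id":"12D3KooWBbb","cluster_peers_addresses":[]}`
	for _, p := range decodePeers(strings.NewReader(ndjson)) {
		fmt.Println(p.ID, p.ClusterPeersAddresses)
	}
}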
|
||||
// RepairPeerConfiguration automatically discovers and repairs peer configuration
|
||||
// Tries multiple methods: gateway /v1/network/status, config-based discovery, peer multiaddr
|
||||
func (cm *ClusterConfigManager) RepairPeerConfiguration() (bool, error) {
|
||||
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
|
||||
return false, nil // IPFS not configured
|
||||
}
|
||||
|
||||
// Method 1: Try to discover cluster peers via /v1/network/status endpoint
|
||||
// This is the most reliable method as it uses the HTTPS gateway
|
||||
if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
|
||||
success, err := cm.DiscoverClusterPeersFromGateway()
|
||||
if err != nil {
|
||||
cm.logger.Debug("Gateway discovery failed, trying direct API", zap.Error(err))
|
||||
} else if success {
|
||||
cm.logger.Info("Successfully discovered cluster peers from gateway")
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Skip direct API method if this is the first node (creates the cluster, no join address)
|
||||
if cm.cfg.Database.RQLiteJoinAddress == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Method 2: Try direct cluster API (fallback)
|
||||
var peerAPIURL string
|
||||
|
||||
// Try to extract from peers multiaddr
|
||||
if len(cm.cfg.Discovery.BootstrapPeers) > 0 {
|
||||
if ip := extractIPFromMultiaddrForCluster(cm.cfg.Discovery.BootstrapPeers[0]); ip != "" {
|
||||
// Default cluster API port is 9094
|
||||
peerAPIURL = fmt.Sprintf("http://%s:9094", ip)
|
||||
cm.logger.Debug("Inferred peer cluster API from peer",
|
||||
zap.String("peer_api", peerAPIURL))
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to localhost if nothing found (for local development)
|
||||
if peerAPIURL == "" {
|
||||
peerAPIURL = "http://localhost:9094"
|
||||
cm.logger.Debug("Using localhost fallback for peer cluster API")
|
||||
}
|
||||
|
||||
// Try to update peers
|
||||
success, err := cm.UpdatePeerAddresses(peerAPIURL)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if success {
|
||||
cm.logger.Info("Successfully repaired peer configuration via direct API")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If update failed (peer not available), return false but no error
|
||||
// This allows retries later
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// DiscoverClusterPeersFromGateway queries bootstrap peers' /v1/network/status endpoint
|
||||
// to discover IPFS Cluster peer information and updates the local service.json
|
||||
func (cm *ClusterConfigManager) DiscoverClusterPeersFromGateway() (bool, error) {
|
||||
if len(cm.cfg.Discovery.BootstrapPeers) == 0 {
|
||||
cm.logger.Debug("No bootstrap peers configured, skipping gateway discovery")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var discoveredPeers []string
|
||||
seenPeers := make(map[string]bool)
|
||||
|
||||
for _, peerAddr := range cm.cfg.Discovery.BootstrapPeers {
|
||||
// Extract domain or IP from multiaddr
|
||||
domain := extractDomainFromMultiaddr(peerAddr)
|
||||
if domain == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Query /v1/network/status endpoint
|
||||
statusURL := fmt.Sprintf("https://%s/v1/network/status", domain)
|
||||
cm.logger.Debug("Querying peer network status", zap.String("url", statusURL))
|
||||
|
||||
// Use TLS-aware HTTP client (handles staging certs for *.debros.network)
|
||||
client := tlsutil.NewHTTPClientForDomain(10*time.Second, domain)
|
||||
resp, err := client.Get(statusURL)
|
||||
if err != nil {
|
||||
// Try HTTP fallback
|
||||
statusURL = fmt.Sprintf("http://%s/v1/network/status", domain)
|
||||
resp, err = client.Get(statusURL)
|
||||
if err != nil {
|
||||
cm.logger.Debug("Failed to query peer status", zap.String("domain", domain), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
cm.logger.Debug("Peer returned non-OK status", zap.String("domain", domain), zap.Int("status", resp.StatusCode))
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse response
|
||||
var status struct {
|
||||
IPFSCluster *struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
Addresses []string `json:"addresses"`
|
||||
} `json:"ipfs_cluster"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
|
||||
cm.logger.Debug("Failed to decode peer status", zap.String("domain", domain), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
if status.IPFSCluster == nil || status.IPFSCluster.PeerID == "" {
|
||||
cm.logger.Debug("Peer has no IPFS Cluster info", zap.String("domain", domain))
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract IP from domain or addresses
|
||||
peerIP := extractIPFromMultiaddrForCluster(peerAddr)
|
||||
if peerIP == "" {
|
||||
// Try to resolve domain
|
||||
ips, err := net.LookupIP(domain)
|
||||
if err == nil && len(ips) > 0 {
|
||||
for _, ip := range ips {
|
||||
if ip.To4() != nil {
|
||||
peerIP = ip.String()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if peerIP == "" {
|
||||
cm.logger.Debug("Could not determine peer IP", zap.String("domain", domain))
|
||||
continue
|
||||
}
|
||||
|
||||
// Construct cluster multiaddr
|
||||
// IPFS Cluster listens on port 9098 (REST API port 9094 + 4)
|
||||
clusterAddr := fmt.Sprintf("/ip4/%s/tcp/9098/p2p/%s", peerIP, status.IPFSCluster.PeerID)
|
||||
if !seenPeers[clusterAddr] {
|
||||
discoveredPeers = append(discoveredPeers, clusterAddr)
|
||||
seenPeers[clusterAddr] = true
|
||||
cm.logger.Info("Discovered cluster peer from gateway",
|
||||
zap.String("domain", domain),
|
||||
zap.String("peer_id", status.IPFSCluster.PeerID),
|
||||
zap.String("cluster_addr", clusterAddr))
|
||||
}
|
||||
}
|
||||
|
||||
if len(discoveredPeers) == 0 {
|
||||
cm.logger.Debug("No cluster peers discovered from gateway")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Load current config
|
||||
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
|
||||
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
// Update peerstore file
|
||||
peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
|
||||
peerstoreContent := strings.Join(discoveredPeers, "\n") + "\n"
|
||||
if err := os.WriteFile(peerstorePath, []byte(peerstoreContent), 0644); err != nil {
|
||||
cm.logger.Warn("Failed to update peerstore file", zap.Error(err))
|
||||
}
|
||||
|
||||
// Update peer_addresses in config
|
||||
cfg.Cluster.PeerAddresses = discoveredPeers
|
||||
|
||||
// Save config
|
||||
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
|
||||
return false, fmt.Errorf("failed to save config: %w", err)
|
||||
}
|
||||
|
||||
cm.logger.Info("Updated cluster peer addresses from gateway discovery",
|
||||
zap.Int("peer_count", len(discoveredPeers)),
|
||||
zap.Strings("peer_addresses", discoveredPeers))
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
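A hedged sketch of the address construction done in DiscoverClusterPeersFromGateway: format /ip4/&lt;ip&gt;/tcp/9098/p2p/&lt;peer-id&gt; and validate it with go-multiaddr. The IP and peer ID below are examples chosen only for illustration.

package main

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

func main() {
	peerIP := "203.0.113.10"                                      // example IP resolved from the peer's domain
	peerID := "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"    // syntactically valid example peer ID
	clusterPort := 9098                                           // REST API 9094 + 4, as in the code above

	addr := fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%s", peerIP, clusterPort, peerID)

	// Validate that the string parses and actually carries a p2p component.
	ma, err := multiaddr.NewMultiaddr(addr)
	if err != nil {
		fmt.Println("invalid multiaddr:", err)
		return
	}
	id, err := ma.ValueForProtocol(multiaddr.P_P2P)
	fmt.Println(ma.String(), id, err)
}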
// extractDomainFromMultiaddr extracts domain or IP from a multiaddr string
|
||||
// Handles formats like /dns4/domain/tcp/port/p2p/id or /ip4/ip/tcp/port/p2p/id
|
||||
func extractDomainFromMultiaddr(multiaddrStr string) string {
|
||||
ma, err := multiaddr.NewMultiaddr(multiaddrStr)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Try DNS4 first (domain name)
|
||||
if domain, err := ma.ValueForProtocol(multiaddr.P_DNS4); err == nil && domain != "" {
|
||||
return domain
|
||||
}
|
||||
|
||||
// Try DNS6
|
||||
if domain, err := ma.ValueForProtocol(multiaddr.P_DNS6); err == nil && domain != "" {
|
||||
return domain
|
||||
}
|
||||
|
||||
// Try IP4
|
||||
if ip, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ip != "" {
|
||||
return ip
|
||||
}
|
||||
|
||||
// Try IP6
|
||||
if ip, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ip != "" {
|
||||
return ip
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
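A possible usage sketch of the same extraction order (DNS names preferred over raw IPs), written against go-multiaddr directly; the addresses are hypothetical.

package main

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

// hostFromMultiaddr mirrors the extraction order above: DNS names win over raw IPs.
func hostFromMultiaddr(s string) string {
	ma, err := multiaddr.NewMultiaddr(s)
	if err != nil {
		return ""
	}
	for _, proto := range []int{multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_IP4, multiaddr.P_IP6} {
		if v, err := ma.ValueForProtocol(proto); err == nil && v != "" {
			return v
		}
	}
	return ""
}

func main() {
	// Hypothetical addresses; only their shape matters here.
	fmt.Println(hostFromMultiaddr("/dns4/node-1.example.org/tcp/4001/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"))
	fmt.Println(hostFromMultiaddr("/ip4/203.0.113.10/tcp/4001"))
}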
|
||||
// DiscoverClusterPeersFromLibP2P loads IPFS cluster peer addresses from the peerstore file.
|
||||
// If peerstore is empty, it means there are no peers to connect to.
|
||||
// Returns true if peers were loaded and configured, false otherwise (non-fatal)
|
||||
func (cm *ClusterConfigManager) DiscoverClusterPeersFromLibP2P(host host.Host) (bool, error) {
|
||||
if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
|
||||
return false, nil // IPFS not configured
|
||||
}
|
||||
|
||||
// Load peer addresses from peerstore file
|
||||
peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
|
||||
peerstoreData, err := os.ReadFile(peerstorePath)
|
||||
if err != nil {
|
||||
// Peerstore file doesn't exist or can't be read - no peers to connect to
|
||||
cm.logger.Debug("Peerstore file not found or empty - no cluster peers to connect to",
|
||||
zap.String("peerstore_path", peerstorePath))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var allPeerAddresses []string
|
||||
seenPeers := make(map[string]bool)
|
||||
|
||||
// Parse peerstore file (one multiaddr per line)
|
||||
lines := strings.Split(strings.TrimSpace(string(peerstoreData)), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" && strings.HasPrefix(line, "/") {
|
||||
// Validate it's a proper multiaddr with p2p component
|
||||
if ma, err := multiaddr.NewMultiaddr(line); err == nil {
|
||||
if _, err := ma.ValueForProtocol(multiaddr.P_P2P); err == nil {
|
||||
if !seenPeers[line] {
|
||||
allPeerAddresses = append(allPeerAddresses, line)
|
||||
seenPeers[line] = true
|
||||
cm.logger.Debug("Loaded cluster peer address from peerstore",
|
||||
zap.String("addr", line))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(allPeerAddresses) == 0 {
|
||||
cm.logger.Debug("Peerstore file is empty - no cluster peers to connect to")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Get config to update peer_addresses
|
||||
serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
|
||||
cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to load config: %w", err)
|
||||
}
|
||||
|
||||
// Check if peer addresses have changed
|
||||
addressesChanged := false
|
||||
if len(cfg.Cluster.PeerAddresses) != len(allPeerAddresses) {
|
||||
addressesChanged = true
|
||||
} else {
|
||||
currentAddrs := make(map[string]bool)
|
||||
for _, addr := range cfg.Cluster.PeerAddresses {
|
||||
currentAddrs[addr] = true
|
||||
}
|
||||
for _, addr := range allPeerAddresses {
|
||||
if !currentAddrs[addr] {
|
||||
addressesChanged = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !addressesChanged {
|
||||
cm.logger.Debug("Cluster peer addresses already up to date",
|
||||
zap.Int("peer_count", len(allPeerAddresses)))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Update peer_addresses
|
||||
cfg.Cluster.PeerAddresses = allPeerAddresses
|
||||
|
||||
// Save config
|
||||
if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
|
||||
return false, fmt.Errorf("failed to save config: %w", err)
|
||||
}
|
||||
|
||||
cm.logger.Info("Loaded cluster peer addresses from peerstore",
|
||||
zap.Int("peer_count", len(allPeerAddresses)),
|
||||
zap.Strings("peer_addresses", allPeerAddresses))
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// loadOrCreateConfig loads existing service.json or creates a template
|
||||
@ -471,6 +991,38 @@ func ensureRequiredSection(parent map[string]interface{}, key string, defaults m
|
||||
}
|
||||
}
|
||||
|
||||
// parsePeerHostAndPort extracts host and REST API port from peer API URL
|
||||
func parsePeerHostAndPort(peerAPIURL string) (host string, restAPIPort int, err error) {
|
||||
u, err := url.Parse(peerAPIURL)
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
host = u.Hostname()
|
||||
if host == "" {
|
||||
return "", 0, fmt.Errorf("no host in URL: %s", peerAPIURL)
|
||||
}
|
||||
|
||||
portStr := u.Port()
|
||||
if portStr == "" {
|
||||
// Default port based on scheme
|
||||
if u.Scheme == "http" {
|
||||
portStr = "9094"
|
||||
} else if u.Scheme == "https" {
|
||||
portStr = "443"
|
||||
} else {
|
||||
return "", 0, fmt.Errorf("unknown scheme: %s", u.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = fmt.Sscanf(portStr, "%d", &restAPIPort)
|
||||
if err != nil {
|
||||
return "", 0, fmt.Errorf("invalid port: %s", portStr)
|
||||
}
|
||||
|
||||
return host, restAPIPort, nil
|
||||
}
|
||||
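An equivalent standalone sketch of that parsing, using net/url and strconv rather than Sscanf; behaviour is assumed to match the function above (an explicit port wins, otherwise 9094 for http and 443 for https).

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// hostAndPort is a sketch of parsePeerHostAndPort with the same defaulting rules.
func hostAndPort(raw string) (string, int, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", 0, err
	}
	if u.Hostname() == "" {
		return "", 0, fmt.Errorf("no host in URL: %s", raw)
	}
	portStr := u.Port()
	if portStr == "" {
		switch u.Scheme {
		case "http":
			portStr = "9094"
		case "https":
			portStr = "443"
		default:
			return "", 0, fmt.Errorf("unknown scheme: %s", u.Scheme)
		}
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", 0, fmt.Errorf("invalid port: %s", portStr)
	}
	return u.Hostname(), port, nil
}

func main() {
	for _, raw := range []string{"http://10.0.0.5:9094", "https://node-1.example.org"} {
		h, p, err := hostAndPort(raw)
		fmt.Println(h, p, err)
	}
}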
|
||||
// parseClusterPorts extracts cluster port and REST API port from ClusterAPIURL
|
||||
func parseClusterPorts(clusterAPIURL string) (clusterPort, restAPIPort int, err error) {
|
||||
u, err := url.Parse(clusterAPIURL)
|
||||
@ -495,8 +1047,9 @@ func parseClusterPorts(clusterAPIURL string) (clusterPort, restAPIPort int, err
|
||||
return 0, 0, fmt.Errorf("invalid port: %s", portStr)
|
||||
}
|
||||
|
||||
// Cluster listen port is typically REST API port + 2
|
||||
clusterPort = restAPIPort + 2
|
||||
// clusterPort is used as the base port for calculations
|
||||
// The actual cluster LibP2P listen port is calculated as clusterPort + 4
|
||||
clusterPort = restAPIPort
|
||||
|
||||
return clusterPort, restAPIPort, nil
|
||||
}
|
||||
@ -529,23 +1082,26 @@ func parseIPFSPort(apiURL string) (int, error) {
|
||||
return port, nil
|
||||
}
|
||||
|
||||
// getBootstrapPeerID queries the bootstrap cluster API to get the peer ID
|
||||
func getBootstrapPeerID(apiURL string) (string, error) {
|
||||
// getPeerID queries the cluster API to get the peer ID
|
||||
func getPeerID(apiURL string) (string, error) {
|
||||
// Simple HTTP client to query /peers endpoint
|
||||
client := &standardHTTPClient{}
|
||||
peersResp, err := client.Get(fmt.Sprintf("%s/peers", apiURL))
|
||||
client := newStandardHTTPClient()
|
||||
resp, err := client.Get(fmt.Sprintf("%s/peers", apiURL))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var peersData struct {
|
||||
// The /peers endpoint returns NDJSON (newline-delimited JSON)
|
||||
// We need to read the first peer object to get the peer ID
|
||||
dec := json.NewDecoder(bytes.NewReader(resp))
|
||||
var firstPeer struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(peersResp, &peersData); err != nil {
|
||||
return "", err
|
||||
if err := dec.Decode(&firstPeer); err != nil {
|
||||
return "", fmt.Errorf("failed to decode first peer: %w", err)
|
||||
}
|
||||
|
||||
return peersData.ID, nil
|
||||
return firstPeer.ID, nil
|
||||
}
|
||||
|
||||
// loadOrGenerateClusterSecret loads cluster secret or generates a new one
|
||||
@ -581,11 +1137,19 @@ func generateRandomSecret(length int) string {
|
||||
return hex.EncodeToString(bytes)
|
||||
}
|
||||
|
||||
// standardHTTPClient implements HTTP client using net/http
|
||||
type standardHTTPClient struct{}
|
||||
// standardHTTPClient implements HTTP client using net/http with centralized TLS configuration
|
||||
type standardHTTPClient struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func newStandardHTTPClient() *standardHTTPClient {
|
||||
return &standardHTTPClient{
|
||||
client: tlsutil.NewHTTPClient(30 * time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *standardHTTPClient) Get(url string) ([]byte, error) {
|
||||
resp, err := http.Get(url)
|
||||
resp, err := c.client.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -603,6 +1167,28 @@ func (c *standardHTTPClient) Get(url string) ([]byte, error) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// extractIPFromMultiaddrForCluster extracts IP address from a LibP2P multiaddr string
|
||||
// Used for inferring bootstrap cluster API URL
|
||||
func extractIPFromMultiaddrForCluster(multiaddrStr string) string {
|
||||
// Parse multiaddr
|
||||
ma, err := multiaddr.NewMultiaddr(multiaddrStr)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Try to extract IPv4 address
|
||||
if ipv4, err := ma.ValueForProtocol(multiaddr.P_IP4); err == nil && ipv4 != "" {
|
||||
return ipv4
|
||||
}
|
||||
|
||||
// Try to extract IPv6 address
|
||||
if ipv6, err := ma.ValueForProtocol(multiaddr.P_IP6); err == nil && ipv6 != "" {
|
||||
return ipv6
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// FixIPFSConfigAddresses fixes localhost addresses in IPFS config to use 127.0.0.1
|
||||
// This is necessary because IPFS doesn't accept "localhost" as a valid IP address in multiaddrs
|
||||
// This function always ensures the config is correct, regardless of current state
|
||||
@ -622,15 +1208,15 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
|
||||
}
|
||||
|
||||
// Try to find IPFS repo path
|
||||
// Check common locations: dataDir/ipfs/repo, or dataDir/bootstrap/ipfs/repo, etc.
|
||||
// Check common locations: dataDir/ipfs/repo, dataDir/node-1/ipfs/repo, etc.
|
||||
possiblePaths := []string{
|
||||
filepath.Join(dataDir, "ipfs", "repo"),
|
||||
filepath.Join(dataDir, "bootstrap", "ipfs", "repo"),
|
||||
filepath.Join(dataDir, "node2", "ipfs", "repo"),
|
||||
filepath.Join(dataDir, "node3", "ipfs", "repo"),
|
||||
filepath.Join(filepath.Dir(dataDir), "bootstrap", "ipfs", "repo"),
|
||||
filepath.Join(filepath.Dir(dataDir), "node2", "ipfs", "repo"),
|
||||
filepath.Join(filepath.Dir(dataDir), "node3", "ipfs", "repo"),
|
||||
filepath.Join(dataDir, "node-1", "ipfs", "repo"),
|
||||
filepath.Join(dataDir, "node-2", "ipfs", "repo"),
|
||||
filepath.Join(dataDir, "node-3", "ipfs", "repo"),
|
||||
filepath.Join(filepath.Dir(dataDir), "node-1", "ipfs", "repo"),
|
||||
filepath.Join(filepath.Dir(dataDir), "node-2", "ipfs", "repo"),
|
||||
filepath.Join(filepath.Dir(dataDir), "node-3", "ipfs", "repo"),
|
||||
}
|
||||
|
||||
var ipfsRepoPath string
|
||||
@ -652,7 +1238,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
|
||||
return fmt.Errorf("failed to parse IPFS API URL: %w", err)
|
||||
}
|
||||
|
||||
// Determine gateway port (typically API port + 3079, or 8080 for bootstrap, 8081 for node2, etc.)
|
||||
// Determine gateway port (typically API port + 3079, or 8080 for node-1, 8081 for node-2, etc.)
|
||||
gatewayPort := 8080
|
||||
if strings.Contains(dataDir, "node2") {
|
||||
gatewayPort = 8081
|
||||
@ -665,7 +1251,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
|
||||
}
|
||||
|
||||
// Always ensure API address is correct (don't just check, always set it)
|
||||
correctAPIAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, ipfsPort)
|
||||
correctAPIAddr := fmt.Sprintf(`["/ip4/0.0.0.0/tcp/%d"]`, ipfsPort)
|
||||
cm.logger.Info("Ensuring IPFS API address is correct",
|
||||
zap.String("repo", ipfsRepoPath),
|
||||
zap.Int("port", ipfsPort),
|
||||
@ -679,7 +1265,7 @@ func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
|
||||
}
|
||||
|
||||
// Always ensure Gateway address is correct
|
||||
correctGatewayAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, gatewayPort)
|
||||
correctGatewayAddr := fmt.Sprintf(`["/ip4/0.0.0.0/tcp/%d"]`, gatewayPort)
|
||||
cm.logger.Info("Ensuring IPFS Gateway address is correct",
|
||||
zap.String("repo", ipfsRepoPath),
|
||||
zap.Int("port", gatewayPort),
|
||||
|
||||
@ -54,6 +54,7 @@ const (
|
||||
ComponentClient Component = "CLIENT"
|
||||
ComponentGeneral Component = "GENERAL"
|
||||
ComponentAnyone Component = "ANYONE"
|
||||
ComponentGateway Component = "GATEWAY"
|
||||
)
|
||||
|
||||
// getComponentColor returns the color for a specific component
|
||||
@ -75,6 +76,8 @@ func getComponentColor(component Component) string {
|
||||
return Yellow
|
||||
case ComponentAnyone:
|
||||
return Cyan
|
||||
case ComponentGateway:
|
||||
return BrightGreen
|
||||
default:
|
||||
return White
|
||||
}
|
||||
@ -101,8 +104,10 @@ func getLevelColor(level zapcore.Level) string {
|
||||
// coloredConsoleEncoder creates a custom encoder with colors
|
||||
func coloredConsoleEncoder(enableColors bool) zapcore.Encoder {
|
||||
config := zap.NewDevelopmentEncoderConfig()
|
||||
|
||||
// Ultra-short timestamp: HH:MM:SS (no milliseconds, no date, no timezone)
|
||||
config.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
|
||||
timeStr := t.Format("2006-01-02T15:04:05.000Z0700")
|
||||
timeStr := t.Format("15:04:05")
|
||||
if enableColors {
|
||||
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, timeStr, Reset))
|
||||
} else {
|
||||
@ -110,21 +115,41 @@ func coloredConsoleEncoder(enableColors bool) zapcore.Encoder {
|
||||
}
|
||||
}
|
||||
|
||||
// Single letter level: D, I, W, E
|
||||
config.EncodeLevel = func(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
|
||||
levelStr := strings.ToUpper(level.String())
|
||||
levelMap := map[zapcore.Level]string{
|
||||
zapcore.DebugLevel: "D",
|
||||
zapcore.InfoLevel: "I",
|
||||
zapcore.WarnLevel: "W",
|
||||
zapcore.ErrorLevel: "E",
|
||||
}
|
||||
levelStr := levelMap[level]
|
||||
if levelStr == "" {
|
||||
levelStr = "?"
|
||||
}
|
||||
if enableColors {
|
||||
color := getLevelColor(level)
|
||||
enc.AppendString(fmt.Sprintf("%s%s%-5s%s", color, Bold, levelStr, Reset))
|
||||
enc.AppendString(fmt.Sprintf("%s%s%s%s", color, Bold, levelStr, Reset))
|
||||
} else {
|
||||
enc.AppendString(fmt.Sprintf("%-5s", levelStr))
|
||||
enc.AppendString(levelStr)
|
||||
}
|
||||
}
|
||||
|
||||
// Just filename, no line number for cleaner output
|
||||
config.EncodeCaller = func(caller zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) {
|
||||
file := caller.File
|
||||
// Extract just the filename from the path
|
||||
if idx := strings.LastIndex(file, "/"); idx >= 0 {
|
||||
file = file[idx+1:]
|
||||
}
|
||||
// Remove .go extension for even more compact format
|
||||
if strings.HasSuffix(file, ".go") {
|
||||
file = file[:len(file)-3]
|
||||
}
|
||||
if enableColors {
|
||||
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, caller.TrimmedPath(), Reset))
|
||||
enc.AppendString(fmt.Sprintf("%s%s%s", Dim, file, Reset))
|
||||
} else {
|
||||
enc.AppendString(caller.TrimmedPath())
|
||||
enc.AppendString(file)
|
||||
}
|
||||
}
|
||||
|
||||
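A self-contained sketch of the compact encoder settings introduced above (HH:MM:SS time, single-letter level, bare file name), applied to a stock zap development config and without the colour handling.

package main

import (
	"os"
	"path/filepath"
	"strings"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewDevelopmentEncoderConfig()

	// HH:MM:SS timestamps, as in the encoder above.
	cfg.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
		enc.AppendString(t.Format("15:04:05"))
	}

	// Single-letter levels: D, I, W, E (anything else falls back to "?").
	cfg.EncodeLevel = func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
		letters := map[zapcore.Level]string{
			zapcore.DebugLevel: "D", zapcore.InfoLevel: "I",
			zapcore.WarnLevel: "W", zapcore.ErrorLevel: "E",
		}
		s := letters[l]
		if s == "" {
			s = "?"
		}
		enc.AppendString(s)
	}

	// Bare file name without the .go extension.
	cfg.EncodeCaller = func(c zapcore.EntryCaller, enc zapcore.PrimitiveArrayEncoder) {
		enc.AppendString(strings.TrimSuffix(filepath.Base(c.File), ".go"))
	}

	core := zapcore.NewCore(zapcore.NewConsoleEncoder(cfg), zapcore.AddSync(os.Stdout), zapcore.DebugLevel)
	logger := zap.New(core, zap.AddCaller())
	logger.Info("compact encoder demo") // e.g. 12:34:56  I  main  compact encoder demo
}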
@ -157,6 +182,33 @@ func NewDefaultLogger(component Component) (*ColoredLogger, error) {
|
||||
return NewColoredLogger(component, true)
|
||||
}
|
||||
|
||||
// NewFileLogger creates a logger that writes to a file
|
||||
func NewFileLogger(component Component, filePath string, enableColors bool) (*ColoredLogger, error) {
|
||||
// Create encoder
|
||||
encoder := coloredConsoleEncoder(enableColors)
|
||||
|
||||
// Create file writer
|
||||
file, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open log file %s: %w", filePath, err)
|
||||
}
|
||||
|
||||
// Create core
|
||||
core := zapcore.NewCore(
|
||||
encoder,
|
||||
zapcore.AddSync(file),
|
||||
zapcore.DebugLevel,
|
||||
)
|
||||
|
||||
// Create logger with caller information
|
||||
logger := zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1))
|
||||
|
||||
return &ColoredLogger{
|
||||
Logger: logger,
|
||||
enableColors: enableColors,
|
||||
}, nil
|
||||
}
|
||||
|
||||
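A minimal usage-style sketch of a file-backed zap logger along the lines of NewFileLogger above; the log path is hypothetical and the ColoredLogger wrapper is omitted.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	path := "/tmp/debros-node.log" // hypothetical path, not one the project defines

	// Append-mode file handle, same flags as NewFileLogger uses.
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		fmt.Println("open log file:", err)
		return
	}
	defer f.Close()

	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.AddSync(f),
		zapcore.DebugLevel,
	)
	logger := zap.New(core, zap.AddCaller())
	defer logger.Sync()

	logger.Info("node started", zap.String("component", "NODE"))
}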
// Component-specific logging methods
|
||||
func (l *ColoredLogger) ComponentInfo(component Component, msg string, fields ...zap.Field) {
|
||||
if l.enableColors {
|
||||
|
||||
@ -10,6 +10,8 @@ import (
|
||||
"github.com/mackerelio/go-osstat/cpu"
|
||||
"github.com/mackerelio/go-osstat/memory"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
)
|
||||
|
||||
func logPeerStatus(n *Node, currentPeerCount int, lastPeerCount int, firstCheck bool) (int, bool) {
|
||||
@ -91,13 +93,13 @@ func announceMetrics(n *Node, peers []peer.ID, cpuUsage uint64, memUsage *memory
|
||||
}
|
||||
|
||||
msg := struct {
|
||||
PeerID string `json:"peer_id"`
|
||||
PeerCount int `json:"peer_count"`
|
||||
PeerIDs []string `json:"peer_ids,omitempty"`
|
||||
CPU uint64 `json:"cpu_usage"`
|
||||
Memory uint64 `json:"memory_usage"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
ClusterHealth map[string]interface{} `json:"cluster_health,omitempty"`
|
||||
PeerID string `json:"peer_id"`
|
||||
PeerCount int `json:"peer_count"`
|
||||
PeerIDs []string `json:"peer_ids,omitempty"`
|
||||
CPU uint64 `json:"cpu_usage"`
|
||||
Memory uint64 `json:"memory_usage"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
ClusterHealth map[string]interface{} `json:"cluster_health,omitempty"`
|
||||
}{
|
||||
PeerID: n.host.ID().String(),
|
||||
PeerCount: len(peers),
|
||||
@ -210,6 +212,38 @@ func (n *Node) startConnectionMonitoring() {
|
||||
if err := announceMetrics(n, peers, cpuUsage, mem); err != nil {
|
||||
n.logger.Error("Failed to announce metrics", zap.Error(err))
|
||||
}
|
||||
|
||||
// Periodically update IPFS Cluster peer addresses
|
||||
// This discovers all cluster peers and updates peer_addresses in service.json
|
||||
// so IPFS Cluster can automatically connect to all discovered peers
|
||||
if n.clusterConfigManager != nil {
|
||||
// First try to discover from LibP2P connections (works even if cluster peers aren't connected yet)
|
||||
// This runs every minute to discover peers automatically via LibP2P discovery
|
||||
if time.Now().Unix()%60 == 0 {
|
||||
if success, err := n.clusterConfigManager.DiscoverClusterPeersFromLibP2P(n.host); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to discover cluster peers from LibP2P", zap.Error(err))
|
||||
} else if success {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses discovered from LibP2P")
|
||||
}
|
||||
}
|
||||
|
||||
// Also try to update from cluster API (works once peers are connected)
|
||||
// Update all cluster peers every 2 minutes to discover new peers
|
||||
if time.Now().Unix()%120 == 0 {
|
||||
if success, err := n.clusterConfigManager.UpdateAllClusterPeers(); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to update cluster peers during monitoring", zap.Error(err))
|
||||
} else if success {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Cluster peer addresses updated during monitoring")
|
||||
}
|
||||
|
||||
// Try to repair peer configuration
|
||||
if success, err := n.clusterConfigManager.RepairPeerConfiguration(); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair peer addresses during monitoring", zap.Error(err))
|
||||
} else if success {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Peer configuration repaired during monitoring")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
pkg/node/node.go (722 lines changed)
@ -2,8 +2,13 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -18,10 +23,13 @@ import (
|
||||
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
|
||||
"github.com/DeBrosOfficial/network/pkg/config"
|
||||
"github.com/DeBrosOfficial/network/pkg/discovery"
|
||||
"github.com/DeBrosOfficial/network/pkg/encryption"
|
||||
"github.com/DeBrosOfficial/network/pkg/gateway"
|
||||
"github.com/DeBrosOfficial/network/pkg/ipfs"
|
||||
"github.com/DeBrosOfficial/network/pkg/logging"
|
||||
"github.com/DeBrosOfficial/network/pkg/pubsub"
|
||||
@ -39,7 +47,7 @@ type Node struct {
|
||||
clusterDiscovery *database.ClusterDiscoveryService
|
||||
|
||||
// Peer discovery
|
||||
bootstrapCancel context.CancelFunc
|
||||
peerDiscoveryCancel context.CancelFunc
|
||||
|
||||
// PubSub
|
||||
pubsub *pubsub.ClientAdapter
|
||||
@ -49,6 +57,20 @@ type Node struct {
|
||||
|
||||
// IPFS Cluster config manager
|
||||
clusterConfigManager *ipfs.ClusterConfigManager
|
||||
|
||||
// Full gateway (for API, auth, pubsub, and internal service routing)
|
||||
apiGateway *gateway.Gateway
|
||||
apiGatewayServer *http.Server
|
||||
|
||||
// SNI gateway (for TCP routing of raft, ipfs, olric, etc.)
|
||||
sniGateway *gateway.TCPSNIGateway
|
||||
|
||||
// Shared certificate manager for HTTPS and SNI
|
||||
certManager *autocert.Manager
|
||||
|
||||
// Certificate ready signal - closed when TLS certificates are extracted and ready for use
|
||||
// Used to coordinate RQLite node-to-node TLS startup with certificate provisioning
|
||||
certReady chan struct{}
|
||||
}
|
||||
|
||||
// NewNode creates a new network node
|
||||
@ -69,24 +91,26 @@ func NewNode(cfg *config.Config) (*Node, error) {
|
||||
func (n *Node) startRQLite(ctx context.Context) error {
|
||||
n.logger.Info("Starting RQLite database")
|
||||
|
||||
// Determine node identifier for log filename - use node ID for unique filenames
|
||||
nodeID := n.config.Node.ID
|
||||
if nodeID == "" {
|
||||
// Default to "node" if ID is not set
|
||||
nodeID = "node"
|
||||
}
|
||||
|
||||
// Create RQLite manager
|
||||
n.rqliteManager = database.NewRQLiteManager(&n.config.Database, &n.config.Discovery, n.config.Node.DataDir, n.logger.Logger)
|
||||
n.rqliteManager.SetNodeType(nodeID)
|
||||
|
||||
// Initialize cluster discovery service if LibP2P host is available
|
||||
if n.host != nil && n.discoveryManager != nil {
|
||||
// Determine node type
|
||||
nodeType := "node"
|
||||
if n.config.Node.Type == "bootstrap" {
|
||||
nodeType = "bootstrap"
|
||||
}
|
||||
|
||||
// Create cluster discovery service
|
||||
// Create cluster discovery service (all nodes are unified)
|
||||
n.clusterDiscovery = database.NewClusterDiscoveryService(
|
||||
n.host,
|
||||
n.discoveryManager,
|
||||
n.rqliteManager,
|
||||
n.config.Node.ID,
|
||||
nodeType,
|
||||
"node", // Unified node type
|
||||
n.config.Discovery.RaftAdvAddress,
|
||||
n.config.Discovery.HttpAdvAddress,
|
||||
n.config.Node.DataDir,
|
||||
@ -109,6 +133,25 @@ func (n *Node) startRQLite(ctx context.Context) error {
|
||||
n.logger.Info("Cluster discovery service started (waiting for RQLite)")
|
||||
}
|
||||
|
||||
// If node-to-node TLS is configured, wait for certificates to be provisioned
|
||||
// This ensures RQLite can start with TLS when joining through the SNI gateway
|
||||
if n.config.Database.NodeCert != "" && n.config.Database.NodeKey != "" && n.certReady != nil {
|
||||
n.logger.Info("RQLite node TLS configured, waiting for certificates to be provisioned...",
|
||||
zap.String("node_cert", n.config.Database.NodeCert),
|
||||
zap.String("node_key", n.config.Database.NodeKey))
|
||||
|
||||
// Wait for certificate ready signal with timeout
|
||||
certTimeout := 5 * time.Minute
|
||||
select {
|
||||
case <-n.certReady:
|
||||
n.logger.Info("Certificates ready, proceeding with RQLite startup")
|
||||
case <-time.After(certTimeout):
|
||||
return fmt.Errorf("timeout waiting for TLS certificates after %v - ensure HTTPS is configured and ports 80/443 are accessible for ACME challenges", certTimeout)
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("context cancelled while waiting for certificates: %w", ctx.Err())
|
||||
}
|
||||
}
|
||||
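A reduced sketch of the certificate-gating pattern above: block on a ready channel with a timeout and context cancellation before continuing startup. The helper name and durations are illustrative only.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForReady blocks until ready is closed, the timeout elapses, or ctx is done,
// mirroring the wait on certReady before RQLite starts with node-to-node TLS.
func waitForReady(ctx context.Context, ready <-chan struct{}, timeout time.Duration) error {
	select {
	case <-ready:
		return nil
	case <-time.After(timeout):
		return errors.New("timeout waiting for readiness signal")
	case <-ctx.Done():
		return fmt.Errorf("cancelled while waiting: %w", ctx.Err())
	}
}

func main() {
	certReady := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // simulate certificate extraction finishing
		close(certReady)
	}()
	err := waitForReady(context.Background(), certReady, 5*time.Second)
	fmt.Println("ready:", err == nil)
}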
|
||||
// Start RQLite FIRST before updating metadata
|
||||
if err := n.rqliteManager.Start(ctx); err != nil {
|
||||
return err
|
||||
@ -131,25 +174,70 @@ func (n *Node) startRQLite(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// bootstrapPeerSource returns a PeerSource that yields peers from BootstrapPeers.
|
||||
func bootstrapPeerSource(bootstrapAddrs []string, logger *zap.Logger) func(context.Context, int) <-chan peer.AddrInfo {
|
||||
// extractIPFromMultiaddr extracts the IP address from a peer multiaddr
|
||||
// Supports IP4, IP6, DNS4, DNS6, and DNSADDR protocols
|
||||
func extractIPFromMultiaddr(multiaddrStr string) string {
|
||||
ma, err := multiaddr.NewMultiaddr(multiaddrStr)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// First, try to extract direct IP address
|
||||
var ip string
|
||||
var dnsName string
|
||||
multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
|
||||
switch c.Protocol().Code {
|
||||
case multiaddr.P_IP4, multiaddr.P_IP6:
|
||||
ip = c.Value()
|
||||
return false // Stop iteration - found IP
|
||||
case multiaddr.P_DNS4, multiaddr.P_DNS6, multiaddr.P_DNSADDR:
|
||||
dnsName = c.Value()
|
||||
// Continue to check for IP, but remember DNS name as fallback
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// If we found a direct IP, return it
|
||||
if ip != "" {
|
||||
return ip
|
||||
}
|
||||
|
||||
// If we found a DNS name, try to resolve it
|
||||
if dnsName != "" {
|
||||
if resolvedIPs, err := net.LookupIP(dnsName); err == nil && len(resolvedIPs) > 0 {
|
||||
// Prefer IPv4 addresses, but accept IPv6 if that's all we have
|
||||
for _, resolvedIP := range resolvedIPs {
|
||||
if resolvedIP.To4() != nil {
|
||||
return resolvedIP.String()
|
||||
}
|
||||
}
|
||||
// Return first IPv6 address if no IPv4 found
|
||||
return resolvedIPs[0].String()
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
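A small sketch of the IPv4-preferring DNS fallback used by extractIPFromMultiaddr, isolated from the multiaddr parsing; the helper name is hypothetical.

package main

import (
	"fmt"
	"net"
)

// preferIPv4 picks an IPv4 address from a DNS lookup when one exists,
// otherwise falls back to the first result, as the function above does.
func preferIPv4(host string) (string, error) {
	ips, err := net.LookupIP(host)
	if err != nil {
		return "", err
	}
	if len(ips) == 0 {
		return "", fmt.Errorf("no addresses for %s", host)
	}
	for _, ip := range ips {
		if ip.To4() != nil {
			return ip.String(), nil
		}
	}
	return ips[0].String(), nil
}

func main() {
	// localhost resolves everywhere, so the example stays self-contained.
	ip, err := preferIPv4("localhost")
	fmt.Println(ip, err)
}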
|
||||
// peerSource returns a PeerSource that yields peers from configured peers.
|
||||
func peerSource(peerAddrs []string, logger *zap.Logger) func(context.Context, int) <-chan peer.AddrInfo {
|
||||
return func(ctx context.Context, num int) <-chan peer.AddrInfo {
|
||||
out := make(chan peer.AddrInfo, num)
|
||||
go func() {
|
||||
defer close(out)
|
||||
count := 0
|
||||
for _, s := range bootstrapAddrs {
|
||||
for _, s := range peerAddrs {
|
||||
if count >= num {
|
||||
return
|
||||
}
|
||||
ma, err := multiaddr.NewMultiaddr(s)
|
||||
if err != nil {
|
||||
logger.Debug("invalid bootstrap multiaddr", zap.String("addr", s), zap.Error(err))
|
||||
logger.Debug("invalid peer multiaddr", zap.String("addr", s), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
ai, err := peer.AddrInfoFromP2pAddr(ma)
|
||||
if err != nil {
|
||||
logger.Debug("failed to parse bootstrap peer", zap.String("addr", s), zap.Error(err))
|
||||
logger.Debug("failed to parse peer address", zap.String("addr", s), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
select {
|
||||
@ -164,8 +252,8 @@ func bootstrapPeerSource(bootstrapAddrs []string, logger *zap.Logger) func(conte
|
||||
}
|
||||
}
|
||||
|
||||
// hasBootstrapConnections checks if we're connected to any bootstrap peers
|
||||
func (n *Node) hasBootstrapConnections() bool {
|
||||
// hasPeerConnections checks if we're connected to any peers
|
||||
func (n *Node) hasPeerConnections() bool {
|
||||
if n.host == nil || len(n.config.Discovery.BootstrapPeers) == 0 {
|
||||
return false
|
||||
}
|
||||
@ -175,10 +263,10 @@ func (n *Node) hasBootstrapConnections() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Parse bootstrap peer IDs
|
||||
bootstrapPeerIDs := make(map[peer.ID]bool)
|
||||
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
|
||||
ma, err := multiaddr.NewMultiaddr(bootstrapAddr)
|
||||
// Parse peer IDs
|
||||
peerIDs := make(map[peer.ID]bool)
|
||||
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
|
||||
ma, err := multiaddr.NewMultiaddr(peerAddr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@ -186,12 +274,12 @@ func (n *Node) hasBootstrapConnections() bool {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
bootstrapPeerIDs[peerInfo.ID] = true
|
||||
peerIDs[peerInfo.ID] = true
|
||||
}
|
||||
|
||||
// Check if any connected peer is a bootstrap peer
|
||||
// Check if any connected peer is in our peer list
|
||||
for _, peerID := range connectedPeers {
|
||||
if bootstrapPeerIDs[peerID] {
|
||||
if peerIDs[peerID] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -226,8 +314,8 @@ func addJitter(interval time.Duration) time.Duration {
|
||||
return result
|
||||
}
|
||||
|
||||
// connectToBootstrapPeer connects to a single bootstrap peer
|
||||
func (n *Node) connectToBootstrapPeer(ctx context.Context, addr string) error {
|
||||
// connectToPeerAddr connects to a single peer address
|
||||
func (n *Node) connectToPeerAddr(ctx context.Context, addr string) error {
|
||||
ma, err := multiaddr.NewMultiaddr(addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid multiaddr: %w", err)
|
||||
@ -239,16 +327,16 @@ func (n *Node) connectToBootstrapPeer(ctx context.Context, addr string) error {
|
||||
return fmt.Errorf("failed to extract peer info: %w", err)
|
||||
}
|
||||
|
||||
// Avoid dialing ourselves: if the bootstrap address resolves to our own peer ID, skip.
|
||||
// Avoid dialing ourselves: if the address resolves to our own peer ID, skip.
|
||||
if n.host != nil && peerInfo.ID == n.host.ID() {
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Skipping bootstrap address because it resolves to self",
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Skipping peer address because it resolves to self",
|
||||
zap.String("addr", addr),
|
||||
zap.String("peer_id", peerInfo.ID.String()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log resolved peer info prior to connect
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Resolved bootstrap peer",
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Resolved peer",
|
||||
zap.String("peer_id", peerInfo.ID.String()),
|
||||
zap.String("addr", addr),
|
||||
zap.Int("addr_count", len(peerInfo.Addrs)),
|
||||
@ -259,28 +347,28 @@ func (n *Node) connectToBootstrapPeer(ctx context.Context, addr string) error {
|
||||
return fmt.Errorf("failed to connect to peer: %w", err)
|
||||
}
|
||||
|
||||
n.logger.Info("Connected to bootstrap peer",
|
||||
n.logger.Info("Connected to peer",
|
||||
zap.String("peer", peerInfo.ID.String()),
|
||||
zap.String("addr", addr))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// connectToBootstrapPeers connects to configured LibP2P bootstrap peers
|
||||
func (n *Node) connectToBootstrapPeers(ctx context.Context) error {
|
||||
// connectToPeers connects to configured LibP2P peers
|
||||
func (n *Node) connectToPeers(ctx context.Context) error {
|
||||
if len(n.config.Discovery.BootstrapPeers) == 0 {
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "No bootstrap peers configured")
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "No peers configured")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use passed context with a reasonable timeout for bootstrap connections
|
||||
// Use passed context with a reasonable timeout for peer connections
|
||||
connectCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
|
||||
if err := n.connectToBootstrapPeer(connectCtx, bootstrapAddr); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to bootstrap peer",
|
||||
zap.String("addr", bootstrapAddr),
|
||||
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
|
||||
if err := n.connectToPeerAddr(connectCtx, peerAddr); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to peer",
|
||||
zap.String("addr", peerAddr),
|
||||
zap.Error(err))
|
||||
continue
|
||||
}
|
||||
@ -326,7 +414,7 @@ func (n *Node) startLibP2P() error {
|
||||
// For production, these would be enabled
|
||||
isLocalhost := len(n.config.Node.ListenAddresses) > 0 &&
|
||||
(strings.Contains(n.config.Node.ListenAddresses[0], "localhost") ||
|
||||
strings.Contains(n.config.Node.ListenAddresses[0], "localhost"))
|
||||
strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1"))
|
||||
|
||||
if isLocalhost {
|
||||
n.logger.ComponentInfo(logging.ComponentLibP2P, "Localhost detected - disabling NAT services for local development")
|
||||
@ -339,7 +427,7 @@ func (n *Node) startLibP2P() error {
|
||||
libp2p.EnableRelay(),
|
||||
libp2p.NATPortMap(),
|
||||
libp2p.EnableAutoRelayWithPeerSource(
|
||||
bootstrapPeerSource(n.config.Discovery.BootstrapPeers, n.logger.Logger),
|
||||
peerSource(n.config.Discovery.BootstrapPeers, n.logger.Logger),
|
||||
),
|
||||
)
|
||||
}
|
||||
@ -365,59 +453,59 @@ func (n *Node) startLibP2P() error {
|
||||
n.pubsub = pubsub.NewClientAdapter(ps, n.config.Discovery.NodeNamespace)
|
||||
n.logger.Info("Initialized pubsub adapter on namespace", zap.String("namespace", n.config.Discovery.NodeNamespace))
|
||||
|
||||
// Log configured bootstrap peers
|
||||
// Log configured peers
|
||||
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Configured bootstrap peers",
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Configured peers",
|
||||
zap.Strings("peers", n.config.Discovery.BootstrapPeers))
|
||||
} else {
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "No bootstrap peers configured")
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "No peers configured")
|
||||
}
|
||||
|
||||
// Connect to LibP2P bootstrap peers if configured
|
||||
if err := n.connectToBootstrapPeers(context.Background()); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to bootstrap peers", zap.Error(err))
|
||||
// Don't fail - continue without bootstrap connections
|
||||
// Connect to LibP2P peers if configured
|
||||
if err := n.connectToPeers(context.Background()); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to connect to peers", zap.Error(err))
|
||||
// Don't fail - continue without peer connections
|
||||
}
|
||||
|
||||
// Start exponential backoff reconnection for bootstrap peers
|
||||
// Start exponential backoff reconnection for peers
|
||||
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||
bootstrapCtx, cancel := context.WithCancel(context.Background())
|
||||
n.bootstrapCancel = cancel
|
||||
peerCtx, cancel := context.WithCancel(context.Background())
|
||||
n.peerDiscoveryCancel = cancel
|
||||
|
||||
go func() {
|
||||
interval := 5 * time.Second
|
||||
consecutiveFailures := 0
|
||||
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Starting bootstrap peer reconnection with exponential backoff",
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Starting peer reconnection with exponential backoff",
|
||||
zap.Duration("initial_interval", interval),
|
||||
zap.Duration("max_interval", 10*time.Minute))
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-bootstrapCtx.Done():
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap reconnection loop stopped")
|
||||
case <-peerCtx.Done():
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Peer reconnection loop stopped")
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Check if we need to attempt connection
|
||||
if !n.hasBootstrapConnections() {
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Attempting bootstrap peer connection",
|
||||
if !n.hasPeerConnections() {
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Attempting peer connection",
|
||||
zap.Duration("current_interval", interval),
|
||||
zap.Int("consecutive_failures", consecutiveFailures))
|
||||
|
||||
if err := n.connectToBootstrapPeers(context.Background()); err != nil {
|
||||
if err := n.connectToPeers(context.Background()); err != nil {
|
||||
consecutiveFailures++
|
||||
// Calculate next backoff interval
|
||||
jitteredInterval := addJitter(interval)
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Bootstrap connection failed, backing off",
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Peer connection failed, backing off",
|
||||
zap.Error(err),
|
||||
zap.Duration("next_attempt_in", jitteredInterval),
|
||||
zap.Int("consecutive_failures", consecutiveFailures))
|
||||
|
||||
// Sleep with jitter
|
||||
select {
|
||||
case <-bootstrapCtx.Done():
|
||||
case <-peerCtx.Done():
|
||||
return
|
||||
case <-time.After(jitteredInterval):
|
||||
}
|
||||
@ -427,14 +515,14 @@ func (n *Node) startLibP2P() error {
|
||||
|
||||
// Log interval increases occasionally to show progress
|
||||
if consecutiveFailures%5 == 0 {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Bootstrap connection still failing",
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Peer connection still failing",
|
||||
zap.Int("consecutive_failures", consecutiveFailures),
|
||||
zap.Duration("current_interval", interval))
|
||||
}
|
||||
} else {
|
||||
// Success! Reset interval and counters
|
||||
if consecutiveFailures > 0 {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Successfully connected to bootstrap peers",
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Successfully connected to peers",
|
||||
zap.Int("failures_overcome", consecutiveFailures))
|
||||
}
|
||||
interval = 5 * time.Second
|
||||
@ -442,15 +530,15 @@ func (n *Node) startLibP2P() error {
|
||||
|
||||
// Wait 30 seconds before checking connection again
|
||||
select {
|
||||
case <-bootstrapCtx.Done():
|
||||
case <-peerCtx.Done():
|
||||
return
|
||||
case <-time.After(30 * time.Second):
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// We have bootstrap connections, just wait and check periodically
|
||||
// We have peer connections, just wait and check periodically
|
||||
select {
|
||||
case <-bootstrapCtx.Done():
|
||||
case <-peerCtx.Done():
|
||||
return
|
||||
case <-time.After(30 * time.Second):
|
||||
}
|
||||
@ -459,15 +547,15 @@ func (n *Node) startLibP2P() error {
|
||||
}()
|
||||
}
|
||||
|
||||
// Add bootstrap peers to peerstore for peer exchange
|
||||
// Add peers to peerstore for peer exchange
|
||||
if len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Adding bootstrap peers to peerstore")
|
||||
for _, bootstrapAddr := range n.config.Discovery.BootstrapPeers {
|
||||
if ma, err := multiaddr.NewMultiaddr(bootstrapAddr); err == nil {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Adding peers to peerstore")
|
||||
for _, peerAddr := range n.config.Discovery.BootstrapPeers {
|
||||
if ma, err := multiaddr.NewMultiaddr(peerAddr); err == nil {
|
||||
if peerInfo, err := peer.AddrInfoFromP2pAddr(ma); err == nil {
|
||||
// Add to peerstore with longer TTL for peer exchange
|
||||
n.host.Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, time.Hour*24)
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Added bootstrap peer to peerstore",
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Added peer to peerstore",
|
||||
zap.String("peer", peerInfo.ID.String()))
|
||||
}
|
||||
}
|
||||
@ -580,14 +668,33 @@ func (n *Node) stopPeerDiscovery() {
|
||||
func (n *Node) Stop() error {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Stopping network node")
|
||||
|
||||
// Stop HTTP Gateway server
|
||||
if n.apiGatewayServer != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
_ = n.apiGatewayServer.Shutdown(ctx)
|
||||
}
|
||||
|
||||
// Close Gateway client
|
||||
if n.apiGateway != nil {
|
||||
n.apiGateway.Close()
|
||||
}
|
||||
|
||||
// Stop SNI Gateway
|
||||
if n.sniGateway != nil {
|
||||
if err := n.sniGateway.Stop(); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "SNI Gateway stop error", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// Stop cluster discovery
|
||||
if n.clusterDiscovery != nil {
|
||||
n.clusterDiscovery.Stop()
|
||||
}
|
||||
|
||||
// Stop bootstrap reconnection loop
|
||||
if n.bootstrapCancel != nil {
|
||||
n.bootstrapCancel()
|
||||
// Stop peer reconnection loop
|
||||
if n.peerDiscoveryCancel != nil {
|
||||
n.peerDiscoveryCancel()
|
||||
}
|
||||
|
||||
// Stop peer discovery
|
||||
@ -610,6 +717,457 @@ func (n *Node) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadNodePeerIDFromIdentity safely loads the node's peer ID from its identity file
|
||||
// This is needed before the host is initialized, so we read directly from the file
|
||||
func loadNodePeerIDFromIdentity(dataDir string) string {
|
||||
identityFile := filepath.Join(os.ExpandEnv(dataDir), "identity.key")
|
||||
|
||||
// Expand ~ in path
|
||||
if strings.HasPrefix(identityFile, "~") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
identityFile = filepath.Join(home, identityFile[1:])
|
||||
}
|
||||
|
||||
// Load identity from file
|
||||
if info, err := encryption.LoadIdentity(identityFile); err == nil {
|
||||
return info.PeerID.String()
|
||||
}
|
||||
|
||||
return "" // Return empty string if can't load (gateway will work without it)
|
||||
}
|
||||
|
||||
// startHTTPGateway initializes and starts the full API gateway with auth, pubsub, and API endpoints
|
||||
func (n *Node) startHTTPGateway(ctx context.Context) error {
|
||||
if !n.config.HTTPGateway.Enabled {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "HTTP Gateway disabled in config")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create separate logger for gateway
|
||||
logFile := filepath.Join(os.ExpandEnv(n.config.Node.DataDir), "..", "logs", "gateway.log")
|
||||
|
||||
// Ensure logs directory exists
|
||||
logsDir := filepath.Dir(logFile)
|
||||
if err := os.MkdirAll(logsDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %w", err)
|
||||
}
|
||||
|
||||
gatewayLogger, err := logging.NewFileLogger(logging.ComponentGeneral, logFile, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create gateway logger: %w", err)
|
||||
}
|
||||
|
||||
// Create full API Gateway for auth, pubsub, rqlite, and API endpoints
|
||||
// This replaces both the old reverse proxy gateway and the standalone gateway
|
||||
gwCfg := &gateway.Config{
|
||||
ListenAddr: n.config.HTTPGateway.ListenAddr,
|
||||
ClientNamespace: n.config.HTTPGateway.ClientNamespace,
|
||||
BootstrapPeers: n.config.Discovery.BootstrapPeers,
|
||||
NodePeerID: loadNodePeerIDFromIdentity(n.config.Node.DataDir), // Load the node's actual peer ID from its identity file
|
||||
RQLiteDSN: n.config.HTTPGateway.RQLiteDSN,
|
||||
OlricServers: n.config.HTTPGateway.OlricServers,
|
||||
OlricTimeout: n.config.HTTPGateway.OlricTimeout,
|
||||
IPFSClusterAPIURL: n.config.HTTPGateway.IPFSClusterAPIURL,
|
||||
IPFSAPIURL: n.config.HTTPGateway.IPFSAPIURL,
|
||||
IPFSTimeout: n.config.HTTPGateway.IPFSTimeout,
|
||||
// HTTPS/TLS configuration
|
||||
EnableHTTPS: n.config.HTTPGateway.HTTPS.Enabled,
|
||||
DomainName: n.config.HTTPGateway.HTTPS.Domain,
|
||||
TLSCacheDir: n.config.HTTPGateway.HTTPS.CacheDir,
|
||||
}
|
||||
|
||||
apiGateway, err := gateway.New(gatewayLogger, gwCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create full API gateway: %w", err)
|
||||
}
|
||||
|
||||
n.apiGateway = apiGateway
|
||||
|
||||
// Check if HTTPS is enabled and set up certManager BEFORE starting goroutine
|
||||
// This ensures n.certManager is set before SNI gateway initialization checks it
|
||||
var certManager *autocert.Manager
|
||||
var tlsCacheDir string
|
||||
if gwCfg.EnableHTTPS && gwCfg.DomainName != "" {
|
||||
tlsCacheDir = gwCfg.TLSCacheDir
|
||||
if tlsCacheDir == "" {
|
||||
tlsCacheDir = "/home/debros/.orama/tls-cache"
|
||||
}
|
||||
|
||||
// Ensure TLS cache directory exists and is writable
|
||||
if err := os.MkdirAll(tlsCacheDir, 0700); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to create TLS cache directory",
|
||||
zap.String("dir", tlsCacheDir),
|
||||
zap.Error(err),
|
||||
)
|
||||
} else {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "TLS cache directory ready",
|
||||
zap.String("dir", tlsCacheDir),
|
||||
)
|
||||
}
|
||||
|
||||
// Create TLS configuration with Let's Encrypt autocert
|
||||
// Using STAGING environment to avoid rate limits during development/testing
|
||||
// TODO: Switch to production when ready (remove Client field)
|
||||
certManager = &autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: autocert.HostWhitelist(gwCfg.DomainName),
|
||||
Cache: autocert.DirCache(tlsCacheDir),
|
||||
Email: fmt.Sprintf("admin@%s", gwCfg.DomainName),
|
||||
Client: &acme.Client{
|
||||
DirectoryURL: "https://acme-staging-v02.api.letsencrypt.org/directory",
|
||||
},
|
||||
}
|
||||
|
||||
// Store certificate manager for use by SNI gateway
|
||||
n.certManager = certManager
|
||||
|
||||
// Initialize certificate ready channel - will be closed when certs are extracted
|
||||
// This allows RQLite to wait for certificates before starting with node TLS
|
||||
n.certReady = make(chan struct{})
|
||||
}
|
||||
|
||||
// Channel to signal when HTTP server is ready for ACME challenges
|
||||
httpReady := make(chan struct{})
|
||||
|
||||
// Start API Gateway in a goroutine
|
||||
go func() {
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Starting full API gateway",
|
||||
zap.String("listen_addr", gwCfg.ListenAddr),
|
||||
)
|
||||
|
||||
// Check if HTTPS is enabled
|
||||
if gwCfg.EnableHTTPS && gwCfg.DomainName != "" && certManager != nil {
|
||||
// Start HTTPS server with automatic certificate provisioning
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTPS enabled, starting secure gateway",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
)
|
||||
|
||||
// Determine HTTPS and HTTP ports
|
||||
httpsPort := 443
|
||||
httpPort := 80
|
||||
|
||||
// Start HTTP server for ACME challenges and redirects
|
||||
// certManager.HTTPHandler() must be the main handler, with a fallback for other requests
|
||||
httpServer := &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", httpPort),
|
||||
Handler: certManager.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Fallback for non-ACME requests: redirect to HTTPS
|
||||
target := fmt.Sprintf("https://%s%s", r.Host, r.URL.RequestURI())
|
||||
http.Redirect(w, r, target, http.StatusMovedPermanently)
|
||||
})),
|
||||
}
|
||||
|
||||
// Create HTTP listener first to ensure port 80 is bound before signaling ready
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Binding HTTP listener for ACME challenges",
|
||||
zap.Int("port", httpPort),
|
||||
)
|
||||
httpListener, err := net.Listen("tcp", fmt.Sprintf(":%d", httpPort))
|
||||
if err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to bind HTTP listener for ACME", zap.Error(err))
|
||||
close(httpReady) // Signal even on failure so SNI goroutine doesn't hang
|
||||
return
|
||||
}
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP server ready for ACME challenges",
|
||||
zap.Int("port", httpPort),
|
||||
zap.String("tls_cache_dir", tlsCacheDir),
|
||||
)
|
||||
|
||||
// Start HTTP server in background for ACME challenges
|
||||
go func() {
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP server serving ACME challenges",
|
||||
zap.String("addr", httpServer.Addr),
|
||||
)
|
||||
if err := httpServer.Serve(httpListener); err != nil && err != http.ErrServerClosed {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "HTTP server error", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
// Pre-provision the certificate BEFORE starting HTTPS server
|
||||
// This ensures we don't accept HTTPS connections without a valid certificate
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Pre-provisioning TLS certificate...",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
)
|
||||
|
||||
// Use a timeout context for certificate provisioning
|
||||
// If Let's Encrypt is rate-limited or unreachable, don't block forever
|
||||
certCtx, certCancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer certCancel()
|
||||
|
||||
certReq := &tls.ClientHelloInfo{
|
||||
ServerName: gwCfg.DomainName,
|
||||
}
|
||||
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Initiating certificate request to Let's Encrypt",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
zap.String("acme_environment", "staging"),
|
||||
)
|
||||
|
||||
// Try to get certificate with timeout
|
||||
certProvisionChan := make(chan error, 1)
|
||||
go func() {
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "GetCertificate goroutine started")
|
||||
_, err := certManager.GetCertificate(certReq)
|
||||
if err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "GetCertificate returned error",
|
||||
zap.Error(err),
|
||||
)
|
||||
} else {
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "GetCertificate succeeded")
|
||||
}
|
||||
certProvisionChan <- err
|
||||
}()
|
||||
|
||||
var certErr error
|
||||
select {
|
||||
case err := <-certProvisionChan:
|
||||
certErr = err
|
||||
if certErr != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Certificate provisioning failed",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
zap.Error(certErr),
|
||||
)
|
||||
}
|
||||
case <-certCtx.Done():
|
||||
certErr = fmt.Errorf("certificate provisioning timeout (Let's Encrypt may be rate-limited or unreachable)")
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Certificate provisioning timeout",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
zap.Duration("timeout", 30*time.Second),
|
||||
zap.Error(certErr),
|
||||
)
|
||||
}
|
||||
|
||||
if certErr != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to provision TLS certificate - HTTPS disabled",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
zap.Error(certErr),
|
||||
zap.String("http_server_status", "running on port 80 for HTTP fallback"),
|
||||
)
|
||||
// Signal ready for SNI goroutine (even though we're failing)
|
||||
close(httpReady)
|
||||
|
||||
// HTTP server on port 80 is already running, but it's configured to redirect to HTTPS
|
||||
// Replace its handler to serve the gateway directly instead of redirecting
|
||||
httpServer.Handler = apiGateway.Routes()
|
||||
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTP gateway available on port 80 only",
|
||||
zap.String("port", "80"),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "TLS certificate provisioned successfully",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
)
|
||||
|
||||
// Signal that HTTP server is ready for ACME challenges
|
||||
close(httpReady)
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
GetCertificate: certManager.GetCertificate,
|
||||
}
|
||||
|
||||
// Start HTTPS server
|
||||
httpsServer := &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", httpsPort),
|
||||
TLSConfig: tlsConfig,
|
||||
Handler: apiGateway.Routes(),
|
||||
}
|
||||
|
||||
n.apiGatewayServer = httpsServer
|
||||
|
||||
listener, err := tls.Listen("tcp", fmt.Sprintf(":%d", httpsPort), tlsConfig)
|
||||
if err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to create TLS listener", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "HTTPS gateway listener bound",
|
||||
zap.String("domain", gwCfg.DomainName),
|
||||
zap.Int("port", httpsPort),
|
||||
)
|
||||
|
||||
// Serve HTTPS
|
||||
if err := httpsServer.Serve(listener); err != nil && err != http.ErrServerClosed {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "HTTPS Gateway error", zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
// No HTTPS - signal ready immediately (no ACME needed)
|
||||
close(httpReady)
|
||||
|
||||
// Start plain HTTP server
|
||||
server := &http.Server{
|
||||
Addr: gwCfg.ListenAddr,
|
||||
Handler: apiGateway.Routes(),
|
||||
}
|
||||
|
||||
n.apiGatewayServer = server
|
||||
|
||||
// Try to bind listener
|
||||
ln, err := net.Listen("tcp", gwCfg.ListenAddr)
|
||||
if err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "failed to bind API gateway listener", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "API gateway listener bound", zap.String("listen_addr", ln.Addr().String()))
|
||||
|
||||
// Serve HTTP
|
||||
if err := server.Serve(ln); err != nil && err != http.ErrServerClosed {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "API Gateway error", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Initialize and start SNI gateway if HTTPS is enabled and SNI is configured
|
||||
// This runs in a separate goroutine that waits for HTTP server to be ready
|
||||
if n.config.HTTPGateway.SNI.Enabled && n.certManager != nil {
|
||||
go func() {
|
||||
// Wait for HTTP server to be ready for ACME challenges
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Waiting for HTTP server before SNI initialization...")
|
||||
<-httpReady
|
||||
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Initializing SNI gateway",
|
||||
zap.String("listen_addr", n.config.HTTPGateway.SNI.ListenAddr),
|
||||
)
|
||||
|
||||
// Provision the certificate from Let's Encrypt cache
|
||||
// This ensures the certificate file is downloaded and cached
|
||||
domain := n.config.HTTPGateway.HTTPS.Domain
|
||||
if domain != "" {
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Provisioning certificate for SNI",
|
||||
zap.String("domain", domain))
|
||||
|
||||
certReq := &tls.ClientHelloInfo{
|
||||
ServerName: domain,
|
||||
}
|
||||
if tlsCert, err := n.certManager.GetCertificate(certReq); err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to provision certificate for SNI",
|
||||
zap.String("domain", domain), zap.Error(err))
|
||||
return // Can't start SNI without certificate
|
||||
} else {
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Certificate provisioned for SNI",
|
||||
zap.String("domain", domain))
|
||||
|
||||
// Extract certificate to PEM files for SNI gateway
|
||||
// SNI gateway needs standard PEM cert files, not autocert cache format
|
||||
tlsCacheDir := n.config.HTTPGateway.HTTPS.CacheDir
|
||||
if tlsCacheDir == "" {
|
||||
tlsCacheDir = "/home/debros/.orama/tls-cache"
|
||||
}
|
||||
|
||||
certPath := filepath.Join(tlsCacheDir, domain+".crt")
|
||||
keyPath := filepath.Join(tlsCacheDir, domain+".key")
|
||||
|
||||
if err := extractPEMFromTLSCert(tlsCert, certPath, keyPath); err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to extract PEM from TLS cert for SNI",
|
||||
zap.Error(err))
|
||||
return // Can't start SNI without PEM files
|
||||
}
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "PEM certificates extracted for SNI",
|
||||
zap.String("cert_path", certPath), zap.String("key_path", keyPath))
|
||||
|
||||
// Signal that certificates are ready for RQLite node-to-node TLS
|
||||
if n.certReady != nil {
|
||||
close(n.certReady)
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "Certificate ready signal sent for RQLite node TLS")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "No domain configured for SNI certificate")
|
||||
return
|
||||
}
|
||||
|
||||
// Create SNI config with certificate files
|
||||
sniCfg := n.config.HTTPGateway.SNI
|
||||
|
||||
// Use the same gateway logger for SNI gateway (writes to gateway.log)
|
||||
sniGateway, err := gateway.NewTCPSNIGateway(gatewayLogger, &sniCfg)
|
||||
if err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "Failed to initialize SNI gateway", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
n.sniGateway = sniGateway
|
||||
gatewayLogger.ComponentInfo(logging.ComponentGateway, "SNI gateway initialized, starting...")
|
||||
|
||||
// Start SNI gateway (this blocks until shutdown)
|
||||
if err := n.sniGateway.Start(ctx); err != nil {
|
||||
gatewayLogger.ComponentError(logging.ComponentGateway, "SNI Gateway error", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractPEMFromTLSCert extracts certificate and private key from tls.Certificate to PEM files
|
||||
func extractPEMFromTLSCert(tlsCert *tls.Certificate, certPath, keyPath string) error {
|
||||
if tlsCert == nil || len(tlsCert.Certificate) == 0 {
|
||||
return fmt.Errorf("invalid tls certificate")
|
||||
}
|
||||
|
||||
// Write certificate chain to PEM file
|
||||
certFile, err := os.Create(certPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create cert file: %w", err)
|
||||
}
|
||||
defer certFile.Close()
|
||||
|
||||
// Write all certificates in the chain
|
||||
for _, certBytes := range tlsCert.Certificate {
|
||||
if err := pem.Encode(certFile, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: certBytes,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to encode certificate: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write private key to PEM file
|
||||
if tlsCert.PrivateKey == nil {
|
||||
return fmt.Errorf("private key is nil")
|
||||
}
|
||||
|
||||
keyFile, err := os.Create(keyPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create key file: %w", err)
|
||||
}
|
||||
defer keyFile.Close()
|
||||
|
||||
// Handle different key types
|
||||
var keyBytes []byte
|
||||
switch key := tlsCert.PrivateKey.(type) {
|
||||
case *x509.Certificate:
|
||||
keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
default:
|
||||
// Try to marshal as PKCS8
|
||||
keyBytes, err = x509.MarshalPKCS8PrivateKey(tlsCert.PrivateKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := pem.Encode(keyFile, &pem.Block{
|
||||
Type: "PRIVATE KEY",
|
||||
Bytes: keyBytes,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to encode private key: %w", err)
|
||||
}
|
||||
|
||||
// Set proper permissions
|
||||
os.Chmod(certPath, 0644)
|
||||
os.Chmod(keyPath, 0600)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Starts the network node
|
||||
func (n *Node) Start(ctx context.Context) error {
|
||||
n.logger.Info("Starting network node", zap.String("data_dir", n.config.Node.DataDir))
|
||||
@ -630,6 +1188,12 @@ func (n *Node) Start(ctx context.Context) error {
|
||||
return fmt.Errorf("failed to create data directory: %w", err)
|
||||
}
|
||||
|
||||
// Start HTTP Gateway first (doesn't depend on other services)
|
||||
if err := n.startHTTPGateway(ctx); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to start HTTP Gateway", zap.Error(err))
|
||||
// Don't fail node startup if gateway fails
|
||||
}
|
||||
|
||||
// Start LibP2P host first (needed for cluster discovery)
|
||||
if err := n.startLibP2P(); err != nil {
|
||||
return fmt.Errorf("failed to start LibP2P: %w", err)
|
||||
@ -686,16 +1250,14 @@ func (n *Node) startIPFSClusterConfig() error {
|
||||
return fmt.Errorf("failed to ensure cluster config: %w", err)
|
||||
}
|
||||
|
||||
// If this is not the bootstrap node, try to update bootstrap peer info
|
||||
if n.config.Node.Type != "bootstrap" && len(n.config.Discovery.BootstrapPeers) > 0 {
|
||||
// Try to find bootstrap cluster API URL from config
|
||||
// For now, we'll discover it from the first bootstrap peer
|
||||
// In a real scenario, you might want to configure this explicitly
|
||||
bootstrapClusterAPI := "http://localhost:9094" // Default bootstrap cluster API
|
||||
if err := cm.UpdateBootstrapPeers(bootstrapClusterAPI); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to update bootstrap peers, will retry later", zap.Error(err))
|
||||
// Don't fail - peers can connect later via mDNS or manual config
|
||||
}
|
||||
// Try to repair peer configuration automatically
|
||||
// This will be retried periodically if peer is not available yet
|
||||
if success, err := cm.RepairPeerConfiguration(); err != nil {
|
||||
n.logger.ComponentWarn(logging.ComponentNode, "Failed to repair peer configuration, will retry later", zap.Error(err))
|
||||
} else if success {
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "Peer configuration repaired successfully")
|
||||
} else {
|
||||
n.logger.ComponentDebug(logging.ComponentNode, "Peer not available yet, will retry periodically")
|
||||
}
|
||||
|
||||
n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized")
|
||||
|
||||
@ -140,7 +140,7 @@ func TestLoadOrCreateIdentity(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestHashBootstrapConnections(t *testing.T) {
|
||||
func TestHasPeerConnections(t *testing.T) {
|
||||
cfg := &config.Config{}
|
||||
|
||||
n, err := NewNode(cfg)
|
||||
@ -148,8 +148,8 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
t.Fatalf("NewNode() error: %v", err)
|
||||
}
|
||||
|
||||
// Assert: Does not have bootstrap connections
|
||||
conns := n.hasBootstrapConnections()
|
||||
// Assert: Does not have peer connections
|
||||
conns := n.hasPeerConnections()
|
||||
if conns != false {
|
||||
t.Fatalf("expected false, got %v", conns)
|
||||
}
|
||||
@ -162,13 +162,13 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
defer h.Close()
|
||||
|
||||
n.host = h
|
||||
conns = n.hasBootstrapConnections()
|
||||
conns = n.hasPeerConnections()
|
||||
if conns != false {
|
||||
t.Fatalf("expected false, got %v", conns)
|
||||
}
|
||||
|
||||
// Assert: Return true if connected to at least one bootstrap peer
|
||||
t.Run("returns true when connected to at least one configured bootstrap peer", func(t *testing.T) {
|
||||
// Assert: Return true if connected to at least one peer
|
||||
t.Run("returns true when connected to at least one configured peer", func(t *testing.T) {
|
||||
// Fresh node and config
|
||||
cfg := &config.Config{}
|
||||
n2, err := NewNode(cfg)
|
||||
@ -189,7 +189,7 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
}
|
||||
defer hB.Close()
|
||||
|
||||
// Build B's bootstrap multiaddr: <one-of-B.Addrs>/p2p/<B.ID>
|
||||
// Build B's peer multiaddr: <one-of-B.Addrs>/p2p/<B.ID>
|
||||
var base multiaddr.Multiaddr
|
||||
for _, a := range hB.Addrs() {
|
||||
if strings.Contains(a.String(), "/tcp/") {
|
||||
@ -204,11 +204,11 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
|
||||
}
|
||||
bootstrap := base.Encapsulate(pidMA).String()
|
||||
peerAddr := base.Encapsulate(pidMA).String()
|
||||
|
||||
// Configure node A with B as a bootstrap peer
|
||||
// Configure node A with B as a peer
|
||||
n2.host = hA
|
||||
n2.config.Discovery.BootstrapPeers = []string{bootstrap}
|
||||
n2.config.Discovery.BootstrapPeers = []string{peerAddr}
|
||||
|
||||
// Connect A -> B
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
@ -229,13 +229,13 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Assert: hasBootstrapConnections returns true
|
||||
if !n2.hasBootstrapConnections() {
|
||||
t.Fatalf("expected hasBootstrapConnections() to be true")
|
||||
// Assert: hasPeerConnections returns true
|
||||
if !n2.hasPeerConnections() {
|
||||
t.Fatalf("expected hasPeerConnections() to be true")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("returns false when connected peers are not in the bootstrap list", func(t *testing.T) {
|
||||
t.Run("returns false when connected peers are not in the peer list", func(t *testing.T) {
|
||||
// Fresh node and config
|
||||
cfg := &config.Config{}
|
||||
n2, err := NewNode(cfg)
|
||||
@ -262,7 +262,7 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
}
|
||||
defer hC.Close()
|
||||
|
||||
// Build C's bootstrap multiaddr: <one-of-C.Addrs>/p2p/<C.ID>
|
||||
// Build C's peer multiaddr: <one-of-C.Addrs>/p2p/<C.ID>
|
||||
var baseC multiaddr.Multiaddr
|
||||
for _, a := range hC.Addrs() {
|
||||
if strings.Contains(a.String(), "/tcp/") {
|
||||
@ -277,13 +277,13 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("NewMultiaddr(/p2p/<id>): %v", err)
|
||||
}
|
||||
bootstrapC := baseC.Encapsulate(pidC).String()
|
||||
peerC := baseC.Encapsulate(pidC).String()
|
||||
|
||||
// Configure node A with ONLY C as a bootstrap peer
|
||||
// Configure node A with ONLY C as a peer
|
||||
n2.host = hA
|
||||
n2.config.Discovery.BootstrapPeers = []string{bootstrapC}
|
||||
n2.config.Discovery.BootstrapPeers = []string{peerC}
|
||||
|
||||
// Connect A -> B (but C is in the bootstrap list, not B)
|
||||
// Connect A -> B (but C is in the peer list, not B)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
if err := hA.Connect(ctx, peer.AddrInfo{ID: hB.ID(), Addrs: hB.Addrs()}); err != nil {
|
||||
@ -302,9 +302,9 @@ func TestHashBootstrapConnections(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Assert: hasBootstrapConnections should be false (connected peer is not in bootstrap list)
|
||||
if n2.hasBootstrapConnections() {
|
||||
t.Fatalf("expected hasBootstrapConnections() to be false")
|
||||
// Assert: hasPeerConnections should be false (connected peer is not in peer list)
|
||||
if n2.hasPeerConnections() {
|
||||
t.Fatalf("expected hasPeerConnections() to be false")
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@ -3,6 +3,7 @@ package pubsub
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Publish publishes a message to a topic
|
||||
@ -27,6 +28,29 @@ func (m *Manager) Publish(ctx context.Context, topic string, data []byte) error
|
||||
return fmt.Errorf("failed to get topic for publishing: %w", err)
|
||||
}
|
||||
|
||||
// Wait briefly for mesh formation if no peers are in the mesh yet
|
||||
// GossipSub needs time to discover peers and form a mesh
|
||||
// With FloodPublish enabled, messages will be flooded to all connected peers
|
||||
// but we still want to give the mesh a chance to form for better delivery
|
||||
waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer waitCancel()
|
||||
|
||||
// Check if we have peers in the mesh, wait up to 2 seconds for mesh formation
|
||||
meshFormed := false
|
||||
for i := 0; i < 20 && !meshFormed; i++ {
|
||||
peers := libp2pTopic.ListPeers()
|
||||
if len(peers) > 0 {
|
||||
meshFormed = true
|
||||
break // Mesh has formed, proceed with publish
|
||||
}
|
||||
select {
|
||||
case <-waitCtx.Done():
|
||||
meshFormed = true // Timeout, proceed anyway (FloodPublish will handle it)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
// Continue waiting
|
||||
}
|
||||
}
|
||||
|
||||
// Publish message
|
||||
if err := libp2pTopic.Publish(ctx, data); err != nil {
|
||||
return fmt.Errorf("failed to publish message: %w", err)
|
||||
|
||||
@ -24,24 +24,21 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
|
||||
}
|
||||
namespacedTopic := fmt.Sprintf("%s.%s", ns, topic)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Check if we already have a subscription for this topic
|
||||
topicSub, exists := m.subscriptions[namespacedTopic]
|
||||
|
||||
if exists {
|
||||
// Add handler to existing subscription
|
||||
// Fast path: we already have a subscription for this topic
|
||||
m.mu.RLock()
|
||||
if existing := m.subscriptions[namespacedTopic]; existing != nil {
|
||||
m.mu.RUnlock()
|
||||
handlerID := generateHandlerID()
|
||||
topicSub.mu.Lock()
|
||||
topicSub.handlers[handlerID] = handler
|
||||
topicSub.refCount++
|
||||
topicSub.mu.Unlock()
|
||||
existing.mu.Lock()
|
||||
existing.handlers[handlerID] = handler
|
||||
existing.refCount++
|
||||
existing.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
// Create new subscription
|
||||
// Get or create topic
|
||||
// Create the underlying libp2p subscription without holding the manager lock
|
||||
// to avoid re-entrant lock attempts
|
||||
libp2pTopic, err := m.getOrCreateTopic(namespacedTopic)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get topic: %w", err)
|
||||
@ -58,26 +55,44 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
|
||||
|
||||
// Create topic subscription with initial handler
|
||||
handlerID := generateHandlerID()
|
||||
topicSub = &topicSubscription{
|
||||
newSub := &topicSubscription{
|
||||
sub: sub,
|
||||
cancel: cancel,
|
||||
handlers: map[HandlerID]MessageHandler{handlerID: handler},
|
||||
refCount: 1,
|
||||
}
|
||||
m.subscriptions[namespacedTopic] = topicSub
|
||||
|
||||
// Install the subscription (or merge if another goroutine beat us)
|
||||
m.mu.Lock()
|
||||
if existing := m.subscriptions[namespacedTopic]; existing != nil {
|
||||
m.mu.Unlock()
|
||||
// Another goroutine already created a subscription while we were working
|
||||
// Clean up our resources and add to theirs
|
||||
cancel()
|
||||
sub.Cancel()
|
||||
handlerID := generateHandlerID()
|
||||
existing.mu.Lock()
|
||||
existing.handlers[handlerID] = handler
|
||||
existing.refCount++
|
||||
existing.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
m.subscriptions[namespacedTopic] = newSub
|
||||
m.mu.Unlock()
|
||||
|
||||
// Announce topic interest to help with peer discovery
|
||||
go m.announceTopicInterest(namespacedTopic)
|
||||
|
||||
// Start message handler goroutine (fan-out to all handlers)
|
||||
go func() {
|
||||
defer func() {
|
||||
sub.Cancel()
|
||||
}()
|
||||
go func(ts *topicSubscription) {
|
||||
defer ts.sub.Cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-subCtx.Done():
|
||||
return
|
||||
default:
|
||||
msg, err := sub.Next(subCtx)
|
||||
msg, err := ts.sub.Next(subCtx)
|
||||
if err != nil {
|
||||
if subCtx.Err() != nil {
|
||||
return // Context cancelled
|
||||
@ -85,13 +100,18 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter out internal discovery messages
|
||||
if string(msg.Data) == "PEER_DISCOVERY_PING" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Broadcast to all handlers
|
||||
topicSub.mu.RLock()
|
||||
handlers := make([]MessageHandler, 0, len(topicSub.handlers))
|
||||
for _, h := range topicSub.handlers {
|
||||
ts.mu.RLock()
|
||||
handlers := make([]MessageHandler, 0, len(ts.handlers))
|
||||
for _, h := range ts.handlers {
|
||||
handlers = append(handlers, h)
|
||||
}
|
||||
topicSub.mu.RUnlock()
|
||||
ts.mu.RUnlock()
|
||||
|
||||
// Call each handler (don't block on individual handler errors)
|
||||
for _, h := range handlers {
|
||||
@ -102,7 +122,7 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}(newSub)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -13,19 +15,21 @@ import (
|
||||
"github.com/DeBrosOfficial/network/pkg/discovery"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// ClusterDiscoveryService bridges LibP2P discovery with RQLite cluster management
|
||||
type ClusterDiscoveryService struct {
|
||||
host host.Host
|
||||
discoveryMgr *discovery.Manager
|
||||
rqliteManager *RQLiteManager
|
||||
nodeID string
|
||||
nodeType string
|
||||
raftAddress string
|
||||
httpAddress string
|
||||
dataDir string
|
||||
host host.Host
|
||||
discoveryMgr *discovery.Manager
|
||||
rqliteManager *RQLiteManager
|
||||
nodeID string
|
||||
nodeType string
|
||||
raftAddress string
|
||||
httpAddress string
|
||||
dataDir string
|
||||
minClusterSize int // Minimum cluster size required
|
||||
|
||||
knownPeers map[string]*discovery.RQLiteNodeMetadata // NodeID -> Metadata
|
||||
peerHealth map[string]*PeerHealth // NodeID -> Health
|
||||
@ -51,6 +55,11 @@ func NewClusterDiscoveryService(
|
||||
dataDir string,
|
||||
logger *zap.Logger,
|
||||
) *ClusterDiscoveryService {
|
||||
minClusterSize := 1
|
||||
if rqliteManager != nil && rqliteManager.config != nil {
|
||||
minClusterSize = rqliteManager.config.MinClusterSize
|
||||
}
|
||||
|
||||
return &ClusterDiscoveryService{
|
||||
host: h,
|
||||
discoveryMgr: discoveryMgr,
|
||||
@ -60,6 +69,7 @@ func NewClusterDiscoveryService(
|
||||
raftAddress: raftAddress,
|
||||
httpAddress: httpAddress,
|
||||
dataDir: dataDir,
|
||||
minClusterSize: minClusterSize,
|
||||
knownPeers: make(map[string]*discovery.RQLiteNodeMetadata),
|
||||
peerHealth: make(map[string]*PeerHealth),
|
||||
updateInterval: 30 * time.Second,
|
||||
@ -156,21 +166,34 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
|
||||
connectedPeers := c.host.Network().Peers()
|
||||
var metadata []*discovery.RQLiteNodeMetadata
|
||||
|
||||
c.logger.Debug("Collecting peer metadata from LibP2P",
|
||||
zap.Int("connected_libp2p_peers", len(connectedPeers)))
|
||||
// Metadata collection is routine - no need to log every occurrence
|
||||
|
||||
c.mu.RLock()
|
||||
currentRaftAddr := c.raftAddress
|
||||
currentHTTPAddr := c.httpAddress
|
||||
c.mu.RUnlock()
|
||||
|
||||
// Add ourselves
|
||||
ourMetadata := &discovery.RQLiteNodeMetadata{
|
||||
NodeID: c.raftAddress, // RQLite uses raft address as node ID
|
||||
RaftAddress: c.raftAddress,
|
||||
HTTPAddress: c.httpAddress,
|
||||
NodeID: currentRaftAddr, // RQLite uses raft address as node ID
|
||||
RaftAddress: currentRaftAddr,
|
||||
HTTPAddress: currentHTTPAddr,
|
||||
NodeType: c.nodeType,
|
||||
RaftLogIndex: c.rqliteManager.getRaftLogIndex(),
|
||||
LastSeen: time.Now(),
|
||||
ClusterVersion: "1.0",
|
||||
}
|
||||
|
||||
if c.adjustSelfAdvertisedAddresses(ourMetadata) {
|
||||
c.logger.Debug("Adjusted self-advertised RQLite addresses",
|
||||
zap.String("raft_address", ourMetadata.RaftAddress),
|
||||
zap.String("http_address", ourMetadata.HTTPAddress))
|
||||
}
|
||||
|
||||
metadata = append(metadata, ourMetadata)
|
||||
|
||||
staleNodeIDs := make([]string, 0)
|
||||
|
||||
// Query connected peers for their RQLite metadata
|
||||
// For now, we'll use a simple approach - store metadata in peer metadata store
|
||||
// In a full implementation, this would use a custom protocol to exchange RQLite metadata
|
||||
@ -181,6 +204,9 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
|
||||
if jsonData, ok := val.([]byte); ok {
|
||||
var peerMeta discovery.RQLiteNodeMetadata
|
||||
if err := json.Unmarshal(jsonData, &peerMeta); err == nil {
|
||||
if updated, stale := c.adjustPeerAdvertisedAddresses(peerID, &peerMeta); updated && stale != "" {
|
||||
staleNodeIDs = append(staleNodeIDs, stale)
|
||||
}
|
||||
peerMeta.LastSeen = time.Now()
|
||||
metadata = append(metadata, &peerMeta)
|
||||
}
|
||||
@ -188,6 +214,16 @@ func (c *ClusterDiscoveryService) collectPeerMetadata() []*discovery.RQLiteNodeM
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up stale entries if NodeID changed
|
||||
if len(staleNodeIDs) > 0 {
|
||||
c.mu.Lock()
|
||||
for _, id := range staleNodeIDs {
|
||||
delete(c.knownPeers, id)
|
||||
delete(c.peerHealth, id)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
return metadata
|
||||
}
|
||||
|
||||
@ -203,9 +239,6 @@ type membershipUpdateResult struct {
|
||||
func (c *ClusterDiscoveryService) updateClusterMembership() {
|
||||
metadata := c.collectPeerMetadata()
|
||||
|
||||
c.logger.Debug("Collected peer metadata",
|
||||
zap.Int("metadata_count", len(metadata)))
|
||||
|
||||
// Compute membership changes while holding lock
|
||||
c.mu.Lock()
|
||||
result := c.computeMembershipChangesLocked(metadata)
|
||||
@ -215,35 +248,30 @@ func (c *ClusterDiscoveryService) updateClusterMembership() {
|
||||
if result.changed {
|
||||
// Log state changes (peer added/removed) at Info level
|
||||
if len(result.added) > 0 || len(result.updated) > 0 {
|
||||
c.logger.Info("Cluster membership changed",
|
||||
c.logger.Info("Membership changed",
|
||||
zap.Int("added", len(result.added)),
|
||||
zap.Int("updated", len(result.updated)),
|
||||
zap.Strings("added_ids", result.added),
|
||||
zap.Strings("updated_ids", result.updated))
|
||||
zap.Strings("added", result.added),
|
||||
zap.Strings("updated", result.updated))
|
||||
}
|
||||
|
||||
// Write peers.json without holding lock
|
||||
if err := c.writePeersJSONWithData(result.peersJSON); err != nil {
|
||||
c.logger.Error("CRITICAL: Failed to write peers.json",
|
||||
c.logger.Error("Failed to write peers.json",
|
||||
zap.Error(err),
|
||||
zap.String("data_dir", c.dataDir),
|
||||
zap.Int("peer_count", len(result.peersJSON)))
|
||||
zap.Int("peers", len(result.peersJSON)))
|
||||
} else {
|
||||
c.logger.Debug("peers.json updated",
|
||||
zap.Int("peer_count", len(result.peersJSON)))
|
||||
zap.Int("peers", len(result.peersJSON)))
|
||||
}
|
||||
|
||||
// Update lastUpdate timestamp
|
||||
c.mu.Lock()
|
||||
c.lastUpdate = time.Now()
|
||||
c.mu.Unlock()
|
||||
} else {
|
||||
c.mu.RLock()
|
||||
totalPeers := len(c.knownPeers)
|
||||
c.mu.RUnlock()
|
||||
c.logger.Debug("No changes to cluster membership",
|
||||
zap.Int("total_peers", totalPeers))
|
||||
}
|
||||
// No changes - don't log (reduces noise)
|
||||
}
|
||||
|
||||
// computeMembershipChangesLocked computes membership changes and returns snapshot data
|
||||
@ -268,10 +296,10 @@ func (c *ClusterDiscoveryService) computeMembershipChangesLocked(metadata []*dis
|
||||
} else {
|
||||
// New peer discovered
|
||||
added = append(added, meta.NodeID)
|
||||
c.logger.Info("Node added to cluster",
|
||||
zap.String("node_id", meta.NodeID),
|
||||
zap.String("raft_address", meta.RaftAddress),
|
||||
zap.String("node_type", meta.NodeType),
|
||||
c.logger.Info("Node added",
|
||||
zap.String("node", meta.NodeID),
|
||||
zap.String("raft", meta.RaftAddress),
|
||||
zap.String("type", meta.NodeType),
|
||||
zap.Uint64("log_index", meta.RaftLogIndex))
|
||||
}
|
||||
|
||||
@ -293,18 +321,56 @@ func (c *ClusterDiscoveryService) computeMembershipChangesLocked(metadata []*dis
|
||||
}
|
||||
}
|
||||
|
||||
// CRITICAL FIX: Count remote peers (excluding self)
|
||||
remotePeerCount := 0
|
||||
for _, peer := range c.knownPeers {
|
||||
if peer.NodeID != c.raftAddress {
|
||||
remotePeerCount++
|
||||
}
|
||||
}
|
||||
|
||||
// Get peers JSON snapshot (for checking if it would be empty)
|
||||
peers := c.getPeersJSONUnlocked()
|
||||
|
||||
// Determine if we should write peers.json
|
||||
shouldWrite := len(added) > 0 || len(updated) > 0 || c.lastUpdate.IsZero()
|
||||
|
||||
// CRITICAL FIX: Don't write peers.json until we have minimum cluster size
|
||||
// This prevents RQLite from starting as a single-node cluster
|
||||
// For min_cluster_size=3, we need at least 2 remote peers (plus self = 3 total)
|
||||
if shouldWrite {
|
||||
// Log initial sync if this is the first time
|
||||
// For initial sync, wait until we have at least (MinClusterSize - 1) remote peers
|
||||
// This ensures peers.json contains enough peers for proper cluster formation
|
||||
if c.lastUpdate.IsZero() {
|
||||
c.logger.Info("Initial cluster membership sync",
|
||||
zap.Int("total_peers", len(c.knownPeers)))
|
||||
requiredRemotePeers := c.minClusterSize - 1
|
||||
|
||||
if remotePeerCount < requiredRemotePeers {
|
||||
c.logger.Info("Waiting for peers",
|
||||
zap.Int("have", remotePeerCount),
|
||||
zap.Int("need", requiredRemotePeers),
|
||||
zap.Int("min_size", c.minClusterSize))
|
||||
return membershipUpdateResult{
|
||||
changed: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Additional safety check: don't write empty peers.json (would cause single-node cluster)
|
||||
if len(peers) == 0 && c.lastUpdate.IsZero() {
|
||||
c.logger.Info("No remote peers - waiting")
|
||||
return membershipUpdateResult{
|
||||
changed: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Log initial sync if this is the first time
|
||||
if c.lastUpdate.IsZero() {
|
||||
c.logger.Info("Initial sync",
|
||||
zap.Int("total", len(c.knownPeers)),
|
||||
zap.Int("remote", remotePeerCount),
|
||||
zap.Int("in_json", len(peers)))
|
||||
}
|
||||
|
||||
// Get peers JSON snapshot
|
||||
peers := c.getPeersJSONUnlocked()
|
||||
return membershipUpdateResult{
|
||||
peersJSON: peers,
|
||||
added: added,
|
||||
@ -331,8 +397,8 @@ func (c *ClusterDiscoveryService) removeInactivePeers() {
|
||||
|
||||
if inactiveDuration > c.inactivityLimit {
|
||||
// Mark as inactive and remove
|
||||
c.logger.Warn("Node removed from cluster",
|
||||
zap.String("node_id", nodeID),
|
||||
c.logger.Warn("Node removed",
|
||||
zap.String("node", nodeID),
|
||||
zap.String("reason", "inactive"),
|
||||
zap.Duration("inactive_duration", inactiveDuration))
|
||||
|
||||
@ -344,9 +410,9 @@ func (c *ClusterDiscoveryService) removeInactivePeers() {
|
||||
|
||||
// Regenerate peers.json if any peers were removed
|
||||
if len(removed) > 0 {
|
||||
c.logger.Info("Removed inactive nodes, regenerating peers.json",
|
||||
zap.Int("removed", len(removed)),
|
||||
zap.Strings("node_ids", removed))
|
||||
c.logger.Info("Removed inactive",
|
||||
zap.Int("count", len(removed)),
|
||||
zap.Strings("nodes", removed))
|
||||
|
||||
if err := c.writePeersJSON(); err != nil {
|
||||
c.logger.Error("Failed to write peers.json after cleanup", zap.Error(err))
|
||||
@ -366,6 +432,11 @@ func (c *ClusterDiscoveryService) getPeersJSONUnlocked() []map[string]interface{
|
||||
peers := make([]map[string]interface{}, 0, len(c.knownPeers))
|
||||
|
||||
for _, peer := range c.knownPeers {
|
||||
// CRITICAL FIX: Include ALL peers (including self) in peers.json
|
||||
// When using expect configuration with recovery, RQLite needs the complete
|
||||
// expected cluster configuration to properly form consensus.
|
||||
// The peers.json file is used by RQLite's recovery mechanism to know
|
||||
// what the full cluster membership should be, including the local node.
|
||||
peerEntry := map[string]interface{}{
|
||||
"id": peer.RaftAddress, // RQLite uses raft address as node ID
|
||||
"address": peer.RaftAddress,
|
||||
@ -401,11 +472,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
|
||||
// Get the RQLite raft directory
|
||||
rqliteDir := filepath.Join(dataDir, "rqlite", "raft")
|
||||
|
||||
c.logger.Debug("Writing peers.json",
|
||||
zap.String("data_dir", c.dataDir),
|
||||
zap.String("expanded_path", dataDir),
|
||||
zap.String("raft_dir", rqliteDir),
|
||||
zap.Int("peer_count", len(peers)))
|
||||
// Writing peers.json - routine operation, no need to log details
|
||||
|
||||
if err := os.MkdirAll(rqliteDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create raft directory %s: %w", rqliteDir, err)
|
||||
@ -416,7 +483,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
|
||||
|
||||
// Backup existing peers.json if it exists
|
||||
if _, err := os.Stat(peersFile); err == nil {
|
||||
c.logger.Debug("Backing up existing peers.json", zap.String("backup_file", backupFile))
|
||||
// Backup existing peers.json if it exists - routine operation
|
||||
data, err := os.ReadFile(peersFile)
|
||||
if err == nil {
|
||||
_ = os.WriteFile(backupFile, data, 0644)
|
||||
@ -429,7 +496,7 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
|
||||
return fmt.Errorf("failed to marshal peers.json: %w", err)
|
||||
}
|
||||
|
||||
c.logger.Debug("Marshaled peers.json", zap.Int("data_size", len(data)))
|
||||
// Marshaled peers.json - routine operation
|
||||
|
||||
// Write atomically using temp file + rename
|
||||
tempFile := peersFile + ".tmp"
|
||||
@ -449,9 +516,8 @@ func (c *ClusterDiscoveryService) writePeersJSONWithData(peers []map[string]inte
|
||||
}
|
||||
|
||||
c.logger.Info("peers.json written",
|
||||
zap.String("file", peersFile),
|
||||
zap.Int("node_count", len(peers)),
|
||||
zap.Strings("node_ids", nodeIDs))
|
||||
zap.Int("peers", len(peers)),
|
||||
zap.Strings("nodes", nodeIDs))
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -518,25 +584,34 @@ func (c *ClusterDiscoveryService) HasRecentPeersJSON() bool {
|
||||
return time.Since(c.lastUpdate) < 5*time.Minute
|
||||
}
|
||||
|
||||
// FindJoinTargets discovers join targets via LibP2P, prioritizing bootstrap nodes
|
||||
// FindJoinTargets discovers join targets via LibP2P
|
||||
func (c *ClusterDiscoveryService) FindJoinTargets() []string {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
targets := []string{}
|
||||
|
||||
// Prioritize bootstrap nodes
|
||||
// All nodes are equal - prioritize by Raft log index (more advanced = better)
|
||||
type nodeWithIndex struct {
|
||||
address string
|
||||
logIndex uint64
|
||||
}
|
||||
var nodes []nodeWithIndex
|
||||
for _, peer := range c.knownPeers {
|
||||
if peer.NodeType == "bootstrap" {
|
||||
targets = append(targets, peer.RaftAddress)
|
||||
nodes = append(nodes, nodeWithIndex{peer.RaftAddress, peer.RaftLogIndex})
|
||||
}
|
||||
|
||||
// Sort by log index descending (higher log index = more up-to-date)
|
||||
for i := 0; i < len(nodes)-1; i++ {
|
||||
for j := i + 1; j < len(nodes); j++ {
|
||||
if nodes[j].logIndex > nodes[i].logIndex {
|
||||
nodes[i], nodes[j] = nodes[j], nodes[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add other nodes as fallback
|
||||
for _, peer := range c.knownPeers {
|
||||
if peer.NodeType != "bootstrap" {
|
||||
targets = append(targets, peer.RaftAddress)
|
||||
}
|
||||
for _, n := range nodes {
|
||||
targets = append(targets, n.address)
|
||||
}
|
||||
|
||||
return targets
|
||||
@ -567,15 +642,52 @@ func (c *ClusterDiscoveryService) WaitForDiscoverySettling(ctx context.Context)
|
||||
|
||||
// TriggerSync manually triggers a cluster membership sync
|
||||
func (c *ClusterDiscoveryService) TriggerSync() {
|
||||
c.logger.Info("Manually triggering cluster membership sync")
|
||||
// All nodes use the same discovery timing for consistency
|
||||
c.updateClusterMembership()
|
||||
}
|
||||
|
||||
// For bootstrap nodes, wait a bit for peer discovery to stabilize
|
||||
if c.nodeType == "bootstrap" {
|
||||
c.logger.Info("Bootstrap node: waiting for peer discovery to complete")
|
||||
time.Sleep(5 * time.Second)
|
||||
// ForceWritePeersJSON forces writing peers.json regardless of membership changes
|
||||
// This is useful after clearing raft state when we need to recreate peers.json
|
||||
func (c *ClusterDiscoveryService) ForceWritePeersJSON() error {
|
||||
c.logger.Info("Force writing peers.json")
|
||||
|
||||
// First, collect latest peer metadata to ensure we have current information
|
||||
metadata := c.collectPeerMetadata()
|
||||
|
||||
// Update known peers with latest metadata (without writing file yet)
|
||||
c.mu.Lock()
|
||||
for _, meta := range metadata {
|
||||
c.knownPeers[meta.NodeID] = meta
|
||||
// Update health tracking for remote peers
|
||||
if meta.NodeID != c.raftAddress {
|
||||
if _, ok := c.peerHealth[meta.NodeID]; !ok {
|
||||
c.peerHealth[meta.NodeID] = &PeerHealth{
|
||||
LastSeen: time.Now(),
|
||||
LastSuccessful: time.Now(),
|
||||
Status: "active",
|
||||
}
|
||||
} else {
|
||||
c.peerHealth[meta.NodeID].LastSeen = time.Now()
|
||||
c.peerHealth[meta.NodeID].Status = "active"
|
||||
}
|
||||
}
|
||||
}
|
||||
peers := c.getPeersJSONUnlocked()
|
||||
c.mu.Unlock()
|
||||
|
||||
// Now force write the file
|
||||
if err := c.writePeersJSONWithData(peers); err != nil {
|
||||
c.logger.Error("Failed to force write peers.json",
|
||||
zap.Error(err),
|
||||
zap.String("data_dir", c.dataDir),
|
||||
zap.Int("peers", len(peers)))
|
||||
return err
|
||||
}
|
||||
|
||||
c.updateClusterMembership()
|
||||
c.logger.Info("peers.json written",
|
||||
zap.Int("peers", len(peers)))
|
||||
|
||||
return nil
|
||||
}

// TriggerPeerExchange actively exchanges peer information with connected peers
@@ -585,25 +697,36 @@ func (c *ClusterDiscoveryService) TriggerPeerExchange(ctx context.Context) error
		return fmt.Errorf("discovery manager not available")
	}

	c.logger.Info("Triggering peer exchange via discovery manager")
	collected := c.discoveryMgr.TriggerPeerExchange(ctx)
	c.logger.Info("Peer exchange completed", zap.Int("peers_with_metadata", collected))
	c.logger.Debug("Exchange completed", zap.Int("with_metadata", collected))

	return nil
}

// UpdateOwnMetadata updates our own RQLite metadata in the peerstore
func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
	c.mu.RLock()
	currentRaftAddr := c.raftAddress
	currentHTTPAddr := c.httpAddress
	c.mu.RUnlock()

	metadata := &discovery.RQLiteNodeMetadata{
		NodeID:      c.raftAddress, // RQLite uses raft address as node ID
		RaftAddress: c.raftAddress,
		HTTPAddress: c.httpAddress,
		NodeID:      currentRaftAddr, // RQLite uses raft address as node ID
		RaftAddress: currentRaftAddr,
		HTTPAddress: currentHTTPAddr,
		NodeType:    c.nodeType,
		RaftLogIndex:   c.rqliteManager.getRaftLogIndex(),
		LastSeen:       time.Now(),
		ClusterVersion: "1.0",
	}

	// Adjust addresses if needed
	if c.adjustSelfAdvertisedAddresses(metadata) {
		c.logger.Debug("Adjusted self-advertised RQLite addresses in UpdateOwnMetadata",
			zap.String("raft_address", metadata.RaftAddress),
			zap.String("http_address", metadata.HTTPAddress))
	}

	// Store in our own peerstore for peer exchange
	data, err := json.Marshal(metadata)
	if err != nil {
@@ -616,13 +739,28 @@ func (c *ClusterDiscoveryService) UpdateOwnMetadata() {
		return
	}

	c.logger.Debug("Updated own RQLite metadata",
		zap.String("node_id", metadata.NodeID),
	c.logger.Debug("Metadata updated",
		zap.String("node", metadata.NodeID),
		zap.Uint64("log_index", metadata.RaftLogIndex))
}

// StoreRemotePeerMetadata stores metadata received from a remote peer
func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metadata *discovery.RQLiteNodeMetadata) error {
	if metadata == nil {
		return fmt.Errorf("metadata is nil")
	}

	// Adjust addresses if needed (replace localhost with actual IP)
	if updated, stale := c.adjustPeerAdvertisedAddresses(peerID, metadata); updated && stale != "" {
		// Clean up stale entry if NodeID changed
		c.mu.Lock()
		delete(c.knownPeers, stale)
		delete(c.peerHealth, stale)
		c.mu.Unlock()
	}

	metadata.LastSeen = time.Now()

	data, err := json.Marshal(metadata)
	if err != nil {
		return fmt.Errorf("failed to marshal metadata: %w", err)
@@ -632,9 +770,245 @@ func (c *ClusterDiscoveryService) StoreRemotePeerMetadata(peerID peer.ID, metada
		return fmt.Errorf("failed to store metadata: %w", err)
	}

	c.logger.Debug("Stored remote peer metadata",
		zap.String("peer_id", peerID.String()[:8]+"..."),
		zap.String("node_id", metadata.NodeID))
	c.logger.Debug("Metadata stored",
		zap.String("peer", shortPeerID(peerID)),
		zap.String("node", metadata.NodeID))

	return nil
}

// adjustPeerAdvertisedAddresses adjusts peer metadata addresses by replacing localhost/loopback
// with the actual IP address from LibP2P connection. Returns (updated, staleNodeID).
// staleNodeID is non-empty if NodeID changed (indicating old entry should be cleaned up).
func (c *ClusterDiscoveryService) adjustPeerAdvertisedAddresses(peerID peer.ID, meta *discovery.RQLiteNodeMetadata) (bool, string) {
	ip := c.selectPeerIP(peerID)
	if ip == "" {
		return false, ""
	}

	changed, stale := rewriteAdvertisedAddresses(meta, ip, true)
	if changed {
		c.logger.Debug("Addresses normalized",
			zap.String("peer", shortPeerID(peerID)),
			zap.String("raft", meta.RaftAddress),
			zap.String("http_address", meta.HTTPAddress))
	}
	return changed, stale
}

// adjustSelfAdvertisedAddresses adjusts our own metadata addresses by replacing localhost/loopback
// with the actual IP address from LibP2P host. Updates internal state if changed.
func (c *ClusterDiscoveryService) adjustSelfAdvertisedAddresses(meta *discovery.RQLiteNodeMetadata) bool {
	ip := c.selectSelfIP()
	if ip == "" {
		return false
	}

	changed, _ := rewriteAdvertisedAddresses(meta, ip, true)
	if !changed {
		return false
	}

	// Update internal state with corrected addresses
	c.mu.Lock()
	c.raftAddress = meta.RaftAddress
	c.httpAddress = meta.HTTPAddress
	c.mu.Unlock()

	if c.rqliteManager != nil {
		c.rqliteManager.UpdateAdvertisedAddresses(meta.RaftAddress, meta.HTTPAddress)
	}

	return true
}

// selectPeerIP selects the best IP address for a peer from LibP2P connections.
// Prefers public IPs, falls back to private IPs if no public IP is available.
func (c *ClusterDiscoveryService) selectPeerIP(peerID peer.ID) string {
	var fallback string

	// First, try to get IP from active connections
	for _, conn := range c.host.Network().ConnsToPeer(peerID) {
		if ip, public := ipFromMultiaddr(conn.RemoteMultiaddr()); ip != "" {
			if shouldReplaceHost(ip) {
				continue
			}
			if public {
				return ip
			}
			if fallback == "" {
				fallback = ip
			}
		}
	}

	// Fallback to peerstore addresses
	for _, addr := range c.host.Peerstore().Addrs(peerID) {
		if ip, public := ipFromMultiaddr(addr); ip != "" {
			if shouldReplaceHost(ip) {
				continue
			}
			if public {
				return ip
			}
			if fallback == "" {
				fallback = ip
			}
		}
	}

	return fallback
}

// selectSelfIP selects the best IP address for ourselves from LibP2P host addresses.
// Prefers public IPs, falls back to private IPs if no public IP is available.
func (c *ClusterDiscoveryService) selectSelfIP() string {
	var fallback string

	for _, addr := range c.host.Addrs() {
		if ip, public := ipFromMultiaddr(addr); ip != "" {
			if shouldReplaceHost(ip) {
				continue
			}
			if public {
				return ip
			}
			if fallback == "" {
				fallback = ip
			}
		}
	}

	return fallback
}

// rewriteAdvertisedAddresses rewrites RaftAddress and HTTPAddress in metadata,
// replacing localhost/loopback addresses with the provided IP.
// Returns (changed, staleNodeID). staleNodeID is non-empty if NodeID changed.
func rewriteAdvertisedAddresses(meta *discovery.RQLiteNodeMetadata, newHost string, allowNodeIDRewrite bool) (bool, string) {
	if meta == nil || newHost == "" {
		return false, ""
	}

	originalNodeID := meta.NodeID
	changed := false
	nodeIDChanged := false

	// Replace host in RaftAddress if it's localhost/loopback
	if newAddr, replaced := replaceAddressHost(meta.RaftAddress, newHost); replaced {
		if meta.RaftAddress != newAddr {
			meta.RaftAddress = newAddr
			changed = true
		}
	}

	// Replace host in HTTPAddress if it's localhost/loopback
	if newAddr, replaced := replaceAddressHost(meta.HTTPAddress, newHost); replaced {
		if meta.HTTPAddress != newAddr {
			meta.HTTPAddress = newAddr
			changed = true
		}
	}

	// Update NodeID to match RaftAddress if it changed
	if allowNodeIDRewrite {
		if meta.RaftAddress != "" && (meta.NodeID == "" || meta.NodeID == originalNodeID || shouldReplaceHost(hostFromAddress(meta.NodeID))) {
			if meta.NodeID != meta.RaftAddress {
				meta.NodeID = meta.RaftAddress
				nodeIDChanged = meta.NodeID != originalNodeID
				if nodeIDChanged {
					changed = true
				}
			}
		}
	}

	if nodeIDChanged {
		return changed, originalNodeID
	}
	return changed, ""
}

// replaceAddressHost replaces the host part of an address if it's localhost/loopback.
// Returns (newAddress, replaced). replaced is true if host was replaced.
func replaceAddressHost(address, newHost string) (string, bool) {
	if address == "" || newHost == "" {
		return address, false
	}

	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return address, false
	}

	if !shouldReplaceHost(host) {
		return address, false
	}

	return net.JoinHostPort(newHost, port), true
}

// shouldReplaceHost returns true if the host should be replaced (localhost, loopback, etc.)
func shouldReplaceHost(host string) bool {
	if host == "" {
		return true
	}
	if strings.EqualFold(host, "localhost") {
		return true
	}

	// Check if it's a loopback or unspecified address
	if addr, err := netip.ParseAddr(host); err == nil {
		if addr.IsLoopback() || addr.IsUnspecified() {
			return true
		}
	}

	return false
}

// hostFromAddress extracts the host part from a host:port address
func hostFromAddress(address string) string {
	host, _, err := net.SplitHostPort(address)
	if err != nil {
		return ""
	}
	return host
}

// ipFromMultiaddr extracts an IP address from a multiaddr and returns (ip, isPublic)
func ipFromMultiaddr(addr multiaddr.Multiaddr) (string, bool) {
	if addr == nil {
		return "", false
	}

	if v4, err := addr.ValueForProtocol(multiaddr.P_IP4); err == nil {
		return v4, isPublicIP(v4)
	}
	if v6, err := addr.ValueForProtocol(multiaddr.P_IP6); err == nil {
		return v6, isPublicIP(v6)
	}
	return "", false
}

// isPublicIP returns true if the IP is a public (non-private, non-loopback) address
func isPublicIP(ip string) bool {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		return false
	}
	// Exclude loopback, unspecified, link-local, multicast, and private addresses
	if addr.IsLoopback() || addr.IsUnspecified() || addr.IsLinkLocalUnicast() || addr.IsLinkLocalMulticast() || addr.IsPrivate() {
		return false
	}
	return true
}

// shortPeerID returns a shortened version of a peer ID for logging
func shortPeerID(id peer.ID) string {
	s := id.String()
	if len(s) <= 8 {
		return s
	}
	return s[:8] + "..."
}
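
The helpers above compose into a small, testable normalization path. Here is a usage sketch written as a test in the same package (the package name rqlite is an assumption, and the sample addresses are illustrative):

package rqlite

import "testing"

// TestAddressNormalizationSketch exercises the helpers defined above with
// made-up addresses; it documents intended behavior rather than adding coverage.
func TestAddressNormalizationSketch(t *testing.T) {
	// "localhost" is flagged by shouldReplaceHost, so the host is rewritten.
	if got, replaced := replaceAddressHost("localhost:7001", "203.0.113.7"); !replaced || got != "203.0.113.7:7001" {
		t.Fatalf("got %q, replaced=%v", got, replaced)
	}

	// A routable host is left untouched.
	if _, replaced := replaceAddressHost("203.0.113.7:7001", "198.51.100.2"); replaced {
		t.Fatal("public host should not be rewritten")
	}

	// Private addresses are kept but not treated as public, so selectPeerIP
	// only uses them as a fallback when no public address is connected.
	if isPublicIP("192.168.1.10") {
		t.Fatal("192.168.1.10 should not be public")
	}
	if !isPublicIP("203.0.113.7") {
		t.Fatal("203.0.113.7 should be public")
	}
}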

@@ -5,26 +5,98 @@ import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"go.uber.org/zap"
)

// getRaftLogIndex returns the current Raft log index for this node
// It first tries to get the index from the running RQLite instance via /status endpoint.
// If that fails or returns 0, it falls back to reading persisted snapshot metadata from disk.
// This ensures accurate log index reporting even before RQLite is fully started.
func (r *RQLiteManager) getRaftLogIndex() uint64 {
	status, err := r.getRQLiteStatus()
	if err != nil {
		r.logger.Debug("Failed to get Raft log index", zap.Error(err))
	if err == nil {
		// Return the highest index we have from runtime status
		maxIndex := status.Store.Raft.LastLogIndex
		if status.Store.Raft.AppliedIndex > maxIndex {
			maxIndex = status.Store.Raft.AppliedIndex
		}
		if status.Store.Raft.CommitIndex > maxIndex {
			maxIndex = status.Store.Raft.CommitIndex
		}

		// If runtime status reports a valid index, use it
		if maxIndex > 0 {
			return maxIndex
		}

		// Runtime status returned 0, fall back to persisted snapshot metadata
		// This handles the case where RQLite is running but hasn't applied any logs yet
		if persisted := r.getPersistedRaftLogIndex(); persisted > 0 {
			r.logger.Debug("Using persisted Raft log index because runtime status reported zero",
				zap.Uint64("persisted_index", persisted))
			return persisted
		}
		return 0
	}

	// Return the highest index we have
	maxIndex := status.Store.Raft.LastLogIndex
	if status.Store.Raft.AppliedIndex > maxIndex {
		maxIndex = status.Store.Raft.AppliedIndex
	// RQLite status endpoint is not available (not started yet or unreachable)
	// Fall back to reading persisted snapshot metadata from disk
	persisted := r.getPersistedRaftLogIndex()
	if persisted > 0 {
		r.logger.Debug("Using persisted Raft log index before RQLite is reachable",
			zap.Uint64("persisted_index", persisted),
			zap.Error(err))
		return persisted
	}
	if status.Store.Raft.CommitIndex > maxIndex {
		maxIndex = status.Store.Raft.CommitIndex

	r.logger.Debug("Failed to get Raft log index", zap.Error(err))
	return 0
}

// getPersistedRaftLogIndex reads the highest Raft log index from snapshot metadata files
// This allows us to report accurate log indexes even before RQLite is started
func (r *RQLiteManager) getPersistedRaftLogIndex() uint64 {
	rqliteDataDir, err := r.rqliteDataDirPath()
	if err != nil {
		return 0
	}

	snapshotsDir := filepath.Join(rqliteDataDir, "rsnapshots")
	entries, err := os.ReadDir(snapshotsDir)
	if err != nil {
		return 0
	}

	var maxIndex uint64
	for _, entry := range entries {
		// Only process directories (snapshot directories)
		if !entry.IsDir() {
			continue
		}

		// Read meta.json from the snapshot directory
		metaPath := filepath.Join(snapshotsDir, entry.Name(), "meta.json")
		raw, err := os.ReadFile(metaPath)
		if err != nil {
			continue
		}

		// Parse the metadata JSON to extract the Index field
		var meta struct {
			Index uint64 `json:"Index"`
		}
		if err := json.Unmarshal(raw, &meta); err != nil {
			continue
		}

		// Track the highest index found
		if meta.Index > maxIndex {
			maxIndex = meta.Index
		}
	}

	return maxIndex
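
To make the snapshot fallback concrete, here is a standalone sketch of the meta.json layout the loop above reads. Only the Index field is consumed by getPersistedRaftLogIndex; the snapshot directory name and the extra Term field are assumptions about what rqlite persists:

// Standalone sketch: recreate an rsnapshots/<id>/meta.json and extract Index
// the same way getPersistedRaftLogIndex does. The directory name and the
// extra fields in the payload are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dataDir, err := os.MkdirTemp("", "rqlite-snapshots-example")
	if err != nil {
		panic(err)
	}
	snapDir := filepath.Join(dataDir, "rsnapshots", "2-1042-1700000000000")
	if err := os.MkdirAll(snapDir, 0o755); err != nil {
		panic(err)
	}

	// A real snapshot carries more metadata; only Index matters for the fallback.
	if err := os.WriteFile(filepath.Join(snapDir, "meta.json"), []byte(`{"Index": 1042, "Term": 2}`), 0o644); err != nil {
		panic(err)
	}

	raw, err := os.ReadFile(filepath.Join(snapDir, "meta.json"))
	if err != nil {
		panic(err)
	}
	var meta struct {
		Index uint64 `json:"Index"`
	}
	if err := json.Unmarshal(raw, &meta); err != nil {
		panic(err)
	}
	fmt.Println(meta.Index) // prints 1042
}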

@@ -210,10 +210,11 @@ type txOp struct {
}

type transactionRequest struct {
	Ops            []txOp `json:"ops"`
	ReturnResults  bool   `json:"return_results"`  // if true, returns per-op results
	StopOnError    bool   `json:"stop_on_error"`   // default true in tx
	PartialResults bool   `json:"partial_results"` // ignored for actual TX (atomic); kept for API symmetry
	Ops            []txOp   `json:"ops"`
	Statements     []string `json:"statements"`      // legacy format: array of SQL strings (treated as exec ops)
	ReturnResults  bool     `json:"return_results"`  // if true, returns per-op results
	StopOnError    bool     `json:"stop_on_error"`   // default true in tx
	PartialResults bool     `json:"partial_results"` // ignored for actual TX (atomic); kept for API symmetry
}
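
After this change the gateway's transaction endpoint accepts two request shapes. A sketch of both bodies follows, kept as raw JSON strings so nothing is assumed about txOp's internals beyond the kind/sql/args hint in the handler's error message; the endpoint path itself is not shown in this hunk:

// Illustrative request bodies only; field names mirror the struct above.
package main

import "fmt"

func main() {
	// Preferred format: explicit ops. "args" is hinted at by the handler's
	// error message; its exact shape is an assumption.
	opsBody := `{
  "ops": [
    {"kind": "exec", "sql": "INSERT INTO users(name) VALUES(?)", "args": ["alice"]},
    {"kind": "exec", "sql": "UPDATE users SET name = 'bob' WHERE id = 1"}
  ],
  "return_results": true
}`

	// Legacy format: a bare list of SQL strings, converted by handleTransaction
	// into exec ops before the transaction runs.
	legacyBody := `{"statements": ["CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, name TEXT)"]}`

	fmt.Println(opsBody)
	fmt.Println(legacyBody)
}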

// --------------------
@@ -427,8 +428,21 @@ func (g *HTTPGateway) handleTransaction(w http.ResponseWriter, r *http.Request)
		return
	}
	var body transactionRequest
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil || len(body.Ops) == 0 {
		writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?}")
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?} or {statements:[sql...]}")
		return
	}

	// Support legacy "statements" format by converting to ops
	if len(body.Statements) > 0 && len(body.Ops) == 0 {
		body.Ops = make([]txOp, len(body.Statements))
		for i, stmt := range body.Statements {
			body.Ops[i] = txOp{Kind: "exec", SQL: stmt}
		}
	}

	if len(body.Ops) == 0 {
		writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?} or {statements:[sql...]}")
		return
	}
	ctx, cancel := g.withTimeout(r.Context())
@@ -501,8 +515,8 @@ func (g *HTTPGateway) handleSchema(w http.ResponseWriter, r *http.Request) {
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{
		"objects": rows,
		"count":   len(rows),
		"tables": rows,
		"count":  len(rows),
	})
}

@@ -8,18 +8,18 @@ import (
func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
	c.mu.RLock()
	defer c.mu.RUnlock()

	activeCount := 0
	inactiveCount := 0
	totalHealth := 0.0
	currentLeader := ""

	now := time.Now()

	for nodeID, health := range c.peerHealth {
		if health.Status == "active" {
			activeCount++

			// Calculate health score (0-100) based on last seen
			timeSinceLastSeen := now.Sub(health.LastSeen)
			healthScore := 100.0
@@ -34,22 +34,22 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
		} else {
			inactiveCount++
		}

		// Try to determine leader

		// Try to determine leader (highest log index is likely the leader)
		if peer, ok := c.knownPeers[nodeID]; ok {
			// We'd need to check the actual leader status from RQLite
			// For now, bootstrap nodes are more likely to be leader
			if peer.NodeType == "bootstrap" && currentLeader == "" {
			// For now, use highest log index as heuristic
			if currentLeader == "" || peer.RaftLogIndex > c.knownPeers[currentLeader].RaftLogIndex {
				currentLeader = nodeID
			}
		}
	}

	averageHealth := 0.0
	if activeCount > 0 {
		averageHealth = totalHealth / float64(activeCount)
	}

	// Determine discovery status
	discoveryStatus := "healthy"
	if len(c.knownPeers) == 0 {
@@ -59,7 +59,7 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
	} else if averageHealth < 50 {
		discoveryStatus = "degraded"
	}

	return &ClusterMetrics{
		ClusterSize: len(c.knownPeers),
		ActiveNodes: activeCount,
@@ -71,4 +71,3 @@ func (c *ClusterDiscoveryService) GetMetrics() *ClusterMetrics {
		AveragePeerHealth: averageHealth,
	}
}
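
The hunk above elides the body of the health-score calculation, so as a purely hypothetical illustration, a linear decay such as the following would yield the 0-100 scores that feed totalHealth and AveragePeerHealth; the service's actual formula is not shown in this diff:

// Hypothetical sketch only: one possible decay from "time since last seen"
// to a 0-100 health score. Not the formula used by ClusterDiscoveryService.
package main

import (
	"fmt"
	"time"
)

func healthScoreSketch(sinceLastSeen time.Duration) float64 {
	score := 100.0
	grace := 30 * time.Second // full score while recently seen
	window := 5 * time.Minute // decays to zero over five minutes of silence
	if sinceLastSeen > grace {
		score -= 100 * float64(sinceLastSeen-grace) / float64(window)
	}
	if score < 0 {
		score = 0
	}
	return score
}

func main() {
	fmt.Println(healthScoreSketch(10 * time.Second)) // 100
	fmt.Println(healthScoreSketch(2 * time.Minute))  // 70
	fmt.Println(healthScoreSketch(10 * time.Minute)) // 0
}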

1001
pkg/rqlite/rqlite.go
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff