Compare commits

...

90 Commits

Author SHA1 Message Date
anonpenguin23
d5cfb12435 Updated makefile 2026-02-09 15:23:20 +02:00
anonpenguin23
a297a14b44 Updated docs and bug fixes and updated redeploy script 2026-02-09 15:23:02 +02:00
anonpenguin23
e2b38c409a Fixed bug on limiting to 10 nodes on cluster because of WG 2026-02-09 09:12:08 +02:00
anonpenguin
5d543b2662
Merge pull request #79 from DeBrosOfficial/0.100.0
0.100.0
2026-02-06 14:26:10 +02:00
anonpenguin23
b382350f76 Rate limit fixes 2026-02-06 11:09:34 +02:00
anonpenguin23
7690b22c0a Improved performance on request journey with cache and some tricks 2026-02-06 08:30:11 +02:00
anonpenguin23
3999253685 Create redeploy script with creds 2026-02-06 07:23:28 +02:00
anonpenguin23
854523c3a9 Fixed bugs on pubsub and ipfs 2026-02-06 07:21:26 +02:00
anonpenguin23
02b5c095d0 More bug fixing 2026-02-05 16:12:52 +02:00
anonpenguin23
a7f100038d Fixed system service sudoer error on debros user 2026-02-05 13:32:06 +02:00
anonpenguin23
c855b790f8 Updated the way we spawn services on namespace added systemd 2026-02-04 17:17:01 +02:00
anonpenguin23
f972358e78 Bored of fixing bugs 2026-02-04 16:14:49 +02:00
anonpenguin23
0c4af88388 Updated docs 2026-02-03 17:34:07 +02:00
anonpenguin23
d85ed032f8 Bug fixing 2026-02-03 17:27:36 +02:00
anonpenguin23
156de7eb19 Bug fixing 2026-02-03 13:59:03 +02:00
anonpenguin23
65ffd28151 DNS Bug Fixing Filtering out private WG ip's 2026-02-03 07:01:54 +02:00
anonpenguin23
11d5c1b19a Bug fixing 2026-02-02 16:18:13 +02:00
anonpenguin23
859c30fcd9 Bug fixing 2026-02-02 14:55:29 +02:00
anonpenguin23
79a489d650 Fix ensure only nameservers nodes added on schema for caddy load balancing 2026-02-02 11:17:54 +02:00
anonpenguin23
e95ecfb12a Fixed filter our prviate ips on dns register 2026-02-02 09:31:47 +02:00
anonpenguin23
b43e6d77b7 Fixed zombie rqlite on upgrade 2026-02-02 09:19:02 +02:00
anonpenguin23
e3dd359e55 Bug fixing 2026-02-02 08:39:42 +02:00
anonpenguin23
765ce46ea7 fixed ipfs problem forming cluster 2026-02-01 18:19:43 +02:00
anonpenguin23
3343ade433 New check node health script 2026-02-01 17:36:03 +02:00
anonpenguin23
c7036cb931 Fixed ipfs blocking WG 2026-02-01 17:09:58 +02:00
anonpenguin23
9c52287af9 fixed upload scripts 2026-02-01 17:01:32 +02:00
anonpenguin23
af5f5f9893 WG Fix for IPFS 2026-02-01 16:17:36 +02:00
anonpenguin23
683ce50106 Made building faster 2026-02-01 16:15:37 +02:00
anonpenguin23
c401fdcd74 fixed more bugs and updated docs 2026-02-01 15:58:28 +02:00
anonpenguin23
73dfe22438 fixes 2026-02-01 14:26:36 +02:00
anonpenguin23
4b3b7b3458 updated docs 2026-02-01 12:06:20 +02:00
anonpenguin23
9282fe64ee Deployement updates 2026-02-01 12:01:31 +02:00
anonpenguin23
b5109f1ee8 Added delete namespace handler 2026-01-31 13:13:09 +02:00
anonpenguin23
16eaf9a129 Fixed olric bug 2026-01-31 13:11:26 +02:00
anonpenguin23
8c392194bb Fixed olric cluster problem 2026-01-31 12:14:49 +02:00
anonpenguin23
51371e199d Added self signed cert fallback, fixed dns bugs 2026-01-31 10:07:15 +02:00
anonpenguin23
04f345f9ee Added flags on cli auth login 2026-01-31 07:27:55 +02:00
anonpenguin23
810094771d Updated docs and fixed WG bugs and ip's bugs 2026-01-31 07:09:09 +02:00
anonpenguin23
4acea72467 Added wireguard and updated installation process and added more tests 2026-01-30 15:30:18 +02:00
anonpenguin23
dcaf695fbc Fixed more tests, fixed gateway ip to use domain 2026-01-30 06:30:04 +02:00
anonpenguin23
9a8fba3f47 Fixed some broken tests 2026-01-30 05:35:50 +02:00
anonpenguin23
46aa2f2869 fixed some e2e tests 2026-01-29 15:05:50 +02:00
anonpenguin23
7b12dde469 Fixed dns failover middleware 2026-01-29 13:07:05 +02:00
anonpenguin23
82963c960e Updated docs and added replication and load balancing for deployments 2026-01-29 11:44:50 +02:00
anonpenguin23
d6106bcbb8 Added nyx auto install with anyone relay 2026-01-29 10:23:40 +02:00
anonpenguin23
15ecf366d5 Added Stats for deployments on CLI 2026-01-29 10:13:29 +02:00
anonpenguin23
e706ed3397 Fixed failing unit test on handlers 2026-01-29 09:54:26 +02:00
anonpenguin23
42c0c61d19 Round Robin DNS fix for deployments (update, rollback etc) 2026-01-29 09:53:11 +02:00
anonpenguin23
cd4189f64b Updated testdata for e2e tests 2026-01-29 09:29:41 +02:00
anonpenguin23
d8c93f6ee9 Fixed services on caddy 2026-01-29 08:56:28 +02:00
anonpenguin23
571f8babb4 Fixed IPFS systemd service and deploy issue on nextjs 2026-01-29 08:38:33 +02:00
anonpenguin23
4b24b0aa6c Fixed swarm problem flag and install TUI problem 2026-01-29 08:03:06 +02:00
anonpenguin23
6397efde25 updated cli env to use https 2026-01-29 07:52:59 +02:00
anonpenguin23
29581bec51 refactored all e2e tests 2026-01-29 07:50:40 +02:00
anonpenguin23
81414722cd core dns https issue 2026-01-29 07:45:40 +02:00
anonpenguin23
c3d6500785 fixed cert issue 2026-01-29 07:45:15 +02:00
anonpenguin23
5ec292a4f2 fixed bugs on dns for deployment 2026-01-29 07:22:32 +02:00
anonpenguin23
d4f5f3b999 added more tests 2026-01-28 14:30:28 +02:00
anonpenguin23
c3f87aede7 bug fixing on namespaces 2026-01-28 13:33:15 +02:00
anonpenguin23
7ded21939b fixed test issues 2026-01-28 11:52:58 +02:00
anonpenguin23
edd9c1f3dc namespaces on gateway, load balancer and rqlite and olric namespaces 2026-01-28 11:24:21 +02:00
anonpenguin23
468ca06398 added support for anyone relay with rewards 2026-01-28 08:36:57 +02:00
anonpenguin23
c827651245 fixed mobile not running e2e tests and process update 2026-01-26 15:55:40 +02:00
anonpenguin23
2c374b2156 fixed some bugs on tests and nextjs and nodejs 2026-01-26 15:19:00 +02:00
anonpenguin23
039c246d47 fixed nextjs problem 2026-01-26 15:12:51 +02:00
anonpenguin23
380b10add3 fixing bugs on tests and on codebase 2026-01-26 14:41:26 +02:00
anonpenguin23
1a717537e5 enchanced e2e tests, fixed rqlite issue 2026-01-26 10:04:30 +02:00
anonpenguin23
e94da3a639 Fixed problem on ipfs 2026-01-26 08:52:52 +02:00
anonpenguin23
6c3d16c332 fixed bug on nextjs deployment 2026-01-26 08:10:19 +02:00
anonpenguin23
ec664466c0 Extra tests and a lot of bug fixing 2026-01-26 07:53:35 +02:00
anonpenguin23
6101455f4a bug fixing 2026-01-24 17:37:52 +02:00
anonpenguin23
3d3b0d2ee6 update install and upgrade 2026-01-24 16:42:58 +02:00
anonpenguin23
2281899784 removed executable 2026-01-24 16:03:19 +02:00
anonpenguin23
fb229af2a0 pushed more changes 2026-01-24 16:00:28 +02:00
anonpenguin23
00c9792780 updated docs 2026-01-24 13:18:14 +02:00
anonpenguin23
fc0b958b1e fixed deployments 2026-01-24 12:55:17 +02:00
anonpenguin23
84c9b9ab9b fixes 2026-01-24 11:12:00 +02:00
anonpenguin23
da8c9822f4 Changed from dbn to orama 2026-01-24 09:40:42 +02:00
anonpenguin23
b1011c29b5 added support for different domain except orama.network 2026-01-24 09:26:31 +02:00
anonpenguin23
ec66213e2e updated docs 2026-01-22 18:55:00 +02:00
anonpenguin23
5547c8ccb5 some more fixes 2026-01-22 18:23:37 +02:00
anonpenguin23
1c2bde2d81 fixed ore tests 2026-01-22 18:00:24 +02:00
anonpenguin23
b33da4282b more test fixes 2026-01-22 17:49:10 +02:00
anonpenguin23
903bef14a3 fixed some more tests 2026-01-22 17:13:08 +02:00
anonpenguin23
0a7e3ba3c7 did some fixes 2026-01-22 16:05:03 +02:00
anonpenguin23
c2071586f8 fixed more tests 2026-01-22 15:42:54 +02:00
anonpenguin23
1338b32a0e fixing tests 2026-01-22 15:21:46 +02:00
anonpenguin23
76bbf23f25 fixed test problems 2026-01-22 14:52:50 +02:00
anonpenguin23
0dcde29f7c added some tests 2026-01-22 14:39:50 +02:00
anonpenguin23
9fc9bbb8e5 a lot of changes 2026-01-22 13:04:52 +02:00
267 changed files with 47407 additions and 3222 deletions

View File

@ -0,0 +1,6 @@
# THIS IS AUTOGENERATED. DO NOT EDIT MANUALLY
version = 1
name = "network"
[setup]
script = "export MCP_BEARER_TOKEN=\"ra_9941ab97eb51668394a68963a2ab6fead0ca942afe437a6e2f4a520efcb24036\""

View File

@ -82,7 +82,7 @@ jobs:
Priority: optional
Architecture: ${ARCH}
Depends: libc6
Maintainer: DeBros Team <team@debros.network>
Maintainer: DeBros Team <team@orama.network>
Description: Orama Network - Distributed P2P Database System
Orama is a distributed peer-to-peer network that combines
RQLite for distributed SQL, IPFS for content-addressed storage,

View File

@ -34,6 +34,7 @@ jobs:
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HOMEBREW_TAP_TOKEN: ${{ secrets.HOMEBREW_TAP_TOKEN }}
- name: Upload artifacts
uses: actions/upload-artifact@v4
@ -42,32 +43,26 @@ jobs:
path: dist/
retention-days: 5
# Optional: Publish to GitHub Packages (requires additional setup)
publish-packages:
# Verify release artifacts
verify-release:
runs-on: ubuntu-latest
needs: build-release
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v4
with:
name: release-artifacts
path: dist/
- name: Publish to GitHub Packages
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: List release artifacts
run: |
echo "Publishing Debian packages to GitHub Packages..."
for deb in dist/*.deb; do
if [ -f "$deb" ]; then
curl -H "Authorization: token $GITHUB_TOKEN" \
-H "Content-Type: application/octet-stream" \
--data-binary @"$deb" \
"https://uploads.github.com/repos/${{ github.repository }}/releases/upload?name=$(basename "$deb")"
fi
done
echo "=== Release Artifacts ==="
ls -la dist/
echo ""
echo "=== .deb packages ==="
ls -la dist/*.deb 2>/dev/null || echo "No .deb files found"
echo ""
echo "=== Archives ==="
ls -la dist/*.tar.gz 2>/dev/null || echo "No .tar.gz files found"

22
.gitignore vendored
View File

@ -30,6 +30,8 @@ dist/
# OS generated files
.DS_Store
.codex/
redeploy-6.sh
.DS_Store?
._*
.Spotlight-V100
@ -45,6 +47,9 @@ Thumbs.db
.env.local
.env.*.local
# E2E test config (contains production credentials)
e2e/config.yaml
# Temporary files
tmp/
temp/
@ -80,4 +85,19 @@ configs/
.claude/
.mcp.json
.cursor/
.cursor/
# Remote node credentials
scripts/remote-nodes.conf
orama-cli-linux
rnd/
keys_backup/
vps.txt
bin-linux/
website/

View File

@ -1,17 +1,21 @@
# GoReleaser Configuration for DeBros Network
# Builds and releases the dbn binary for multiple platforms
# Other binaries (node, gateway, identity) are installed via: dbn setup
# Builds and releases orama (CLI) and orama-node binaries
# Publishes to: GitHub Releases, Homebrew, and apt (.deb packages)
project_name: debros-network
env:
- GO111MODULE=on
before:
hooks:
- go mod tidy
builds:
# dbn binary - only build the CLI
- id: dbn
# orama CLI binary
- id: orama
main: ./cmd/cli
binary: dbn
binary: orama
goos:
- linux
- darwin
@ -25,18 +29,107 @@ builds:
- -X main.date={{.Date}}
mod_timestamp: "{{ .CommitTimestamp }}"
# orama-node binary (Linux only for apt)
- id: orama-node
main: ./cmd/node
binary: orama-node
goos:
- linux
goarch:
- amd64
- arm64
ldflags:
- -s -w
- -X main.version={{.Version}}
- -X main.commit={{.ShortCommit}}
- -X main.date={{.Date}}
mod_timestamp: "{{ .CommitTimestamp }}"
archives:
# Tar.gz archives for dbn
- id: binaries
# Tar.gz archives for orama CLI
- id: orama-archives
builds:
- orama
format: tar.gz
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
name_template: "orama_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- README.md
- LICENSE
- CHANGELOG.md
format_overrides:
- goos: windows
format: zip
# Tar.gz archives for orama-node
- id: orama-node-archives
builds:
- orama-node
format: tar.gz
name_template: "orama-node_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- README.md
- LICENSE
# Debian packages for apt
nfpms:
# orama CLI .deb package
- id: orama-deb
package_name: orama
builds:
- orama
vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <support@debros.io>
description: CLI tool for the Orama decentralized network
license: MIT
formats:
- deb
bindir: /usr/bin
section: utils
priority: optional
contents:
- src: ./README.md
dst: /usr/share/doc/orama/README.md
deb:
lintian_overrides:
- statically-linked-binary
# orama-node .deb package
- id: orama-node-deb
package_name: orama-node
builds:
- orama-node
vendor: DeBros
homepage: https://github.com/DeBrosOfficial/network
maintainer: DeBros <support@debros.io>
description: Node daemon for the Orama decentralized network
license: MIT
formats:
- deb
bindir: /usr/bin
section: net
priority: optional
contents:
- src: ./README.md
dst: /usr/share/doc/orama-node/README.md
deb:
lintian_overrides:
- statically-linked-binary
# Homebrew tap for macOS (orama CLI only)
brews:
- name: orama
ids:
- orama-archives
repository:
owner: DeBrosOfficial
name: homebrew-tap
token: "{{ .Env.HOMEBREW_TAP_TOKEN }}"
folder: Formula
homepage: https://github.com/DeBrosOfficial/network
description: CLI tool for the Orama decentralized network
license: MIT
install: |
bin.install "orama"
test: |
system "#{bin}/orama", "--version"
checksum:
name_template: "checksums.txt"
@ -64,3 +157,5 @@ release:
draft: false
prerelease: auto
name_template: "Release {{.Version}}"
extra_files:
- glob: ./dist/*.deb

130
Makefile
View File

@ -8,21 +8,89 @@ test:
# Gateway-focused E2E tests assume gateway and nodes are already running
# Auto-discovers configuration from ~/.orama and queries database for API key
# No environment variables required
.PHONY: test-e2e
.PHONY: test-e2e test-e2e-deployments test-e2e-fullstack test-e2e-https test-e2e-quick test-e2e-local test-e2e-prod test-e2e-shared test-e2e-cluster test-e2e-integration test-e2e-production
# Check if gateway is running (helper)
.PHONY: check-gateway
check-gateway:
@if ! curl -sf http://localhost:6001/v1/health > /dev/null 2>&1; then \
echo "❌ Gateway not running on localhost:6001"; \
echo ""; \
echo "To run tests locally:"; \
echo " 1. Start the dev environment: make dev"; \
echo " 2. Wait for all services to start (~30 seconds)"; \
echo " 3. Run tests: make test-e2e-local"; \
echo ""; \
echo "To run tests against production:"; \
echo " ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e"; \
exit 1; \
fi
@echo "✅ Gateway is running"
# Local E2E tests - checks gateway first
test-e2e-local: check-gateway
@echo "Running E2E tests against local dev environment..."
go test -v -tags e2e -timeout 30m ./e2e/...
# Production E2E tests - includes production-only tests
test-e2e-prod:
@if [ -z "$$ORAMA_GATEWAY_URL" ]; then \
echo "❌ ORAMA_GATEWAY_URL not set"; \
echo "Usage: ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"; \
exit 1; \
fi
@echo "Running E2E tests (including production-only) against $$ORAMA_GATEWAY_URL..."
go test -v -tags "e2e production" -timeout 30m ./e2e/...
# Generic e2e target (works with both local and production)
test-e2e:
@echo "Running comprehensive E2E tests..."
@echo "Auto-discovering configuration from ~/.orama..."
go test -v -tags e2e ./e2e
@echo "Tip: Use 'make test-e2e-local' for local or 'make test-e2e-prod' for production"
go test -v -tags e2e -timeout 30m ./e2e/...
test-e2e-deployments:
@echo "Running deployment E2E tests..."
go test -v -tags e2e -timeout 15m ./e2e/deployments/...
test-e2e-fullstack:
@echo "Running fullstack E2E tests..."
go test -v -tags e2e -timeout 20m -run "TestFullStack" ./e2e/...
test-e2e-https:
@echo "Running HTTPS/external access E2E tests..."
go test -v -tags e2e -timeout 10m -run "TestHTTPS" ./e2e/...
test-e2e-shared:
@echo "Running shared E2E tests..."
go test -v -tags e2e -timeout 10m ./e2e/shared/...
test-e2e-cluster:
@echo "Running cluster E2E tests..."
go test -v -tags e2e -timeout 15m ./e2e/cluster/...
test-e2e-integration:
@echo "Running integration E2E tests..."
go test -v -tags e2e -timeout 20m ./e2e/integration/...
test-e2e-production:
@echo "Running production-only E2E tests..."
go test -v -tags "e2e production" -timeout 15m ./e2e/production/...
test-e2e-quick:
@echo "Running quick E2E smoke tests..."
go test -v -tags e2e -timeout 5m -run "TestStatic|TestHealth" ./e2e/...
# Network - Distributed P2P Database System
# Makefile for development and build tasks
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.90.0
VERSION := 0.101.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
LDFLAGS_LINUX := -s -w $(LDFLAGS)
# Build targets
build: deps
@ -36,6 +104,46 @@ build: deps
go build -ldflags "$(LDFLAGS) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -o bin/gateway ./cmd/gateway
@echo "Build complete! Run ./bin/orama version"
# Cross-compile all binaries for Linux (used with --pre-built flag on VPS)
# Builds: DeBros binaries + Olric + CoreDNS (with rqlite plugin) + Caddy (with orama DNS module)
build-linux: deps
@echo "Cross-compiling all binaries for linux/amd64 (version=$(VERSION))..."
@mkdir -p bin-linux
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/identity ./cmd/identity
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-node ./cmd/node
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama cmd/cli/main.go
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/rqlite-mcp ./cmd/rqlite-mcp
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX) -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildVersion=$(VERSION)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildCommit=$(COMMIT)' -X 'github.com/DeBrosOfficial/network/pkg/gateway.BuildTime=$(DATE)'" -trimpath -o bin-linux/gateway ./cmd/gateway
GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS_LINUX)" -trimpath -o bin-linux/orama-cli ./cmd/cli
@echo "Building Olric for linux/amd64..."
GOOS=linux GOARCH=amd64 go build -ldflags "-s -w" -trimpath -o bin-linux/olric-server github.com/olric-data/olric/cmd/olric-server
@echo "✓ All Linux binaries built in bin-linux/"
@echo ""
@echo "Next steps:"
@echo " 1. Build CoreDNS: make build-linux-coredns"
@echo " 2. Build Caddy: make build-linux-caddy"
@echo " 3. Or build all: make build-linux-all"
# Build CoreDNS with rqlite plugin for Linux
build-linux-coredns:
@bash scripts/build-linux-coredns.sh
# Build Caddy with orama DNS module for Linux
build-linux-caddy:
@bash scripts/build-linux-caddy.sh
# Build everything for Linux (all binaries + CoreDNS + Caddy)
build-linux-all: build-linux build-linux-coredns build-linux-caddy
@echo ""
@echo "✅ All Linux binaries ready in bin-linux/:"
@ls -la bin-linux/
@echo ""
@echo "Deploy to VPS:"
@echo " scp bin-linux/* ubuntu@<ip>:/home/debros/bin/"
@echo " scp bin-linux/coredns ubuntu@<ip>:/usr/local/bin/coredns"
@echo " scp bin-linux/caddy ubuntu@<ip>:/usr/bin/caddy"
@echo " sudo orama install --pre-built --no-pull ..."
# Install git hooks
install-hooks:
@echo "Installing git hooks..."
@ -93,7 +201,7 @@ help:
@echo "Available targets:"
@echo " build - Build all executables"
@echo " clean - Clean build artifacts"
@echo " test - Run tests"
@echo " test - Run unit tests"
@echo ""
@echo "Local Development (Recommended):"
@echo " make dev - Start full development stack with one command"
@ -103,6 +211,20 @@ help:
@echo " make stop - Gracefully stop all development services"
@echo " make kill - Force kill all development services (use if stop fails)"
@echo ""
@echo "E2E Testing:"
@echo " make test-e2e-local - Run E2E tests against local dev (checks gateway first)"
@echo " make test-e2e-prod - Run all E2E tests incl. production-only (needs ORAMA_GATEWAY_URL)"
@echo " make test-e2e-shared - Run shared E2E tests (cache, storage, pubsub, auth)"
@echo " make test-e2e-cluster - Run cluster E2E tests (libp2p, olric, rqlite, namespace)"
@echo " make test-e2e-integration - Run integration E2E tests (fullstack, persistence, concurrency)"
@echo " make test-e2e-deployments - Run deployment E2E tests"
@echo " make test-e2e-production - Run production-only E2E tests (DNS, HTTPS, cross-node)"
@echo " make test-e2e-quick - Quick smoke tests (static deploys, health checks)"
@echo " make test-e2e - Generic E2E tests (auto-discovers config)"
@echo ""
@echo " Example production test:"
@echo " ORAMA_GATEWAY_URL=https://dbrs.space make test-e2e-prod"
@echo ""
@echo "Development Management (via orama):"
@echo " ./bin/orama dev status - Show status of all dev services"
@echo " ./bin/orama dev logs <component> [--follow]"

226
README.md
View File

@ -9,11 +9,97 @@ A high-performance API Gateway and distributed platform built in Go. Provides a
- **🔐 Authentication** - Wallet signatures, API keys, JWT tokens
- **💾 Storage** - IPFS-based decentralized file storage with encryption
- **⚡ Cache** - Distributed cache with Olric (in-memory key-value)
- **🗄️ Database** - RQLite distributed SQL with Raft consensus
- **🗄️ Database** - RQLite distributed SQL with Raft consensus + Per-namespace SQLite databases
- **📡 Pub/Sub** - Real-time messaging via LibP2P and WebSocket
- **⚙️ Serverless** - WebAssembly function execution with host functions
- **🌐 HTTP Gateway** - Unified REST API with automatic HTTPS (Let's Encrypt)
- **📦 Client SDK** - Type-safe Go SDK for all services
- **🚀 App Deployments** - Deploy React, Next.js, Go, Node.js apps with automatic domains
- **🗄️ SQLite Databases** - Per-namespace isolated databases with IPFS backups
## Application Deployments
Deploy full-stack applications with automatic domain assignment and namespace isolation.
### Deploy a React App
```bash
# Build your app
cd my-react-app
npm run build
# Deploy to Orama Network
orama deploy static ./dist --name my-app
# Your app is now live at: https://my-app.orama.network
```
### Deploy Next.js with SSR
```bash
cd my-nextjs-app
# Ensure next.config.js has: output: 'standalone'
npm run build
orama deploy nextjs . --name my-nextjs --ssr
# Live at: https://my-nextjs.orama.network
```
### Deploy Go Backend
```bash
# Build for Linux (name binary 'app' for auto-detection)
GOOS=linux GOARCH=amd64 go build -o app main.go
# Deploy (must implement /health endpoint)
orama deploy go ./app --name my-api
# API live at: https://my-api.orama.network
```
### Create SQLite Database
```bash
# Create database
orama db create my-database
# Create schema
orama db query my-database "CREATE TABLE users (id INT, name TEXT)"
# Insert data
orama db query my-database "INSERT INTO users VALUES (1, 'Alice')"
# Query data
orama db query my-database "SELECT * FROM users"
# Backup to IPFS
orama db backup my-database
```
### Full-Stack Example
Deploy a complete app with React frontend, Go backend, and SQLite database:
```bash
# 1. Create database
orama db create myapp-db
orama db query myapp-db "CREATE TABLE users (id INT PRIMARY KEY, name TEXT)"
# 2. Deploy Go backend (connects to database)
GOOS=linux GOARCH=amd64 go build -o api main.go
orama deploy go ./api --name myapp-api
# 3. Deploy React frontend (calls backend API)
cd frontend && npm run build
orama deploy static ./dist --name myapp
# Access:
# Frontend: https://myapp.orama.network
# Backend: https://myapp-api.orama.network
```
**📖 Full Guide**: See [Deployment Guide](docs/DEPLOYMENT_GUIDE.md) for complete documentation, examples, and best practices.
## Quick Start
@ -108,36 +194,63 @@ make build
## CLI Commands
### Authentication
```bash
orama auth login # Authenticate with wallet
orama auth status # Check authentication
orama auth logout # Clear credentials
```
### Application Deployments
```bash
# Deploy applications
orama deploy static <path> --name myapp # React, Vue, static sites
orama deploy nextjs <path> --name myapp --ssr # Next.js with SSR (requires output: 'standalone')
orama deploy go <path> --name myapp # Go binaries (must have /health endpoint)
orama deploy nodejs <path> --name myapp # Node.js apps (must have /health endpoint)
# Manage deployments
orama deployments list # List all deployments
orama deployments get <name> # Get deployment details
orama deployments logs <name> --follow # View logs
orama deployments delete <name> # Delete deployment
orama deployments rollback <name> --version 1 # Rollback to version
```
### SQLite Databases
```bash
orama db create <name> # Create database
orama db query <name> "SELECT * FROM t" # Execute SQL query
orama db list # List all databases
orama db backup <name> # Backup to IPFS
orama db backups <name> # List backups
```
### Network Status
```bash
./bin/orama health # Cluster health check
./bin/orama peers # List connected peers
./bin/orama status # Network status
orama health # Cluster health check
orama peers # List connected peers
orama status # Network status
```
### Database Operations
### RQLite Operations
```bash
./bin/orama query "SELECT * FROM users"
./bin/orama query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
./bin/orama transaction --file ops.json
orama query "SELECT * FROM users"
orama query "CREATE TABLE users (id INTEGER PRIMARY KEY)"
orama transaction --file ops.json
```
### Pub/Sub
```bash
./bin/orama pubsub publish <topic> <message>
./bin/orama pubsub subscribe <topic> 30s
./bin/orama pubsub topics
```
### Authentication
```bash
./bin/orama auth login
./bin/orama auth status
./bin/orama auth logout
orama pubsub publish <topic> <message>
orama pubsub subscribe <topic> 30s
orama pubsub topics
```
## Serverless Functions (WASM)
@ -211,18 +324,81 @@ curl -X DELETE http://localhost:6001/v1/functions/hello-world?namespace=default
- 5001 - RQLite HTTP API
- 6001 - Unified Gateway
- 8080 - IPFS Gateway
- 9050 - Anyone Client SOCKS5 proxy
- 9050 - Anyone SOCKS5 proxy
- 9094 - IPFS Cluster API
- 3320/3322 - Olric Cache
### Installation
**Anyone Relay Mode (optional, for earning rewards):**
- 9001 - Anyone ORPort (relay traffic, must be open externally)
### Anyone Network Integration
Orama Network integrates with the [Anyone Protocol](https://anyone.io) for anonymous routing. By default, nodes run as **clients** (consuming the network). Optionally, you can run as a **relay operator** to earn rewards.
**Client Mode (Default):**
- Routes traffic through Anyone network for anonymity
- SOCKS5 proxy on localhost:9050
- No rewards, just consumes network
**Relay Mode (Earn Rewards):**
- Provide bandwidth to the Anyone network
- Earn $ANYONE tokens as a relay operator
- Requires 100 $ANYONE tokens in your wallet
- Requires ORPort (9001) open to the internet
```bash
# Install via APT
echo "deb https://debrosficial.github.io/network/apt stable main" | sudo tee /etc/apt/sources.list.d/debros.list
# Install as relay operator (earn rewards)
sudo orama install --vps-ip <IP> --domain <domain> \
--anyone-relay \
--anyone-nickname "MyRelay" \
--anyone-contact "operator@email.com" \
--anyone-wallet "0x1234...abcd"
sudo apt update && sudo apt install orama
# With exit relay (legal implications apply)
sudo orama install --vps-ip <IP> --domain <domain> \
--anyone-relay \
--anyone-exit \
--anyone-nickname "MyExitRelay" \
--anyone-contact "operator@email.com" \
--anyone-wallet "0x1234...abcd"
# Migrate existing Anyone installation
sudo orama install --vps-ip <IP> --domain <domain> \
--anyone-relay \
--anyone-migrate \
--anyone-nickname "MyRelay" \
--anyone-contact "operator@email.com" \
--anyone-wallet "0x1234...abcd"
```
**Important:** After installation, register your relay at [dashboard.anyone.io](https://dashboard.anyone.io) to start earning rewards.
### Installation
**macOS (Homebrew):**
```bash
brew install DeBrosOfficial/tap/orama
```
**Linux (Debian/Ubuntu):**
```bash
# Download and install the latest .deb package
curl -sL https://github.com/DeBrosOfficial/network/releases/latest/download/orama_$(curl -s https://api.github.com/repos/DeBrosOfficial/network/releases/latest | grep tag_name | cut -d '"' -f 4 | tr -d 'v')_linux_amd64.deb -o orama.deb
sudo dpkg -i orama.deb
```
**From Source:**
```bash
go install github.com/DeBrosOfficial/network/cmd/cli@latest
```
**Setup (after installation):**
```bash
sudo orama install --interactive
```
@ -331,10 +507,12 @@ See `openapi/gateway.yaml` for complete API specification.
## Documentation
- **[Deployment Guide](docs/DEPLOYMENT_GUIDE.md)** - Deploy React, Next.js, Go apps and manage databases
- **[Architecture Guide](docs/ARCHITECTURE.md)** - System architecture and design patterns
- **[Client SDK](docs/CLIENT_SDK.md)** - Go SDK documentation and examples
- **[Gateway API](docs/GATEWAY_API.md)** - Complete HTTP API reference
- **[Security Deployment](docs/SECURITY_DEPLOYMENT_GUIDE.md)** - Production security hardening
- **[Testing Plan](docs/TESTING_PLAN.md)** - Comprehensive testing strategy and implementation
## Resources

View File

@ -53,6 +53,8 @@ func main() {
cli.HandleProdCommand(args)
// Direct production commands (new simplified interface)
case "invite":
cli.HandleProdCommand(append([]string{"invite"}, args...))
case "install":
cli.HandleProdCommand(append([]string{"install"}, args...))
case "upgrade":
@ -76,6 +78,24 @@ func main() {
case "auth":
cli.HandleAuthCommand(args)
// Deployment commands
case "deploy":
cli.HandleDeployCommand(args)
case "deployments":
cli.HandleDeploymentsCommand(args)
// Database commands
case "db":
cli.HandleDBCommand(args)
// Namespace management
case "namespace":
cli.HandleNamespaceCommand(args)
// Environment management
case "env":
cli.HandleEnvCommand(args)
// Help
case "help", "--help", "-h":
showHelp()
@ -132,19 +152,59 @@ func showHelp() {
fmt.Printf(" auth status - Show detailed auth info\n")
fmt.Printf(" auth help - Show auth command help\n\n")
fmt.Printf("📦 Deployments:\n")
fmt.Printf(" deploy static <path> - Deploy a static site (React, Vue, etc.)\n")
fmt.Printf(" deploy nextjs <path> - Deploy a Next.js application\n")
fmt.Printf(" deploy go <path> - Deploy a Go backend\n")
fmt.Printf(" deploy nodejs <path> - Deploy a Node.js backend\n")
fmt.Printf(" deployments list - List all deployments\n")
fmt.Printf(" deployments get <name> - Get deployment details\n")
fmt.Printf(" deployments logs <name> - View deployment logs\n")
fmt.Printf(" deployments delete <name> - Delete a deployment\n")
fmt.Printf(" deployments rollback <name> - Rollback to previous version\n\n")
fmt.Printf("🗄️ Databases:\n")
fmt.Printf(" db create <name> - Create a SQLite database\n")
fmt.Printf(" db query <name> \"<sql>\" - Execute SQL query\n")
fmt.Printf(" db list - List all databases\n")
fmt.Printf(" db backup <name> - Backup database to IPFS\n")
fmt.Printf(" db backups <name> - List database backups\n\n")
fmt.Printf("🏢 Namespaces:\n")
fmt.Printf(" namespace delete - Delete current namespace and all resources\n\n")
fmt.Printf("🌍 Environments:\n")
fmt.Printf(" env list - List all environments\n")
fmt.Printf(" env current - Show current environment\n")
fmt.Printf(" env switch <name> - Switch to environment\n\n")
fmt.Printf("Global Flags:\n")
fmt.Printf(" -f, --format <format> - Output format: table, json (default: table)\n")
fmt.Printf(" -t, --timeout <duration> - Operation timeout (default: 30s)\n")
fmt.Printf(" --help, -h - Show this help message\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" # Deploy a React app\n")
fmt.Printf(" cd my-react-app && npm run build\n")
fmt.Printf(" orama deploy static ./dist --name my-app\n\n")
fmt.Printf(" # Deploy a Next.js app with SSR\n")
fmt.Printf(" cd my-nextjs-app && npm run build\n")
fmt.Printf(" orama deploy nextjs . --name my-nextjs --ssr\n\n")
fmt.Printf(" # Create and use a database\n")
fmt.Printf(" orama db create my-db\n")
fmt.Printf(" orama db query my-db \"CREATE TABLE users (id INT, name TEXT)\"\n")
fmt.Printf(" orama db query my-db \"INSERT INTO users VALUES (1, 'Alice')\"\n\n")
fmt.Printf(" # Manage deployments\n")
fmt.Printf(" orama deployments list\n")
fmt.Printf(" orama deployments get my-app\n")
fmt.Printf(" orama deployments logs my-app --follow\n\n")
fmt.Printf(" # First node (creates new cluster)\n")
fmt.Printf(" sudo orama install --vps-ip 203.0.113.1 --domain node-1.orama.network\n\n")
fmt.Printf(" # Join existing cluster\n")
fmt.Printf(" sudo orama install --vps-ip 203.0.113.2 --domain node-2.orama.network \\\n")
fmt.Printf(" --peers /ip4/203.0.113.1/tcp/4001/p2p/12D3KooW... --cluster-secret <hex>\n\n")
fmt.Printf(" # Service management\n")
fmt.Printf(" orama status\n")
fmt.Printf(" orama logs node --follow\n")

View File

@ -77,6 +77,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"`
RQLiteDSN string `yaml:"rqlite_dsn"`
GlobalRQLiteDSN string `yaml:"global_rqlite_dsn"`
Peers []string `yaml:"bootstrap_peers"`
EnableHTTPS bool `yaml:"enable_https"`
DomainName string `yaml:"domain_name"`
@ -95,7 +96,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
zap.String("path", configPath),
zap.Error(err))
fmt.Fprintf(os.Stderr, "\nConfig file not found at %s\n", configPath)
fmt.Fprintf(os.Stderr, "Generate it using: dbn config init --type gateway\n")
fmt.Fprintf(os.Stderr, "Generate it using: orama config init --type gateway\n")
os.Exit(1)
}
@ -113,6 +114,7 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
ClientNamespace: "default",
BootstrapPeers: nil,
RQLiteDSN: "",
GlobalRQLiteDSN: "",
EnableHTTPS: false,
DomainName: "",
TLSCacheDir: "",
@ -133,6 +135,9 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
if v := strings.TrimSpace(y.RQLiteDSN); v != "" {
cfg.RQLiteDSN = v
}
if v := strings.TrimSpace(y.GlobalRQLiteDSN); v != "" {
cfg.GlobalRQLiteDSN = v
}
if len(y.Peers) > 0 {
var peers []string
for _, p := range y.Peers {

View File

@ -60,6 +60,12 @@ type MCPServer struct {
}
func NewMCPServer(rqliteURL string) (*MCPServer, error) {
// Disable gorqlite cluster discovery to avoid /nodes timeouts from unreachable peers
if strings.Contains(rqliteURL, "?") {
rqliteURL += "&disableClusterDiscovery=true"
} else {
rqliteURL += "?disableClusterDiscovery=true"
}
conn, err := gorqlite.Open(rqliteURL)
if err != nil {
return nil, err

View File

@ -52,6 +52,13 @@ The system follows a clean, layered architecture with clear separation of concer
│ │ │ │
│ Port 9094 │ │ In-Process │
└─────────────────┘ └──────────────┘
┌─────────────────┐
│ Anyone │
│ (Anonymity) │
│ │
│ Port 9050 │
└─────────────────┘
```
## Core Components
@ -226,7 +233,38 @@ pkg/config/
└── gateway.go
```
### 6. Shared Utilities
### 6. Anyone Integration (`pkg/anyoneproxy/`)
Integration with the Anyone Protocol for anonymous routing.
**Modes:**
| Mode | Purpose | Port | Rewards |
|------|---------|------|---------|
| Client | Route traffic anonymously | 9050 (SOCKS5) | No |
| Relay | Provide bandwidth to network | 9001 (ORPort) + 9050 | Yes ($ANYONE) |
**Key Files:**
- `pkg/anyoneproxy/socks.go` - SOCKS5 proxy client interface
- `pkg/gateway/anon_proxy_handler.go` - Anonymous proxy API endpoint
- `pkg/environments/production/installers/anyone_relay.go` - Relay installation
**Features:**
- Smart routing (bypasses proxy for local/private addresses)
- Automatic detection of existing Anyone installations
- Migration support for existing relay operators
- Exit relay mode with legal warnings
**API Endpoint:**
- `POST /v1/proxy/anon` - Route HTTP requests through Anyone network
**Relay Requirements:**
- Linux OS (Debian/Ubuntu)
- 100 $ANYONE tokens in wallet
- ORPort accessible from internet
- Registration at dashboard.anyone.io
### 7. Shared Utilities
**HTTP Utilities (`pkg/httputil/`):**
- Request parsing and validation
@ -315,12 +353,22 @@ Function Invocation:
- Refresh token support
- Claims-based authorization
### Network Security (WireGuard Mesh)
All inter-node communication is encrypted via a WireGuard VPN mesh:
- **WireGuard IPs:** Each node gets a private IP (10.0.0.x) used for all cluster traffic
- **UFW Firewall:** Only public ports are exposed: 22 (SSH), 53 (DNS, nameservers only), 80/443 (HTTP/HTTPS), 51820 (WireGuard UDP)
- **Internal services** (RQLite 5001/7001, IPFS 4001/4501, Olric 3320/3322, Gateway 6001) are only accessible via WireGuard or localhost
- **Invite tokens:** Single-use, time-limited tokens for secure node joining. No shared secrets on the CLI
- **Join flow:** New nodes authenticate via HTTPS (443), establish WireGuard tunnel, then join all services over the encrypted mesh
### TLS/HTTPS
- Automatic ACME (Let's Encrypt) certificate management
- Automatic ACME (Let's Encrypt) certificate management via Caddy
- TLS 1.3 support
- HTTP/2 enabled
- Certificate caching
- On-demand TLS for deployment custom domains
### Middleware Stack
@ -403,17 +451,26 @@ make test-e2e # Run E2E tests
### Production
```bash
# First node (creates cluster)
sudo orama install --vps-ip <IP> --domain node1.example.com
# First node (genesis — creates cluster)
# Nameserver nodes use the base domain as --domain
sudo orama install --vps-ip <IP> --domain example.com --base-domain example.com --nameserver
# Additional nodes (join cluster)
sudo orama install --vps-ip <IP> --domain node2.example.com \
--peers /dns4/node1.example.com/tcp/4001/p2p/<PEER_ID> \
--join <node1-ip>:7002 \
--cluster-secret <secret> \
--swarm-key <key>
# On the genesis node, generate an invite for a new node
orama invite
# Outputs: sudo orama install --join https://example.com --token <TOKEN> --vps-ip <NEW_IP>
# Additional nameserver nodes (join via invite token over HTTPS)
sudo orama install --join https://example.com --token <TOKEN> \
--vps-ip <IP> --domain example.com --base-domain example.com --nameserver
```
**Security:** Nodes join via single-use invite tokens over HTTPS. A WireGuard VPN tunnel
is established before any cluster services start. All inter-node traffic (RQLite, IPFS,
Olric, LibP2P) flows over the encrypted WireGuard mesh — no cluster ports are exposed
publicly. **Never use `http://<ip>:6001`** for joining — port 6001 is internal-only and
blocked by UFW. Use the domain (`https://node1.example.com`) or, if DNS is not yet
configured, use the IP over HTTP port 80 (`http://<ip>`) which goes through Caddy.
### Docker (Future)
Planned containerization with Docker Compose and Kubernetes support.

142
docs/CLEAN_NODE.md Normal file
View File

@ -0,0 +1,142 @@
# Clean Node — Full Reset Guide
How to completely remove all Orama Network state from a VPS so it can be reinstalled fresh.
## Quick Clean (Copy-Paste)
Run this as root or with sudo on the target VPS:
```bash
# 1. Stop and disable all services
sudo systemctl stop debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
sudo systemctl disable debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
# 2. Remove systemd service files
sudo rm -f /etc/systemd/system/debros-*.service
sudo rm -f /etc/systemd/system/coredns.service
sudo rm -f /etc/systemd/system/caddy.service
sudo systemctl daemon-reload
# 3. Tear down WireGuard
# Must stop the systemd unit first — wg-quick@wg0 is a oneshot with
# RemainAfterExit=yes, so it stays "active (exited)" even after the
# interface is removed. Without "stop", a future "systemctl start" is a no-op.
sudo systemctl stop wg-quick@wg0 2>/dev/null
sudo wg-quick down wg0 2>/dev/null
sudo systemctl disable wg-quick@wg0 2>/dev/null
sudo rm -f /etc/wireguard/wg0.conf
# 4. Reset UFW firewall
sudo ufw --force reset
sudo ufw allow 22/tcp
sudo ufw --force enable
# 5. Remove debros user and home directory
sudo userdel -r debros 2>/dev/null
sudo rm -rf /home/debros
# 6. Remove sudoers files
sudo rm -f /etc/sudoers.d/debros-access
sudo rm -f /etc/sudoers.d/debros-deployments
sudo rm -f /etc/sudoers.d/debros-wireguard
# 7. Remove CoreDNS config
sudo rm -rf /etc/coredns
# 8. Remove Caddy config and data
sudo rm -rf /etc/caddy
sudo rm -rf /var/lib/caddy
# 9. Remove deployment systemd services (dynamic)
sudo rm -f /etc/systemd/system/orama-deploy-*.service
sudo systemctl daemon-reload
# 10. Clean temp files
sudo rm -f /tmp/orama /tmp/network-source.tar.gz /tmp/network-source.zip
sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build
echo "Node cleaned. Ready for fresh install."
```
## What This Removes
| Category | Paths |
|----------|-------|
| **User** | `debros` system user and `/home/debros/` |
| **App data** | `/home/debros/.orama/` (configs, secrets, logs, IPFS, RQLite, Olric) |
| **Source code** | `/home/debros/src/` |
| **Binaries** | `/home/debros/bin/orama-node`, `/home/debros/bin/gateway` |
| **Systemd** | `debros-*.service`, `coredns.service`, `caddy.service`, `orama-deploy-*.service` |
| **WireGuard** | `/etc/wireguard/wg0.conf`, `wg-quick@wg0` systemd unit |
| **Firewall** | All UFW rules (reset to default + SSH only) |
| **Sudoers** | `/etc/sudoers.d/debros-*` |
| **CoreDNS** | `/etc/coredns/Corefile` |
| **Caddy** | `/etc/caddy/Caddyfile`, `/var/lib/caddy/` (TLS certs) |
| **Anyone Relay** | `debros-anyone-relay.service`, `debros-anyone-client.service` |
| **Temp files** | `/tmp/orama`, `/tmp/network-source.*`, build dirs |
## What This Does NOT Remove
These are shared system tools that may be used by other software. Remove manually if desired:
| Binary | Path | Remove Command |
|--------|------|----------------|
| RQLite | `/usr/local/bin/rqlited` | `sudo rm /usr/local/bin/rqlited` |
| IPFS | `/usr/local/bin/ipfs` | `sudo rm /usr/local/bin/ipfs` |
| IPFS Cluster | `/usr/local/bin/ipfs-cluster-service` | `sudo rm /usr/local/bin/ipfs-cluster-service` |
| Olric | `/usr/local/bin/olric-server` | `sudo rm /usr/local/bin/olric-server` |
| CoreDNS | `/usr/local/bin/coredns` | `sudo rm /usr/local/bin/coredns` |
| Caddy | `/usr/bin/caddy` | `sudo rm /usr/bin/caddy` |
| xcaddy | `/usr/local/bin/xcaddy` | `sudo rm /usr/local/bin/xcaddy` |
| Go | `/usr/local/go/` | `sudo rm -rf /usr/local/go` |
| Orama CLI | `/usr/local/bin/orama` | `sudo rm /usr/local/bin/orama` |
## Nuclear Clean (Remove Everything Including Binaries)
```bash
# Run quick clean above first, then:
sudo rm -f /usr/local/bin/rqlited
sudo rm -f /usr/local/bin/ipfs
sudo rm -f /usr/local/bin/ipfs-cluster-service
sudo rm -f /usr/local/bin/olric-server
sudo rm -f /usr/local/bin/coredns
sudo rm -f /usr/local/bin/xcaddy
sudo rm -f /usr/bin/caddy
sudo rm -f /usr/local/bin/orama
```
## Multi-Node Clean
To clean all nodes at once from your local machine:
```bash
# Define your nodes
NODES=(
"ubuntu@141.227.165.168:password1"
"ubuntu@141.227.165.154:password2"
"ubuntu@141.227.156.51:password3"
)
for entry in "${NODES[@]}"; do
IFS=: read -r userhost pass <<< "$entry"
echo "Cleaning $userhost..."
sshpass -p "$pass" ssh -o StrictHostKeyChecking=no "$userhost" 'bash -s' << 'CLEAN'
sudo systemctl stop debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
sudo systemctl disable debros-node debros-ipfs debros-ipfs-cluster debros-olric debros-anyone-relay debros-anyone-client coredns caddy 2>/dev/null
sudo rm -f /etc/systemd/system/debros-*.service /etc/systemd/system/coredns.service /etc/systemd/system/caddy.service /etc/systemd/system/orama-deploy-*.service
sudo systemctl daemon-reload
sudo systemctl stop wg-quick@wg0 2>/dev/null
sudo wg-quick down wg0 2>/dev/null
sudo systemctl disable wg-quick@wg0 2>/dev/null
sudo rm -f /etc/wireguard/wg0.conf
sudo ufw --force reset && sudo ufw allow 22/tcp && sudo ufw --force enable
sudo userdel -r debros 2>/dev/null
sudo rm -rf /home/debros
sudo rm -f /etc/sudoers.d/debros-access /etc/sudoers.d/debros-deployments /etc/sudoers.d/debros-wireguard
sudo rm -rf /etc/coredns /etc/caddy /var/lib/caddy
sudo rm -f /tmp/orama /tmp/network-source.tar.gz
sudo rm -rf /tmp/network-extract /tmp/coredns-build /tmp/caddy-build
echo "Done"
CLEAN
done
```

990
docs/DEPLOYMENT_GUIDE.md Normal file
View File

@ -0,0 +1,990 @@
# Orama Network Deployment Guide
Complete guide for deploying applications and managing databases on Orama Network.
## Table of Contents
- [Overview](#overview)
- [Authentication](#authentication)
- [Deploying Static Sites (React, Vue, etc.)](#deploying-static-sites)
- [Deploying Next.js Applications](#deploying-nextjs-applications)
- [Deploying Go Backends](#deploying-go-backends)
- [Deploying Node.js Backends](#deploying-nodejs-backends)
- [Managing SQLite Databases](#managing-sqlite-databases)
- [How Domains Work](#how-domains-work)
- [Full-Stack Application Example](#full-stack-application-example)
- [Managing Deployments](#managing-deployments)
- [Troubleshooting](#troubleshooting)
---
## Overview
Orama Network provides a decentralized platform for deploying web applications and managing databases. Each deployment:
- **Gets a unique domain** automatically (e.g., `myapp.orama.network`)
- **Isolated per namespace** - your data and apps are completely separate from others
- **Served from IPFS** (static) or **runs as a process** (dynamic apps)
- **Fully managed** - automatic health checks, restarts, and logging
### Supported Deployment Types
| Type | Description | Use Case | Domain Example |
|------|-------------|----------|----------------|
| **Static** | HTML/CSS/JS files served from IPFS | React, Vue, Angular, plain HTML | `myapp.orama.network` |
| **Next.js** | Next.js with SSR support | Full-stack Next.js apps | `myapp.orama.network` |
| **Go** | Compiled Go binaries | REST APIs, microservices | `api.orama.network` |
| **Node.js** | Node.js applications | Express APIs, TypeScript backends | `backend.orama.network` |
---
## Authentication
Before deploying, authenticate with your wallet:
```bash
# Authenticate
orama auth login
# Check authentication status
orama auth whoami
```
Your API key is stored securely and used for all deployment operations.
---
## Deploying Static Sites
Deploy static sites built with React, Vue, Angular, or any static site generator.
### React/Vite Example
```bash
# 1. Build your React app
cd my-react-app
npm run build
# 2. Deploy the build directory
orama deploy static ./dist --name my-react-app --domain repoanalyzer.ai
# Output:
# 📦 Creating tarball from ./dist...
# ☁️ Uploading to Orama Network...
#
# ✅ Deployment successful!
#
# Name: my-react-app
# Type: static
# Status: active
# Version: 1
# Content CID: QmXxxx...
#
# URLs:
# • https://my-react-app.orama.network
```
### What Happens Behind the Scenes
1. **Tarball Creation**: CLI automatically creates a `.tar.gz` from your directory
2. **IPFS Upload**: Files are uploaded to IPFS and pinned across the network
3. **DNS Record**: A DNS record is created pointing `my-react-app.orama.network` to the gateway
4. **Instant Serving**: Your app is immediately accessible via the URL
### Features
- ✅ **SPA Routing**: Unknown routes automatically serve `/index.html` (perfect for React Router)
- ✅ **Correct Content-Types**: Automatically detects and serves `.html`, `.css`, `.js`, `.json`, `.png`, etc.
- ✅ **Caching**: `Cache-Control: public, max-age=3600` headers for optimal performance
- ✅ **Zero Downtime Updates**: Use `--update` flag to update without downtime
### Updating a Deployment
```bash
# Make changes to your app
# Rebuild
npm run build
# Update deployment
orama deploy static ./dist --name my-react-app --update
# Version increments automatically (1 → 2)
```
---
## Deploying Next.js Applications
Deploy Next.js apps with full SSR (Server-Side Rendering) support.
### Prerequisites
> ⚠️ **IMPORTANT**: Your `next.config.js` MUST have `output: 'standalone'` for SSR deployments.
```js
// next.config.js
/** @type {import('next').NextConfig} */
const nextConfig = {
output: 'standalone', // REQUIRED for SSR deployments
}
module.exports = nextConfig
```
This setting makes Next.js create a standalone build in `.next/standalone/` that can run without `node_modules`.
### Next.js with SSR
```bash
# 1. Ensure next.config.js has output: 'standalone'
# 2. Build your Next.js app
cd my-nextjs-app
npm run build
# 3. Create tarball (must include .next and public directories)
tar -czvf nextjs.tar.gz .next public package.json next.config.js
# 4. Deploy with SSR enabled
orama deploy nextjs ./nextjs.tar.gz --name my-nextjs --ssr
# Output:
# 📦 Creating tarball from .
# ☁️ Uploading to Orama Network...
#
# ✅ Deployment successful!
#
# Name: my-nextjs
# Type: nextjs
# Status: active
# Version: 1
# Port: 10100
#
# URLs:
# • https://my-nextjs.orama.network
#
# ⚠️ Note: SSR deployment may take a minute to start. Check status with: orama deployments get my-nextjs
```
### What Happens Behind the Scenes
1. **Tarball Upload**: Your `.next` build directory, `package.json`, and `public` are uploaded
2. **Home Node Assignment**: A node is chosen to host your app based on capacity
3. **Port Allocation**: A unique port (10100-19999) is assigned
4. **Systemd Service**: A systemd service is created to run `node server.js`
5. **Health Checks**: Gateway monitors your app every 30 seconds
6. **Reverse Proxy**: Gateway proxies requests from your domain to the local port
### Static Next.js Export (No SSR)
If you export Next.js to static HTML:
```bash
# next.config.js
module.exports = {
output: 'export'
}
# Build and deploy as static
npm run build
orama deploy static ./out --name my-nextjs-static
```
---
## Deploying Go Backends
Deploy compiled Go binaries for high-performance APIs.
### Prerequisites
> ⚠️ **IMPORTANT**: Your Go application MUST:
> 1. Be compiled for Linux: `GOOS=linux GOARCH=amd64`
> 2. Listen on the port from `PORT` environment variable
> 3. Implement a `/health` endpoint that returns HTTP 200 when ready
### Go REST API Example
```bash
# 1. Build your Go binary for Linux (if on Mac/Windows)
cd my-go-api
GOOS=linux GOARCH=amd64 go build -o app main.go # Name it 'app' for auto-detection
# 2. Create tarball
tar -czvf api.tar.gz app
# 3. Deploy the binary
orama deploy go ./api.tar.gz --name my-api
# Output:
# 📦 Creating tarball from ./api...
# ☁️ Uploading to Orama Network...
#
# ✅ Deployment successful!
#
# Name: my-api
# Type: go
# Status: active
# Version: 1
# Port: 10101
#
# URLs:
# • https://my-api.orama.network
```
### Example Go API Code
```go
// main.go
package main
import (
"encoding/json"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(map[string]string{"status": "healthy"})
})
http.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) {
users := []map[string]interface{}{
{"id": 1, "name": "Alice"},
{"id": 2, "name": "Bob"},
}
json.NewEncoder(w).Encode(users)
})
log.Printf("Starting server on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
```
### Important Notes
- **Environment Variables**: The `PORT` environment variable is automatically set to your allocated port
- **Health Endpoint**: **REQUIRED** - Must implement `/health` that returns HTTP 200 when ready
- **Binary Requirements**: Must be Linux amd64 (`GOOS=linux GOARCH=amd64`)
- **Binary Naming**: Name your binary `app` for automatic detection, or any ELF executable will work
- **Systemd Managed**: Runs as a systemd service with auto-restart on failure
- **Port Range**: Allocated ports are in the range 10100-19999
---
## Deploying Node.js Backends
Deploy Node.js/Express/TypeScript backends.
### Prerequisites
> ⚠️ **IMPORTANT**: Your Node.js application MUST:
> 1. Listen on the port from `PORT` environment variable
> 2. Implement a `/health` endpoint that returns HTTP 200 when ready
> 3. Have a valid `package.json` with either:
> - A `start` script (runs via `npm start`), OR
> - A `main` field pointing to entry file (runs via `node {main}`), OR
> - An `index.js` file (default fallback)
### Express API Example
```bash
# 1. Build your Node.js app (if using TypeScript)
cd my-node-api
npm run build
# 2. Create tarball (include package.json, your code, and optionally node_modules)
tar -czvf api.tar.gz dist package.json package-lock.json
# 3. Deploy
orama deploy nodejs ./api.tar.gz --name my-node-api
# Output:
# 📦 Creating tarball from ./dist...
# ☁️ Uploading to Orama Network...
#
# ✅ Deployment successful!
#
# Name: my-node-api
# Type: nodejs
# Status: active
# Version: 1
# Port: 10102
#
# URLs:
# • https://my-node-api.orama.network
```
### Example Node.js API
```javascript
// server.js
const express = require('express');
const app = express();
const port = process.env.PORT || 8080;
app.get('/health', (req, res) => {
res.json({ status: 'healthy' });
});
app.get('/api/data', (req, res) => {
res.json({ message: 'Hello from Orama Network!' });
});
app.listen(port, () => {
console.log(`Server running on port ${port}`);
});
```
### Important Notes
- **Environment Variables**: The `PORT` environment variable is automatically set to your allocated port
- **Health Endpoint**: **REQUIRED** - Must implement `/health` that returns HTTP 200 when ready
- **Dependencies**: If `node_modules` is not included, `npm install --production` runs automatically
- **Start Command Detection**:
1. If `package.json` has `scripts.start` → runs `npm start`
2. Else if `package.json` has `main` field → runs `node {main}`
3. Else → runs `node index.js`
- **Systemd Managed**: Runs as a systemd service with auto-restart on failure
---
## Managing SQLite Databases
Each namespace gets its own isolated SQLite databases.
### Creating a Database
```bash
# Create a new database
orama db create my-database
# Output:
# ✅ Database created: my-database
# Home Node: node-abc123
# File Path: /home/debros/.orama/data/sqlite/your-namespace/my-database.db
```
### Executing Queries
```bash
# Create a table
orama db query my-database "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)"
# Insert data
orama db query my-database "INSERT INTO users (name, email) VALUES ('Alice', 'alice@example.com')"
# Query data
orama db query my-database "SELECT * FROM users"
# Output:
# 📊 Query Result
# Rows: 1
#
# id | name | email
# ----------------+-----------------+-------------------------
# 1 | Alice | alice@example.com
```
### Listing Databases
```bash
orama db list
# Output:
# NAME SIZE HOME NODE CREATED
# my-database 12.3 KB node-abc123 2024-01-22 10:30
# prod-database 1.2 MB node-abc123 2024-01-20 09:15
#
# Total: 2
```
### Backing Up to IPFS
```bash
# Create a backup
orama db backup my-database
# Output:
# ✅ Backup created
# CID: QmYxxx...
# Size: 12.3 KB
# List backups
orama db backups my-database
# Output:
# VERSION CID SIZE DATE
# 1 QmYxxx... 12.3 KB 2024-01-22 10:45
# 2 QmZxxx... 15.1 KB 2024-01-22 14:20
```
### Database Features
- ✅ **WAL Mode**: Write-Ahead Logging for better concurrency
- ✅ **Namespace Isolation**: Complete separation between namespaces
- ✅ **Automatic Backups**: Scheduled backups to IPFS every 6 hours
- ✅ **ACID Transactions**: Full SQLite transactional support
- ✅ **Concurrent Reads**: Multiple readers can query simultaneously
---
## How Domains Work
### Domain Assignment
When you deploy an application, it automatically gets a domain:
```
Format: {deployment-name}.orama.network
Example: my-react-app.orama.network
```
### Node-Specific Domains (Optional)
For direct access to a specific node:
```
Format: {deployment-name}.node-{shortID}.orama.network
Example: my-react-app.node-LL1Qvu.orama.network
```
The `shortID` is derived from the node's peer ID (characters 9-14 of the full peer ID).
For example: `12D3KooWLL1QvumH...``LL1Qvu`
### DNS Resolution Flow
1. **Client**: Browser requests `my-react-app.orama.network`
2. **DNS**: CoreDNS server queries RQLite for DNS record
3. **Record**: Returns IP address of a gateway node (round-robin across all nodes)
4. **Gateway**: Receives request with `Host: my-react-app.orama.network` header
5. **Routing**: Domain routing middleware looks up deployment by domain
6. **Cross-Node Proxy**: If deployment is on a different node, request is forwarded
7. **Response**:
- **Static**: Serves content from IPFS
- **Dynamic**: Reverse proxies to the app's local port
### Cross-Node Routing
DNS uses round-robin, so requests may hit any node in the cluster. If a deployment is hosted on a different node than the one receiving the request, the gateway automatically proxies the request to the correct home node.
```
┌─────────────────────────────────────────────────────────────────┐
│ Request Flow Example │
├─────────────────────────────────────────────────────────────────┤
│ │
│ Client │
│ │ │
│ ▼ │
│ DNS (round-robin) ───► Node-2 (141.227.165.154) │
│ │ │
│ ▼ │
│ Check: Is deployment here? │
│ │ │
│ No ─────┴───► Cross-node proxy │
│ │ │
│ ▼ │
│ Node-1 (141.227.165.168) │
│ (Home node for deployment) │
│ │ │
│ ▼ │
│ localhost:10100 │
│ (Deployment process) │
│ │
└─────────────────────────────────────────────────────────────────┘
```
This is **transparent to users** - your app works regardless of which node handles the initial request.
### Custom Domains (Future Feature)
Support for custom domains (e.g., `www.myapp.com`) with TXT record verification.
---
## Full-Stack Application Example
Deploy a complete full-stack application with React frontend, Go backend, and SQLite database.
### Architecture
```
┌─────────────────────────────────────────────┐
│ React Frontend (Static) │
│ Domain: myapp.orama.network │
│ Deployed to IPFS │
└─────────────────┬───────────────────────────┘
│ API Calls
┌─────────────────────────────────────────────┐
│ Go Backend (Dynamic) │
│ Domain: myapp-api.orama.network │
│ Port: 10100 │
│ Systemd Service │
└─────────────────┬───────────────────────────┘
│ SQL Queries
┌─────────────────────────────────────────────┐
│ SQLite Database │
│ Name: myapp-db │
│ File: ~/.orama/data/sqlite/ns/myapp-db.db│
└─────────────────────────────────────────────┘
```
### Step 1: Create the Database
```bash
# Create database
orama db create myapp-db
# Create schema
orama db query myapp-db "CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
email TEXT UNIQUE NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)"
# Insert test data
orama db query myapp-db "INSERT INTO users (name, email) VALUES ('Alice', 'alice@example.com')"
```
### Step 2: Deploy Go Backend
**Backend Code** (`main.go`):
```go
package main
import (
"database/sql"
"encoding/json"
"log"
"net/http"
"os"
_ "github.com/mattn/go-sqlite3"
)
type User struct {
ID int `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
CreatedAt string `json:"created_at"`
}
var db *sql.DB
func main() {
// DATABASE_NAME env var is automatically set by Orama
dbPath := os.Getenv("DATABASE_PATH")
if dbPath == "" {
dbPath = "/home/debros/.orama/data/sqlite/" + os.Getenv("NAMESPACE") + "/myapp-db.db"
}
var err error
db, err = sql.Open("sqlite3", dbPath)
if err != nil {
log.Fatal(err)
}
defer db.Close()
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
// CORS middleware
http.HandleFunc("/", corsMiddleware(routes))
log.Printf("Starting server on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
func routes(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/health":
json.NewEncoder(w).Encode(map[string]string{"status": "healthy"})
case "/api/users":
if r.Method == "GET" {
getUsers(w, r)
} else if r.Method == "POST" {
createUser(w, r)
}
default:
http.NotFound(w, r)
}
}
func getUsers(w http.ResponseWriter, r *http.Request) {
rows, err := db.Query("SELECT id, name, email, created_at FROM users ORDER BY id")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer rows.Close()
var users []User
for rows.Next() {
var u User
rows.Scan(&u.ID, &u.Name, &u.Email, &u.CreatedAt)
users = append(users, u)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(users)
}
func createUser(w http.ResponseWriter, r *http.Request) {
var u User
if err := json.NewDecoder(r.Body).Decode(&u); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
result, err := db.Exec("INSERT INTO users (name, email) VALUES (?, ?)", u.Name, u.Email)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
id, _ := result.LastInsertId()
u.ID = int(id)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusCreated)
json.NewEncoder(w).Encode(u)
}
func corsMiddleware(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
next(w, r)
}
}
```
**Deploy Backend**:
```bash
# Build for Linux
GOOS=linux GOARCH=amd64 go build -o api main.go
# Deploy
orama deploy go ./api --name myapp-api
```
### Step 3: Deploy React Frontend
**Frontend Code** (`src/App.jsx`):
```jsx
import { useEffect, useState } from 'react';
function App() {
const [users, setUsers] = useState([]);
const [name, setName] = useState('');
const [email, setEmail] = useState('');
const API_URL = 'https://myapp-api.orama.network';
useEffect(() => {
fetchUsers();
}, []);
const fetchUsers = async () => {
const response = await fetch(`${API_URL}/api/users`);
const data = await response.json();
setUsers(data);
};
const addUser = async (e) => {
e.preventDefault();
await fetch(`${API_URL}/api/users`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name, email }),
});
setName('');
setEmail('');
fetchUsers();
};
return (
<div>
<h1>Orama Network Full-Stack App</h1>
<h2>Add User</h2>
<form onSubmit={addUser}>
<input
value={name}
onChange={(e) => setName(e.target.value)}
placeholder="Name"
required
/>
<input
value={email}
onChange={(e) => setEmail(e.target.value)}
placeholder="Email"
type="email"
required
/>
<button type="submit">Add User</button>
</form>
<h2>Users</h2>
<ul>
{users.map((user) => (
<li key={user.id}>
{user.name} - {user.email}
</li>
))}
</ul>
</div>
);
}
export default App;
```
**Deploy Frontend**:
```bash
# Build
npm run build
# Deploy
orama deploy static ./dist --name myapp
```
### Step 4: Access Your App
Open your browser to:
- **Frontend**: `https://myapp.orama.network`
- **Backend API**: `https://myapp-api.orama.network/api/users`
### Full-Stack Summary
**Frontend**: React app served from IPFS
**Backend**: Go API running on allocated port
**Database**: SQLite database with ACID transactions
**Domains**: Automatic DNS for both services
**Isolated**: All resources namespaced and secure
---
## Managing Deployments
### List All Deployments
```bash
orama deployments list
# Output:
# NAME TYPE STATUS VERSION CREATED
# my-react-app static active 1 2024-01-22 10:30
# myapp-api go active 1 2024-01-22 10:45
# my-nextjs nextjs active 2 2024-01-22 11:00
#
# Total: 3
```
### Get Deployment Details
```bash
orama deployments get my-react-app
# Output:
# Deployment: my-react-app
#
# ID: dep-abc123
# Type: static
# Status: active
# Version: 1
# Namespace: your-namespace
# Content CID: QmXxxx...
# Memory Limit: 256 MB
# CPU Limit: 50%
# Restart Policy: always
#
# URLs:
# • https://my-react-app.orama.network
#
# Created: 2024-01-22T10:30:00Z
# Updated: 2024-01-22T10:30:00Z
```
### View Logs
```bash
# View last 100 lines
orama deployments logs my-nextjs
# Follow logs in real-time
orama deployments logs my-nextjs --follow
```
### Rollback to Previous Version
```bash
# Rollback to version 1
orama deployments rollback my-nextjs --version 1
# Output:
# ⚠️ Rolling back 'my-nextjs' to version 1. Continue? (y/N): y
#
# ✅ Rollback successful!
#
# Deployment: my-nextjs
# Current Version: 1
# Rolled Back From: 2
# Rolled Back To: 1
# Status: active
```
### Delete Deployment
```bash
orama deployments delete my-old-app
# Output:
# ⚠️ Are you sure you want to delete deployment 'my-old-app'? (y/N): y
#
# ✅ Deployment 'my-old-app' deleted successfully
```
---
## Troubleshooting
### Deployment Issues
**Problem**: Deployment status is "failed"
```bash
# Check deployment details
orama deployments get my-app
# View logs for errors
orama deployments logs my-app
# Common issues:
# - Binary not compiled for Linux (GOOS=linux GOARCH=amd64)
# - Missing dependencies (node_modules not included)
# - Port already in use (shouldn't happen, but check logs)
# - Health check failing (ensure /health endpoint exists)
```
**Problem**: Can't access deployment URL
```bash
# 1. Check deployment status
orama deployments get my-app
# 2. Verify DNS (may take up to 10 seconds to propagate)
dig my-app.orama.network
# 3. For local development, add to /etc/hosts
echo "127.0.0.1 my-app.orama.network" | sudo tee -a /etc/hosts
# 4. Test with Host header
curl -H "Host: my-app.orama.network" http://localhost:6001/
```
### Database Issues
**Problem**: Database not found
```bash
# List all databases
orama db list
# Ensure database name matches exactly (case-sensitive)
# Databases are namespace-isolated
```
**Problem**: SQL query fails
```bash
# Check table exists
orama db query my-db "SELECT name FROM sqlite_master WHERE type='table'"
# Check syntax
orama db query my-db ".schema users"
```
### Authentication Issues
```bash
# Re-authenticate
orama auth logout
orama auth login
# Check token validity
orama auth status
```
### Need Help?
- **Documentation**: Check `/docs` directory
- **Logs**: Gateway logs at `~/.orama/logs/gateway.log`
- **Issues**: Report bugs at GitHub repository
- **Community**: Join our Discord/Telegram
---
## Best Practices
### Security
1. **Never commit sensitive data**: Use environment variables for secrets
2. **Validate inputs**: Always sanitize user input in your backend
3. **HTTPS only**: All deployments automatically use HTTPS in production
4. **CORS**: Configure CORS appropriately for your API
### Performance
1. **Optimize builds**: Minimize bundle sizes (React, Next.js)
2. **Use caching**: Leverage browser caching for static assets
3. **Database indexes**: Add indexes to frequently queried columns
4. **Health checks**: Implement `/health` endpoint for monitoring
### Deployment Workflow
1. **Test locally first**: Ensure your app works before deploying
2. **Use version control**: Track changes in Git
3. **Incremental updates**: Use `--update` flag instead of delete + redeploy
4. **Backup databases**: Regular backups via `orama db backup`
5. **Monitor logs**: Check logs after deployment for errors
---
## Next Steps
- **Explore the API**: See `/docs/GATEWAY_API.md` for HTTP API details
- **Advanced Features**: Custom domains, load balancing, autoscaling (coming soon)
- **Production Deployment**: Install nodes with `orama install` for production clusters
- **Client SDK**: Use the Go/JS SDK for programmatic deployments
---
**Orama Network** - Decentralized Application Platform
Deploy anywhere. Access everywhere. Own everything.

159
docs/DEVNET_INSTALL.md Normal file
View File

@ -0,0 +1,159 @@
# Devnet Installation Commands
This document contains example installation commands for a multi-node devnet cluster.
**Wallet:** `<YOUR_WALLET_ADDRESS>`
**Contact:** `@anon: <YOUR_WALLET_ADDRESS>`
## Node Configuration
| Node | Role | Nameserver | Anyone Relay |
|------|------|------------|--------------|
| ns1 | Genesis | Yes | No |
| ns2 | Nameserver | Yes | Yes (relay-1) |
| ns3 | Nameserver | Yes | Yes (relay-2) |
| node4 | Worker | No | Yes (relay-3) |
| node5 | Worker | No | Yes (relay-4) |
| node6 | Worker | No | No |
**Note:** Store credentials securely (not in version control).
## MyFamily Fingerprints
If running multiple Anyone relays, configure MyFamily with all your relay fingerprints:
```
<FINGERPRINT_1>,<FINGERPRINT_2>,<FINGERPRINT_3>,...
```
## Installation Order
Install nodes **one at a time**, waiting for each to complete before starting the next:
1. ns1 (genesis, no Anyone relay)
2. ns2 (nameserver + relay)
3. ns3 (nameserver + relay)
4. node4 (non-nameserver + relay)
5. node5 (non-nameserver + relay)
6. node6 (non-nameserver, no relay)
## ns1 - Genesis Node (No Anyone Relay)
```bash
# SSH: <user>@<ns1-ip>
sudo orama install --no-pull --pre-built \
--vps-ip <ns1-ip> \
--domain <your-domain.com> \
--base-domain <your-domain.com> \
--nameserver
```
After ns1 is installed, generate invite tokens:
```bash
orama invite --expiry 24h
```
## ns2 - Nameserver + Relay
```bash
# SSH: <user>@<ns2-ip>
sudo orama install --no-pull --pre-built \
--join http://<ns1-ip> --token <TOKEN> \
--vps-ip <ns2-ip> \
--domain <your-domain.com> \
--base-domain <your-domain.com> \
--nameserver \
--anyone-relay --anyone-migrate \
--anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
--anyone-bandwidth 30
```
## ns3 - Nameserver + Relay
```bash
# SSH: <user>@<ns3-ip>
sudo orama install --no-pull --pre-built \
--join http://<ns1-ip> --token <TOKEN> \
--vps-ip <ns3-ip> \
--domain <your-domain.com> \
--base-domain <your-domain.com> \
--nameserver \
--anyone-relay --anyone-migrate \
--anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
--anyone-bandwidth 30
```
## node4 - Non-Nameserver + Relay
```bash
# SSH: <user>@<node4-ip>
sudo orama install --no-pull --pre-built \
--join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node4-ip> \
--domain node4.<your-domain.com> \
--base-domain <your-domain.com> \
--skip-checks \
--anyone-relay --anyone-migrate \
--anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
--anyone-bandwidth 30
```
## node5 - Non-Nameserver + Relay
```bash
# SSH: <user>@<node5-ip>
sudo orama install --no-pull --pre-built \
--join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node5-ip> \
--domain node5.<your-domain.com> \
--base-domain <your-domain.com> \
--skip-checks \
--anyone-relay --anyone-migrate \
--anyone-nickname <relay-name> \
--anyone-wallet <wallet-address> \
--anyone-contact "<contact-info>" \
--anyone-family "<fingerprint1>,<fingerprint2>,..." \
--anyone-bandwidth 30
```
## node6 - Non-Nameserver (No Anyone Relay)
```bash
# SSH: <user>@<node6-ip>
sudo orama install --no-pull --pre-built \
--join http://<ns1-ip> --token <TOKEN> \
--vps-ip <node6-ip> \
--domain node6.<your-domain.com> \
--base-domain <your-domain.com> \
--skip-checks
```
## Verification
After all nodes are installed, verify cluster health:
```bash
# Check RQLite cluster (from any node)
curl -s http://localhost:5001/status | jq -r '.store.raft.state, .store.raft.num_peers'
# Should show: Leader (on one node) and N-1 peers
# Check gateway health
curl -s http://localhost:6001/health
# Check Anyone relay (on nodes with relays)
systemctl status debros-anyone-relay
```

442
docs/DEV_DEPLOY.md Normal file
View File

@ -0,0 +1,442 @@
# Development Guide
## Prerequisites
- Go 1.21+
- Node.js 18+ (for anyone-client in dev mode)
- macOS or Linux
## Building
```bash
# Build all binaries
make build
# Outputs:
# bin/orama-node — the node binary
# bin/orama — the CLI
# bin/gateway — standalone gateway (optional)
# bin/identity — identity tool
# bin/rqlite-mcp — RQLite MCP server
```
## Running Tests
```bash
make test
```
## Running Locally (macOS)
The node runs in "direct mode" on macOS — processes are managed directly instead of via systemd.
```bash
# Start a single node
make run-node
# Start multiple nodes for cluster testing
make run-node2
make run-node3
```
## Deploying to VPS
There are two deployment workflows: **development** (fast iteration, no git required) and **production** (via git).
### Development Deployment (Fast Iteration)
Use this when iterating quickly — no need to commit or push to git.
```bash
# 1. Build the CLI for Linux
GOOS=linux GOARCH=amd64 go build -o orama-cli-linux ./cmd/cli
# 2. Generate a source archive (excludes .git, node_modules, bin/, etc.)
./scripts/generate-source-archive.sh
# Creates: /tmp/network-source.tar.gz
# 3. Copy CLI and source to the VPS
sshpass -p '<password>' scp -o StrictHostKeyChecking=no orama-cli-linux ubuntu@<ip>:/tmp/orama
sshpass -p '<password>' scp -o StrictHostKeyChecking=no /tmp/network-source.tar.gz ubuntu@<ip>:/tmp/
# 4. On the VPS: extract source and install the CLI
ssh ubuntu@<ip>
sudo rm -rf /home/debros/src && sudo mkdir -p /home/debros/src
sudo tar xzf /tmp/network-source.tar.gz -C /home/debros/src
sudo chown -R debros:debros /home/debros/src
sudo mv /tmp/orama /usr/local/bin/orama && sudo chmod +x /usr/local/bin/orama
# 5. Upgrade using local source (skips git pull)
sudo orama upgrade --no-pull --restart
```
### Development Deployment with Pre-Built Binaries (Fastest)
Cross-compile everything locally and skip all Go compilation on the VPS. This is significantly faster because your local machine compiles much faster than the VPS.
```bash
# 1. Cross-compile all binaries for Linux (DeBros + Olric + CoreDNS + Caddy)
make build-linux-all
# Outputs everything to bin-linux/
# 2. Generate a single deploy archive (source + pre-built binaries)
./scripts/generate-source-archive.sh
# Creates: /tmp/network-source.tar.gz (includes bin-linux/ if present)
# 3. Copy the single archive to the VPS
sshpass -p '<password>' scp -o StrictHostKeyChecking=no /tmp/network-source.tar.gz ubuntu@<ip>:/tmp/
# 4. Extract and install everything on the VPS
sshpass -p '<password>' ssh -o StrictHostKeyChecking=no ubuntu@<ip> \
'sudo bash -s' < scripts/extract-deploy.sh
# 5. Install/upgrade with --pre-built (skips ALL Go compilation on VPS)
sudo orama install --no-pull --pre-built --vps-ip <ip> ...
# or
sudo orama upgrade --no-pull --pre-built --restart
```
**What `--pre-built` skips:** Go installation, `make build`, Olric `go install`, CoreDNS build, Caddy/xcaddy build.
**What `--pre-built` still runs:** apt dependencies, RQLite/IPFS/IPFS Cluster downloads (pre-built binary downloads, fast), Anyone relay setup, config generation, systemd service creation.
### Production Deployment (Via Git)
For production releases — pulls source from GitHub on the VPS.
```bash
# 1. Commit and push your changes
git push origin <branch>
# 2. Build the CLI for Linux
GOOS=linux GOARCH=amd64 go build -o orama-cli-linux ./cmd/cli
# 3. Deploy the CLI to the VPS
sshpass -p '<password>' scp orama-cli-linux ubuntu@<ip>:/tmp/orama
ssh ubuntu@<ip> "sudo mv /tmp/orama /usr/local/bin/orama && sudo chmod +x /usr/local/bin/orama"
# 4. Run upgrade (downloads source from GitHub)
ssh ubuntu@<ip> "sudo orama upgrade --branch <branch> --restart"
```
### Upgrading a Multi-Node Cluster (CRITICAL)
**NEVER restart all nodes simultaneously.** RQLite uses Raft consensus and requires a majority (quorum) to function. Restarting all nodes at once can cause cluster splits where nodes elect different leaders or form isolated clusters.
#### Safe Upgrade Procedure (Rolling Restart)
Always upgrade nodes **one at a time**, waiting for each to rejoin before proceeding:
```bash
# 1. Build locally
make build-linux-all
./scripts/generate-source-archive.sh
# Creates: /tmp/network-source.tar.gz (includes bin-linux/)
# 2. Upload to ONE node first (the "hub" node)
sshpass -p '<password>' scp /tmp/network-source.tar.gz ubuntu@<hub-ip>:/tmp/
# 3. Fan out from hub to all other nodes (server-to-server is faster)
ssh ubuntu@<hub-ip>
for ip in <ip2> <ip3> <ip4> <ip5> <ip6>; do
scp /tmp/network-source.tar.gz ubuntu@$ip:/tmp/
done
exit
# 4. Extract on ALL nodes (can be done in parallel, no restart yet)
for ip in <ip1> <ip2> <ip3> <ip4> <ip5> <ip6>; do
ssh ubuntu@$ip 'sudo bash -s' < scripts/extract-deploy.sh
done
# 5. Find the RQLite leader (upgrade this one LAST)
ssh ubuntu@<any-node> 'curl -s http://localhost:5001/status | jq -r .store.raft.state'
# 6. Upgrade FOLLOWER nodes one at a time
# First stop services, then upgrade, which restarts them
ssh ubuntu@<follower-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --pre-built --restart'
# Wait for rejoin before proceeding to next node
ssh ubuntu@<leader-ip> 'curl -s http://localhost:5001/status | jq -r .store.raft.num_peers'
# Should show expected number of peers (N-1)
# Repeat for each follower...
# 7. Upgrade the LEADER node last
ssh ubuntu@<leader-ip> 'sudo orama prod stop && sudo orama upgrade --no-pull --pre-built --restart'
```
#### What NOT to Do
- **DON'T** stop all nodes, replace binaries, then start all nodes
- **DON'T** run `orama upgrade --restart` on multiple nodes in parallel
- **DON'T** clear RQLite data directories unless doing a full cluster rebuild
- **DON'T** use `systemctl stop debros-node` on multiple nodes simultaneously
#### Recovery from Cluster Split
If nodes get stuck in "Candidate" state or show "leader not found" errors:
1. Identify which node has the most recent data (usually the old leader)
2. Keep that node running as the new leader
3. On each other node, clear RQLite data and restart:
```bash
sudo orama prod stop
sudo rm -rf /home/debros/.orama/data/rqlite
sudo systemctl start debros-node
```
4. The node should automatically rejoin using its configured `rqlite_join_address`
If automatic rejoin fails, the node may have started without the `-join` flag. Check:
```bash
ps aux | grep rqlited
# Should include: -join 10.0.0.1:7001 (or similar)
```
If `-join` is missing, the node bootstrapped standalone. You'll need to either:
- Restart debros-node (it should detect empty data and use join)
- Or do a full cluster rebuild from CLEAN_NODE.md
### Deploying to Multiple Nodes
To deploy to all nodes, repeat steps 3-5 (dev) or 3-4 (production) for each VPS IP.
**Important:** When using `--restart`, do nodes one at a time (see "Upgrading a Multi-Node Cluster" above).
### CLI Flags Reference
#### `orama install`
| Flag | Description |
|------|-------------|
| `--vps-ip <ip>` | VPS public IP address (required) |
| `--domain <domain>` | Domain for HTTPS certificates. Nameserver nodes use the base domain (e.g., `example.com`); non-nameserver nodes use a subdomain (e.g., `node-4.example.com`) |
| `--base-domain <domain>` | Base domain for deployment routing (e.g., example.com) |
| `--nameserver` | Configure this node as a nameserver (CoreDNS + Caddy) |
| `--join <url>` | Join existing cluster via HTTPS URL (e.g., `https://node1.example.com`) |
| `--token <token>` | Invite token for joining (from `orama invite` on existing node) |
| `--branch <branch>` | Git branch to use (default: main) |
| `--no-pull` | Skip git clone/pull, use existing `/home/debros/src` |
| `--pre-built` | Skip all Go compilation, use pre-built binaries already on disk (see above) |
| `--force` | Force reconfiguration even if already installed |
| `--skip-firewall` | Skip UFW firewall setup |
| `--skip-checks` | Skip minimum resource checks (RAM/CPU) |
| `--anyone-relay` | Install and configure an Anyone relay on this node |
| `--anyone-migrate` | Migrate existing Anyone relay installation (preserves keys/fingerprint) |
| `--anyone-nickname <name>` | Relay nickname (required for relay mode) |
| `--anyone-wallet <addr>` | Ethereum wallet for relay rewards (required for relay mode) |
| `--anyone-contact <info>` | Contact info for relay (required for relay mode) |
| `--anyone-family <fps>` | Comma-separated fingerprints of related relays (MyFamily) |
| `--anyone-orport <port>` | ORPort for relay (default: 9001) |
| `--anyone-exit` | Configure as an exit relay (default: non-exit) |
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited). Runs a speedtest during install to measure available bandwidth |
| `--anyone-accounting <GB>` | Monthly data cap for relay in GB (0=unlimited) |
#### `orama invite`
| Flag | Description |
|------|-------------|
| `--expiry <duration>` | Token expiry duration (default: 1h, e.g. `--expiry 24h`) |
**Important notes about invite tokens:**
- **Tokens are single-use.** Once a node consumes a token during the join handshake, it cannot be reused. Generate a separate token for each node you want to join.
- **Expiry is checked in UTC.** RQLite uses `datetime('now')` which is always UTC. If your local timezone differs, account for the offset when choosing expiry durations.
- **Use longer expiry for multi-node deployments.** When deploying multiple nodes, use `--expiry 24h` to avoid tokens expiring mid-deployment.
#### `orama upgrade`
| Flag | Description |
|------|-------------|
| `--branch <branch>` | Git branch to pull from |
| `--no-pull` | Skip git pull, use existing source |
| `--pre-built` | Skip all Go compilation, use pre-built binaries already on disk |
| `--restart` | Restart all services after upgrade |
| `--anyone-relay` | Enable Anyone relay (same flags as install) |
| `--anyone-bandwidth <pct>` | Limit relay to N% of VPS bandwidth (default: 30, 0=unlimited) |
| `--anyone-accounting <GB>` | Monthly data cap for relay in GB (0=unlimited) |
#### `orama prod` (Service Management)
Use these commands to manage services on production nodes:
```bash
# Stop all services (debros-node, coredns, caddy)
sudo orama prod stop
# Start all services
sudo orama prod start
# Restart all services
sudo orama prod restart
# Check service status
sudo orama prod status
```
**Note:** Always use `orama prod stop` instead of manually running `systemctl stop`. The CLI ensures all related services (including CoreDNS and Caddy on nameserver nodes) are handled correctly.
### Node Join Flow
```bash
# 1. Genesis node (first node, creates cluster)
# Nameserver nodes use the base domain as --domain
sudo orama install --vps-ip 1.2.3.4 --domain example.com \
--base-domain example.com --nameserver
# 2. On genesis node, generate an invite
orama invite
# Output: sudo orama install --join https://example.com --token <TOKEN> --vps-ip <IP>
# 3. On the new node, run the printed command
# Nameserver nodes use the base domain; non-nameserver nodes use subdomains (e.g., node-4.example.com)
sudo orama install --join https://example.com --token abc123... \
--vps-ip 5.6.7.8 --domain example.com --base-domain example.com --nameserver
```
The join flow establishes a WireGuard VPN tunnel before starting cluster services.
All inter-node communication (RQLite, IPFS, Olric) uses WireGuard IPs (10.0.0.x).
No cluster ports are ever exposed publicly.
#### DNS Prerequisite
The `--join` URL should use the HTTPS domain of the genesis node (e.g., `https://node1.example.com`).
For this to work, the domain registrar for `example.com` must have NS records pointing to the genesis
node's IP so that `node1.example.com` resolves publicly.
**If DNS is not yet configured**, you can use the genesis node's public IP with HTTP as a fallback:
```bash
sudo orama install --join http://1.2.3.4 --vps-ip 5.6.7.8 --token abc123... --nameserver
```
This works because Caddy's `:80` block proxies all HTTP traffic to the gateway. However, once DNS
is properly configured, always use the HTTPS domain URL.
**Important:** Never use `http://<ip>:6001` — port 6001 is the internal gateway and is blocked by
UFW from external access. The join request goes through Caddy on port 80 (HTTP) or 443 (HTTPS),
which proxies to the gateway internally.
## Pre-Install Checklist
Before running `orama install` on a VPS, ensure:
1. **Stop Docker if running.** Docker commonly binds ports 4001 and 8080 which conflict with IPFS. The installer checks for port conflicts and shows which process is using each port, but it's easier to stop Docker first:
```bash
sudo systemctl stop docker docker.socket
sudo systemctl disable docker docker.socket
```
2. **Stop any existing IPFS instance.**
```bash
sudo systemctl stop ipfs
```
3. **Ensure `make` is installed.** Required for building CoreDNS and Caddy from source:
```bash
sudo apt-get install -y make
```
4. **Stop any service on port 53** (for nameserver nodes). The installer handles `systemd-resolved` automatically, but other DNS services (like `bind9` or `dnsmasq`) must be stopped manually.
## Recovering from Failed Joins
If a node partially joins the cluster (registers in RQLite's Raft but then fails or gets cleaned), the remaining cluster can lose quorum permanently. This happens because RQLite thinks there are N voters but only N-1 are reachable.
**Symptoms:** RQLite stuck in "Candidate" state, no leader elected, all writes fail.
**Solution:** Do a full clean reinstall of all affected nodes. Use [CLEAN_NODE.md](CLEAN_NODE.md) to reset each node, then reinstall starting from the genesis node.
**Prevention:** Always ensure a joining node can complete the full installation before it joins. The installer validates port availability upfront to catch conflicts early.
## Debugging Production Issues
Always follow the local-first approach:
1. **Reproduce locally** — set up the same conditions on your machine
2. **Find the root cause** — understand why it's happening
3. **Fix in the codebase** — make changes to the source code
4. **Test locally** — run `make test` and verify
5. **Deploy** — only then deploy the fix to production
Never fix issues directly on the server — those fixes are lost on next deployment.
## Trusting the Self-Signed TLS Certificate
When Let's Encrypt is rate-limited, Caddy falls back to its internal CA (self-signed certificates). Browsers will show security warnings unless you install the root CA certificate.
### Downloading the Root CA Certificate
From VPS 1 (or any node), copy the certificate:
```bash
# Copy the cert to an accessible location on the VPS
ssh ubuntu@<VPS_IP> "sudo cp /var/lib/caddy/.local/share/caddy/pki/authorities/local/root.crt /tmp/caddy-root-ca.crt && sudo chmod 644 /tmp/caddy-root-ca.crt"
# Download to your local machine
scp ubuntu@<VPS_IP>:/tmp/caddy-root-ca.crt ~/Downloads/caddy-root-ca.crt
```
### macOS
```bash
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ~/Downloads/caddy-root-ca.crt
```
This adds the cert system-wide. All browsers (Safari, Chrome, Arc, etc.) will trust it immediately. Firefox uses its own certificate store — go to **Settings > Privacy & Security > Certificates > View Certificates > Import** and import the `.crt` file there.
To remove it later:
```bash
sudo security remove-trusted-cert -d ~/Downloads/caddy-root-ca.crt
```
### iOS (iPhone/iPad)
1. Transfer `caddy-root-ca.crt` to your device (AirDrop, email attachment, or host it on a URL)
2. Open the file — iOS will show "Profile Downloaded"
3. Go to **Settings > General > VPN & Device Management** (or "Profiles" on older iOS)
4. Tap the "Caddy Local Authority" profile and tap **Install**
5. Go to **Settings > General > About > Certificate Trust Settings**
6. Enable **full trust** for "Caddy Local Authority - 2026 ECC Root"
### Android
1. Transfer `caddy-root-ca.crt` to your device
2. Go to **Settings > Security > Encryption & Credentials > Install a certificate > CA certificate**
3. Select the `caddy-root-ca.crt` file
4. Confirm the installation
Note: On Android 7+, user-installed CA certificates are only trusted by apps that explicitly opt in. Chrome will trust it, but some apps may not.
### Windows
```powershell
certutil -addstore -f "ROOT" caddy-root-ca.crt
```
Or double-click the `.crt` file > **Install Certificate** > **Local Machine** > **Place in "Trusted Root Certification Authorities"**.
### Linux
```bash
sudo cp caddy-root-ca.crt /usr/local/share/ca-certificates/caddy-root-ca.crt
sudo update-ca-certificates
```
## Project Structure
See [ARCHITECTURE.md](ARCHITECTURE.md) for the full architecture overview.
Key directories:
```
cmd/
cli/ — CLI entry point (orama command)
node/ — Node entry point (orama-node)
gateway/ — Standalone gateway entry point
pkg/
cli/ — CLI command implementations
gateway/ — HTTP gateway, routes, middleware
deployments/ — Deployment types, service, storage
environments/ — Production (systemd) and development (direct) modes
rqlite/ — Distributed SQLite via RQLite
```

View File

@ -1,734 +0,0 @@
# Gateway API Documentation
## Overview
The Orama Network Gateway provides a unified HTTP/HTTPS API for all network services. It handles authentication, routing, and service coordination.
**Base URL:** `https://api.orama.network` (production) or `http://localhost:6001` (development)
## Authentication
All API requests (except `/health` and `/v1/auth/*`) require authentication.
### Authentication Methods
1. **API Key** (Recommended for server-to-server)
2. **JWT Token** (Recommended for user sessions)
3. **Wallet Signature** (For blockchain integration)
### Using API Keys
Include your API key in the `Authorization` header:
```bash
curl -H "Authorization: Bearer your-api-key-here" \
https://api.orama.network/v1/status
```
Or in the `X-API-Key` header:
```bash
curl -H "X-API-Key: your-api-key-here" \
https://api.orama.network/v1/status
```
### Using JWT Tokens
```bash
curl -H "Authorization: Bearer your-jwt-token-here" \
https://api.orama.network/v1/status
```
## Base Endpoints
### Health Check
```http
GET /health
```
**Response:**
```json
{
"status": "ok",
"timestamp": "2024-01-20T10:30:00Z"
}
```
### Status
```http
GET /v1/status
```
**Response:**
```json
{
"version": "0.80.0",
"uptime": "24h30m15s",
"services": {
"rqlite": "healthy",
"ipfs": "healthy",
"olric": "healthy"
}
}
```
### Version
```http
GET /v1/version
```
**Response:**
```json
{
"version": "0.80.0",
"commit": "abc123...",
"built": "2024-01-20T00:00:00Z"
}
```
## Authentication API
### Get Challenge (Wallet Auth)
Generate a nonce for wallet signature.
```http
POST /v1/auth/challenge
Content-Type: application/json
{
"wallet": "0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb",
"purpose": "login",
"namespace": "default"
}
```
**Response:**
```json
{
"wallet": "0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb",
"namespace": "default",
"nonce": "a1b2c3d4e5f6...",
"purpose": "login",
"expires_at": "2024-01-20T10:35:00Z"
}
```
### Verify Signature
Verify wallet signature and issue JWT + API key.
```http
POST /v1/auth/verify
Content-Type: application/json
{
"wallet": "0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb",
"signature": "0x...",
"nonce": "a1b2c3d4e5f6...",
"namespace": "default"
}
```
**Response:**
```json
{
"jwt_token": "eyJhbGciOiJIUzI1NiIs...",
"refresh_token": "refresh_abc123...",
"api_key": "api_xyz789...",
"expires_in": 900,
"namespace": "default"
}
```
### Refresh Token
Refresh an expired JWT token.
```http
POST /v1/auth/refresh
Content-Type: application/json
{
"refresh_token": "refresh_abc123..."
}
```
**Response:**
```json
{
"jwt_token": "eyJhbGciOiJIUzI1NiIs...",
"expires_in": 900
}
```
### Logout
Revoke refresh tokens.
```http
POST /v1/auth/logout
Authorization: Bearer your-jwt-token
{
"all": false
}
```
**Response:**
```json
{
"message": "logged out successfully"
}
```
### Whoami
Get current authentication info.
```http
GET /v1/auth/whoami
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"authenticated": true,
"method": "api_key",
"api_key": "api_xyz789...",
"namespace": "default"
}
```
## Storage API (IPFS)
### Upload File
```http
POST /v1/storage/upload
Authorization: Bearer your-api-key
Content-Type: multipart/form-data
file: <binary data>
```
Or with JSON:
```http
POST /v1/storage/upload
Authorization: Bearer your-api-key
Content-Type: application/json
{
"data": "base64-encoded-data",
"filename": "document.pdf",
"pin": true,
"encrypt": false
}
```
**Response:**
```json
{
"cid": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
"size": 1024,
"filename": "document.pdf"
}
```
### Get File
```http
GET /v1/storage/get/:cid
Authorization: Bearer your-api-key
```
**Response:** Binary file data or JSON (if `Accept: application/json`)
### Pin File
```http
POST /v1/storage/pin
Authorization: Bearer your-api-key
Content-Type: application/json
{
"cid": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
"replication_factor": 3
}
```
**Response:**
```json
{
"cid": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
"status": "pinned"
}
```
### Unpin File
```http
DELETE /v1/storage/unpin/:cid
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"message": "unpinned successfully"
}
```
### Get Pin Status
```http
GET /v1/storage/status/:cid
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"cid": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG",
"status": "pinned",
"replicas": 3,
"peers": ["12D3KooW...", "12D3KooW..."]
}
```
## Cache API (Olric)
### Set Value
```http
PUT /v1/cache/put
Authorization: Bearer your-api-key
Content-Type: application/json
{
"key": "user:123",
"value": {"name": "Alice", "email": "alice@example.com"},
"ttl": 300
}
```
**Response:**
```json
{
"message": "value set successfully"
}
```
### Get Value
```http
GET /v1/cache/get?key=user:123
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"key": "user:123",
"value": {"name": "Alice", "email": "alice@example.com"}
}
```
### Get Multiple Values
```http
POST /v1/cache/mget
Authorization: Bearer your-api-key
Content-Type: application/json
{
"keys": ["user:1", "user:2", "user:3"]
}
```
**Response:**
```json
{
"results": {
"user:1": {"name": "Alice"},
"user:2": {"name": "Bob"},
"user:3": null
}
}
```
### Delete Value
```http
DELETE /v1/cache/delete?key=user:123
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"message": "deleted successfully"
}
```
### Scan Keys
```http
GET /v1/cache/scan?pattern=user:*&limit=100
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"keys": ["user:1", "user:2", "user:3"],
"count": 3
}
```
## Database API (RQLite)
### Execute SQL
```http
POST /v1/rqlite/exec
Authorization: Bearer your-api-key
Content-Type: application/json
{
"sql": "INSERT INTO users (name, email) VALUES (?, ?)",
"args": ["Alice", "alice@example.com"]
}
```
**Response:**
```json
{
"last_insert_id": 123,
"rows_affected": 1
}
```
### Query SQL
```http
POST /v1/rqlite/query
Authorization: Bearer your-api-key
Content-Type: application/json
{
"sql": "SELECT * FROM users WHERE id = ?",
"args": [123]
}
```
**Response:**
```json
{
"columns": ["id", "name", "email"],
"rows": [
[123, "Alice", "alice@example.com"]
]
}
```
### Get Schema
```http
GET /v1/rqlite/schema
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"tables": [
{
"name": "users",
"schema": "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, email TEXT)"
}
]
}
```
## Pub/Sub API
### Publish Message
```http
POST /v1/pubsub/publish
Authorization: Bearer your-api-key
Content-Type: application/json
{
"topic": "chat",
"data": "SGVsbG8sIFdvcmxkIQ==",
"namespace": "default"
}
```
**Response:**
```json
{
"message": "published successfully"
}
```
### List Topics
```http
GET /v1/pubsub/topics
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"topics": ["chat", "notifications", "events"]
}
```
### Subscribe (WebSocket)
```http
GET /v1/pubsub/ws?topic=chat
Authorization: Bearer your-api-key
Upgrade: websocket
```
**WebSocket Messages:**
Incoming (from server):
```json
{
"type": "message",
"topic": "chat",
"data": "SGVsbG8sIFdvcmxkIQ==",
"timestamp": "2024-01-20T10:30:00Z"
}
```
Outgoing (to server):
```json
{
"type": "publish",
"topic": "chat",
"data": "SGVsbG8sIFdvcmxkIQ=="
}
```
### Presence
```http
GET /v1/pubsub/presence?topic=chat
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"topic": "chat",
"members": [
{"id": "user-123", "joined_at": "2024-01-20T10:00:00Z"},
{"id": "user-456", "joined_at": "2024-01-20T10:15:00Z"}
]
}
```
## Serverless API (WASM)
### Deploy Function
```http
POST /v1/functions
Authorization: Bearer your-api-key
Content-Type: multipart/form-data
name: hello-world
namespace: default
description: Hello world function
wasm: <binary WASM file>
memory_limit: 64
timeout: 30
```
**Response:**
```json
{
"id": "fn_abc123",
"name": "hello-world",
"namespace": "default",
"wasm_cid": "QmXxx...",
"version": 1,
"created_at": "2024-01-20T10:30:00Z"
}
```
### Invoke Function
```http
POST /v1/functions/hello-world/invoke
Authorization: Bearer your-api-key
Content-Type: application/json
{
"name": "Alice"
}
```
**Response:**
```json
{
"result": "Hello, Alice!",
"execution_time_ms": 15,
"memory_used_mb": 2.5
}
```
### List Functions
```http
GET /v1/functions?namespace=default
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"functions": [
{
"name": "hello-world",
"description": "Hello world function",
"version": 1,
"created_at": "2024-01-20T10:30:00Z"
}
]
}
```
### Delete Function
```http
DELETE /v1/functions/hello-world?namespace=default
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"message": "function deleted successfully"
}
```
### Get Function Logs
```http
GET /v1/functions/hello-world/logs?limit=100
Authorization: Bearer your-api-key
```
**Response:**
```json
{
"logs": [
{
"timestamp": "2024-01-20T10:30:00Z",
"level": "info",
"message": "Function invoked",
"invocation_id": "inv_xyz789"
}
]
}
```
## Error Responses
All errors follow a consistent format:
```json
{
"code": "NOT_FOUND",
"message": "user with ID '123' not found",
"details": {
"resource": "user",
"id": "123"
},
"trace_id": "trace-abc123"
}
```
### Common Error Codes
| Code | HTTP Status | Description |
|------|-------------|-------------|
| `VALIDATION_ERROR` | 400 | Invalid input |
| `UNAUTHORIZED` | 401 | Authentication required |
| `FORBIDDEN` | 403 | Permission denied |
| `NOT_FOUND` | 404 | Resource not found |
| `CONFLICT` | 409 | Resource already exists |
| `TIMEOUT` | 408 | Operation timeout |
| `RATE_LIMIT_EXCEEDED` | 429 | Too many requests |
| `SERVICE_UNAVAILABLE` | 503 | Service unavailable |
| `INTERNAL` | 500 | Internal server error |
## Rate Limiting
The API implements rate limiting per API key:
- **Default:** 100 requests per minute
- **Burst:** 200 requests
Rate limit headers:
```
X-RateLimit-Limit: 100
X-RateLimit-Remaining: 95
X-RateLimit-Reset: 1611144000
```
When rate limited:
```json
{
"code": "RATE_LIMIT_EXCEEDED",
"message": "rate limit exceeded",
"details": {
"limit": 100,
"retry_after": 60
}
}
```
## Pagination
List endpoints support pagination:
```http
GET /v1/functions?limit=10&offset=20
```
Response includes pagination metadata:
```json
{
"data": [...],
"pagination": {
"total": 100,
"limit": 10,
"offset": 20,
"has_more": true
}
}
```
## Webhooks (Future)
Coming soon: webhook support for event notifications.
## Support
- API Issues: https://github.com/DeBrosOfficial/network/issues
- OpenAPI Spec: `openapi/gateway.yaml`
- SDK Documentation: `docs/CLIENT_SDK.md`

248
docs/NAMESERVER_SETUP.md Normal file
View File

@ -0,0 +1,248 @@
# Nameserver Setup Guide
This guide explains how to configure your domain registrar to use Orama Network nodes as authoritative nameservers.
## Overview
When you install Orama with the `--nameserver` flag, the node runs CoreDNS to serve DNS records for your domain. This enables:
- Dynamic DNS for deployments (e.g., `myapp.node-abc123.dbrs.space`)
- Wildcard DNS support for all subdomains
- ACME DNS-01 challenges for automatic SSL certificates
## Prerequisites
Before setting up nameservers, you need:
1. **Domain ownership** - A domain you control (e.g., `dbrs.space`)
2. **3+ VPS nodes** - Recommended for redundancy
3. **Static IP addresses** - Each VPS must have a static public IP
4. **Access to registrar DNS settings** - Admin access to your domain registrar
## Understanding DNS Records
### NS Records (Nameserver Records)
NS records tell the internet which servers are authoritative for your domain:
```
dbrs.space. IN NS ns1.dbrs.space.
dbrs.space. IN NS ns2.dbrs.space.
dbrs.space. IN NS ns3.dbrs.space.
```
### Glue Records
Glue records are A records that provide IP addresses for nameservers that are under the same domain. They're required because:
- `ns1.dbrs.space` is under `dbrs.space`
- To resolve `ns1.dbrs.space`, you need to query `dbrs.space` nameservers
- But those nameservers ARE `ns1.dbrs.space` - circular dependency!
- Glue records break this cycle by providing IPs at the registry level
```
ns1.dbrs.space. IN A 141.227.165.168
ns2.dbrs.space. IN A 141.227.165.154
ns3.dbrs.space. IN A 141.227.156.51
```
## Installation
### Step 1: Install Orama on Each VPS
Install Orama with the `--nameserver` flag on each VPS that will serve as a nameserver:
```bash
# On VPS 1 (ns1)
sudo orama install \
--nameserver \
--domain dbrs.space \
--vps-ip 141.227.165.168
# On VPS 2 (ns2)
sudo orama install \
--nameserver \
--domain dbrs.space \
--vps-ip 141.227.165.154
# On VPS 3 (ns3)
sudo orama install \
--nameserver \
--domain dbrs.space \
--vps-ip 141.227.156.51
```
### Step 2: Configure Your Registrar
#### For Namecheap
1. **Log into Namecheap Dashboard**
- Go to https://www.namecheap.com
- Navigate to **Domain List****Manage** (next to your domain)
2. **Add Glue Records (Personal DNS Servers)**
- Go to **Advanced DNS** tab
- Scroll down to **Personal DNS Servers** section
- Click **Add Nameserver**
- Add each nameserver with its IP:
| Nameserver | IP Address |
|------------|------------|
| ns1.yourdomain.com | 141.227.165.168 |
| ns2.yourdomain.com | 141.227.165.154 |
| ns3.yourdomain.com | 141.227.156.51 |
3. **Set Custom Nameservers**
- Go back to the **Domain** tab
- Under **Nameservers**, select **Custom DNS**
- Add your nameserver hostnames:
- ns1.yourdomain.com
- ns2.yourdomain.com
- ns3.yourdomain.com
- Click the green checkmark to save
4. **Wait for Propagation**
- DNS changes can take 24-48 hours to propagate globally
- Most changes are visible within 1-4 hours
#### For GoDaddy
1. Log into GoDaddy account
2. Go to **My Products****DNS** for your domain
3. Under **Nameservers**, click **Change**
4. Select **Enter my own nameservers**
5. Add your nameserver hostnames
6. For glue records, go to **DNS Management** → **Host Names**
7. Add A records for ns1, ns2, ns3
#### For Cloudflare (as Registrar)
1. Log into Cloudflare Dashboard
2. Go to **Domain Registration** → your domain
3. Under **Nameservers**, change to custom
4. Note: Cloudflare Registrar may require contacting support for glue records
#### For Google Domains
1. Log into Google Domains
2. Select your domain → **DNS**
3. Under **Name servers**, select **Use custom name servers**
4. Add your nameserver hostnames
5. For glue records, click **Add** under **Glue records**
## Verification
### Step 1: Verify NS Records
After propagation, check that NS records are visible:
```bash
# Check NS records from Google DNS
dig NS yourdomain.com @8.8.8.8
# Expected output should show:
# yourdomain.com. IN NS ns1.yourdomain.com.
# yourdomain.com. IN NS ns2.yourdomain.com.
# yourdomain.com. IN NS ns3.yourdomain.com.
```
### Step 2: Verify Glue Records
Check that glue records resolve:
```bash
# Check glue records
dig A ns1.yourdomain.com @8.8.8.8
dig A ns2.yourdomain.com @8.8.8.8
dig A ns3.yourdomain.com @8.8.8.8
# Each should return the correct IP address
```
### Step 3: Test CoreDNS
Query your nameservers directly:
```bash
# Test a query against ns1
dig @ns1.yourdomain.com test.yourdomain.com
# Test wildcard resolution
dig @ns1.yourdomain.com myapp.node-abc123.yourdomain.com
```
### Step 4: Verify from Multiple Locations
Use online tools to verify global propagation:
- https://dnschecker.org
- https://www.whatsmydns.net
## Troubleshooting
### DNS Not Resolving
1. **Check CoreDNS is running:**
```bash
sudo systemctl status coredns
```
2. **Check CoreDNS logs:**
```bash
sudo journalctl -u coredns -f
```
3. **Verify port 53 is open:**
```bash
sudo ufw status
# Port 53 (TCP/UDP) should be allowed
```
4. **Test locally:**
```bash
dig @localhost yourdomain.com
```
### Glue Records Not Propagating
- Glue records are stored at the registry level, not DNS level
- They can take longer to propagate (up to 48 hours)
- Verify at your registrar that they were saved correctly
- Some registrars require the domain to be using their nameservers first
### SERVFAIL Errors
Usually indicates CoreDNS configuration issues:
1. Check Corefile syntax
2. Verify RQLite connectivity
3. Check firewall rules
## Security Considerations
### Firewall Rules
Only expose necessary ports:
```bash
# Allow DNS from anywhere
sudo ufw allow 53/tcp
sudo ufw allow 53/udp
# Restrict admin ports to internal network
sudo ufw allow from 10.0.0.0/8 to any port 8080 # Health
sudo ufw allow from 10.0.0.0/8 to any port 9153 # Metrics
```
### Rate Limiting
Consider adding rate limiting to prevent DNS amplification attacks.
This can be configured in the CoreDNS Corefile.
## Multi-Node Coordination
When running multiple nameservers:
1. **All nodes share the same RQLite cluster** - DNS records are automatically synchronized
2. **Install in order** - First node bootstraps, others join
3. **Same domain configuration** - All nodes must use the same `--domain` value
## Related Documentation
- [CoreDNS RQLite Plugin](../pkg/coredns/README.md) - Technical details
- [Deployment Guide](./DEPLOYMENT_GUIDE.md) - Full deployment instructions
- [Architecture](./ARCHITECTURE.md) - System architecture overview

View File

@ -1,476 +0,0 @@
# Orama Network - Security Deployment Guide
**Date:** January 18, 2026
**Status:** Production-Ready
**Audit Completed By:** Claude Code Security Audit
---
## Executive Summary
This document outlines the security hardening measures applied to the 4-node Orama Network production cluster. All critical vulnerabilities identified in the security audit have been addressed.
**Security Status:** ✅ SECURED FOR PRODUCTION
---
## Server Inventory
| Server ID | IP Address | Domain | OS | Role |
|-----------|------------|--------|-----|------|
| VPS 1 | 51.83.128.181 | node-kv4la8.debros.network | Ubuntu 22.04 | Gateway + Cluster Node |
| VPS 2 | 194.61.28.7 | node-7prvNa.debros.network | Ubuntu 24.04 | Gateway + Cluster Node |
| VPS 3 | 83.171.248.66 | node-xn23dq.debros.network | Ubuntu 24.04 | Gateway + Cluster Node |
| VPS 4 | 62.72.44.87 | node-nns4n5.debros.network | Ubuntu 24.04 | Gateway + Cluster Node |
---
## Services Running on Each Server
| Service | Port(s) | Purpose | Public Access |
|---------|---------|---------|---------------|
| **orama-node** | 80, 443, 7001 | API Gateway | Yes (80, 443 only) |
| **rqlited** | 5001, 7002 | Distributed SQLite DB | Cluster only |
| **ipfs** | 4101, 4501, 8080 | Content-addressed storage | Cluster only |
| **ipfs-cluster** | 9094, 9098 | IPFS cluster management | Cluster only |
| **olric-server** | 3320, 3322 | Distributed cache | Cluster only |
| **anon** (Anyone proxy) | 9001, 9050, 9051 | Anonymity proxy | Cluster only |
| **libp2p** | 4001 | P2P networking | Yes (public P2P) |
| **SSH** | 22 | Remote access | Yes |
---
## Security Measures Implemented
### 1. Firewall Configuration (UFW)
**Status:** ✅ Enabled on all 4 servers
#### Public Ports (Open to Internet)
- **22/tcp** - SSH (with hardening)
- **80/tcp** - HTTP (redirects to HTTPS)
- **443/tcp** - HTTPS (Let's Encrypt production certificates)
- **4001/tcp** - libp2p swarm (P2P networking)
#### Cluster-Only Ports (Restricted to 4 Server IPs)
All the following ports are ONLY accessible from the 4 cluster IPs:
- **5001/tcp** - rqlite HTTP API
- **7001/tcp** - SNI Gateway
- **7002/tcp** - rqlite Raft consensus
- **9094/tcp** - IPFS Cluster API
- **9098/tcp** - IPFS Cluster communication
- **3322/tcp** - Olric distributed cache
- **4101/tcp** - IPFS swarm (cluster internal)
#### Firewall Rules Example
```bash
sudo ufw default deny incoming
sudo ufw default allow outgoing
sudo ufw allow 22/tcp comment "SSH"
sudo ufw allow 80/tcp comment "HTTP"
sudo ufw allow 443/tcp comment "HTTPS"
sudo ufw allow 4001/tcp comment "libp2p swarm"
# Cluster-only access for sensitive services
sudo ufw allow from 51.83.128.181 to any port 5001 proto tcp
sudo ufw allow from 194.61.28.7 to any port 5001 proto tcp
sudo ufw allow from 83.171.248.66 to any port 5001 proto tcp
sudo ufw allow from 62.72.44.87 to any port 5001 proto tcp
# (repeat for ports 7001, 7002, 9094, 9098, 3322, 4101)
sudo ufw enable
```
### 2. SSH Hardening
**Location:** `/etc/ssh/sshd_config.d/99-hardening.conf`
**Configuration:**
```bash
PermitRootLogin yes # Root login allowed with SSH keys
PasswordAuthentication yes # Password auth enabled (you have keys configured)
PubkeyAuthentication yes # SSH key authentication enabled
PermitEmptyPasswords no # No empty passwords
X11Forwarding no # X11 disabled for security
MaxAuthTries 3 # Max 3 login attempts
ClientAliveInterval 300 # Keep-alive every 5 minutes
ClientAliveCountMax 2 # Disconnect after 2 failed keep-alives
```
**Your SSH Keys Added:**
- ✅ `ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPcGZPX2iHXWO8tuyyDkHPS5eByPOktkw3+ugcw79yQO`
- ✅ `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDgCWmycaBN3aAZJcM2w4+Xi2zrTwN78W8oAiQywvMEkubqNNWHF6I3...`
Both keys are installed on all 4 servers in:
- VPS 1: `/home/ubuntu/.ssh/authorized_keys`
- VPS 2, 3, 4: `/root/.ssh/authorized_keys`
### 3. Fail2ban Protection
**Status:** ✅ Installed and running on all 4 servers
**Purpose:** Automatically bans IPs after failed SSH login attempts
**Check Status:**
```bash
sudo systemctl status fail2ban
```
### 4. Security Updates
**Status:** ✅ All security updates applied (as of Jan 18, 2026)
**Update Command:**
```bash
sudo apt update && sudo apt upgrade -y
```
### 5. Let's Encrypt TLS Certificates
**Status:** ✅ Production certificates (NOT staging)
**Configuration:**
- **Provider:** Let's Encrypt (ACME v2 Production)
- **Auto-renewal:** Enabled via autocert
- **Cache Directory:** `/home/debros/.orama/tls-cache/`
- **Domains:**
- node-kv4la8.debros.network (VPS 1)
- node-7prvNa.debros.network (VPS 2)
- node-xn23dq.debros.network (VPS 3)
- node-nns4n5.debros.network (VPS 4)
**Certificate Files:**
- Account key: `/home/debros/.orama/tls-cache/acme_account+key`
- Certificates auto-managed by autocert
**Verification:**
```bash
curl -I https://node-kv4la8.debros.network
# Should return valid SSL certificate
```
---
## Cluster Configuration
### RQLite Cluster
**Nodes:**
- 51.83.128.181:7002 (Leader)
- 194.61.28.7:7002
- 83.171.248.66:7002
- 62.72.44.87:7002
**Test Cluster Health:**
```bash
ssh ubuntu@51.83.128.181
curl -s http://localhost:5001/status | jq '.store.nodes'
```
**Expected Output:**
```json
[
{"id":"194.61.28.7:7002","addr":"194.61.28.7:7002","suffrage":"Voter"},
{"id":"51.83.128.181:7002","addr":"51.83.128.181:7002","suffrage":"Voter"},
{"id":"62.72.44.87:7002","addr":"62.72.44.87:7002","suffrage":"Voter"},
{"id":"83.171.248.66:7002","addr":"83.171.248.66:7002","suffrage":"Voter"}
]
```
### IPFS Cluster
**Test Cluster Health:**
```bash
ssh ubuntu@51.83.128.181
curl -s http://localhost:9094/id | jq '.cluster_peers'
```
**Expected:** All 4 peer IDs listed
### Olric Cache Cluster
**Port:** 3320 (localhost), 3322 (cluster communication)
**Test:**
```bash
ssh ubuntu@51.83.128.181
ss -tulpn | grep olric
```
---
## Access Credentials
### SSH Access
**VPS 1:**
```bash
ssh ubuntu@51.83.128.181
# OR using your SSH key:
ssh -i ~/.ssh/ssh-sotiris/id_ed25519 ubuntu@51.83.128.181
```
**VPS 2, 3, 4:**
```bash
ssh root@194.61.28.7
ssh root@83.171.248.66
ssh root@62.72.44.87
```
**Important:** Password authentication is still enabled, but your SSH keys are configured for passwordless access.
---
## Testing & Verification
### 1. Test External Port Access (From Your Machine)
```bash
# These should be BLOCKED (timeout or connection refused):
nc -zv 51.83.128.181 5001 # rqlite API - should be blocked
nc -zv 51.83.128.181 7002 # rqlite Raft - should be blocked
nc -zv 51.83.128.181 9094 # IPFS cluster - should be blocked
# These should be OPEN:
nc -zv 51.83.128.181 22 # SSH - should succeed
nc -zv 51.83.128.181 80 # HTTP - should succeed
nc -zv 51.83.128.181 443 # HTTPS - should succeed
nc -zv 51.83.128.181 4001 # libp2p - should succeed
```
### 2. Test Domain Access
```bash
curl -I https://node-kv4la8.debros.network
curl -I https://node-7prvNa.debros.network
curl -I https://node-xn23dq.debros.network
curl -I https://node-nns4n5.debros.network
```
All should return `HTTP/1.1 200 OK` or similar with valid SSL certificates.
### 3. Test Cluster Communication (From VPS 1)
```bash
ssh ubuntu@51.83.128.181
# Test rqlite cluster
curl -s http://localhost:5001/status | jq -r '.store.nodes[].id'
# Test IPFS cluster
curl -s http://localhost:9094/id | jq -r '.cluster_peers[]'
# Check all services running
ps aux | grep -E "(orama-node|rqlited|ipfs|olric)" | grep -v grep
```
---
## Maintenance & Operations
### Firewall Management
**View current rules:**
```bash
sudo ufw status numbered
```
**Add a new allowed IP for cluster services:**
```bash
sudo ufw allow from NEW_IP_ADDRESS to any port 5001 proto tcp
sudo ufw allow from NEW_IP_ADDRESS to any port 7002 proto tcp
# etc.
```
**Delete a rule:**
```bash
sudo ufw status numbered # Get rule number
sudo ufw delete [NUMBER]
```
### SSH Management
**Test SSH config without applying:**
```bash
sudo sshd -t
```
**Reload SSH after config changes:**
```bash
sudo systemctl reload ssh
```
**View SSH login attempts:**
```bash
sudo journalctl -u ssh | tail -50
```
### Fail2ban Management
**Check banned IPs:**
```bash
sudo fail2ban-client status sshd
```
**Unban an IP:**
```bash
sudo fail2ban-client set sshd unbanip IP_ADDRESS
```
### Security Updates
**Check for updates:**
```bash
apt list --upgradable
```
**Apply updates:**
```bash
sudo apt update && sudo apt upgrade -y
```
**Reboot if kernel updated:**
```bash
sudo reboot
```
---
## Security Improvements Completed
### Before Security Audit:
- ❌ No firewall enabled
- ❌ rqlite database exposed to internet (port 5001, 7002)
- ❌ IPFS cluster management exposed (port 9094, 9098)
- ❌ Olric cache exposed (port 3322)
- ❌ Root login enabled without restrictions (VPS 2, 3, 4)
- ❌ No fail2ban on 3 out of 4 servers
- ❌ 19-39 security updates pending
### After Security Hardening:
- ✅ UFW firewall enabled on all servers
- ✅ Sensitive ports restricted to cluster IPs only
- ✅ SSH hardened with key authentication
- ✅ Fail2ban protecting all servers
- ✅ All security updates applied
- ✅ Let's Encrypt production certificates verified
- ✅ Cluster communication tested and working
- ✅ External access verified (HTTP/HTTPS only)
---
## Recommended Next Steps (Optional)
These were not implemented per your request but are recommended for future consideration:
1. **VPN/Private Networking** - Use WireGuard or Tailscale for encrypted cluster communication instead of firewall rules
2. **Automated Security Updates** - Enable unattended-upgrades for automatic security patches
3. **Monitoring & Alerting** - Set up Prometheus/Grafana for service monitoring
4. **Regular Security Audits** - Run `lynis` or `rkhunter` monthly for security checks
---
## Important Notes
### Let's Encrypt Configuration
The Orama Network gateway uses **autocert** from Go's `golang.org/x/crypto/acme/autocert` package. The configuration is in:
**File:** `/home/debros/.orama/configs/node.yaml`
**Relevant settings:**
```yaml
http_gateway:
https:
enabled: true
domain: "node-kv4la8.debros.network"
auto_cert: true
cache_dir: "/home/debros/.orama/tls-cache"
http_port: 80
https_port: 443
email: "admin@node-kv4la8.debros.network"
```
**Important:** There is NO `letsencrypt_staging` flag set, which means it defaults to **production Let's Encrypt**. This is correct for production deployment.
### Firewall Persistence
UFW rules are persistent across reboots. The firewall will automatically start on boot.
### SSH Key Access
Both of your SSH keys are configured on all servers. You can access:
- VPS 1: `ssh -i ~/.ssh/ssh-sotiris/id_ed25519 ubuntu@51.83.128.181`
- VPS 2-4: `ssh -i ~/.ssh/ssh-sotiris/id_ed25519 root@IP_ADDRESS`
Password authentication is still enabled as a fallback, but keys are recommended.
---
## Emergency Access
If you get locked out:
1. **VPS Provider Console:** All major VPS providers offer web-based console access
2. **Password Access:** Password auth is still enabled on all servers
3. **SSH Keys:** Two keys configured for redundancy
**Disable firewall temporarily (emergency only):**
```bash
sudo ufw disable
# Fix the issue
sudo ufw enable
```
---
## Verification Checklist
Use this checklist to verify the security hardening:
- [ ] All 4 servers have UFW firewall enabled
- [ ] SSH is hardened (MaxAuthTries 3, X11Forwarding no)
- [ ] Your SSH keys work on all servers
- [ ] Fail2ban is running on all servers
- [ ] Security updates are current
- [ ] rqlite port 5001 is NOT accessible from internet
- [ ] rqlite port 7002 is NOT accessible from internet
- [ ] IPFS cluster ports 9094, 9098 are NOT accessible from internet
- [ ] Domains are accessible via HTTPS with valid certificates
- [ ] RQLite cluster shows all 4 nodes
- [ ] IPFS cluster shows all 4 peers
- [ ] All services are running (5 processes per server)
---
## Contact & Support
For issues or questions about this deployment:
- **Security Audit Date:** January 18, 2026
- **Configuration Files:** `/home/debros/.orama/configs/`
- **Firewall Rules:** `/etc/ufw/`
- **SSH Config:** `/etc/ssh/sshd_config.d/99-hardening.conf`
- **TLS Certs:** `/home/debros/.orama/tls-cache/`
---
## Changelog
### January 18, 2026 - Production Security Hardening
**Changes:**
1. Added UFW firewall rules on all 4 VPS servers
2. Restricted sensitive ports (5001, 7002, 9094, 9098, 3322, 4101) to cluster IPs only
3. Hardened SSH configuration
4. Added your 2 SSH keys to all servers
5. Installed fail2ban on VPS 1, 2, 3 (VPS 4 already had it)
6. Applied all pending security updates (23-39 packages per server)
7. Verified Let's Encrypt is using production (not staging)
8. Tested all services: rqlite, IPFS, libp2p, Olric clusters
9. Verified all 4 domains are accessible via HTTPS
**Result:** Production-ready secure deployment ✅
---
**END OF DEPLOYMENT GUIDE**

View File

@ -1,294 +0,0 @@
//go:build e2e
package e2e
import (
"context"
"net/http"
"testing"
"time"
"unicode"
)
func TestAuth_MissingAPIKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request without auth headers
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/network/status", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// Should be unauthorized
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
t.Logf("warning: expected 401/403 for missing auth, got %d (auth may not be enforced on this endpoint)", resp.StatusCode)
}
}
func TestAuth_InvalidAPIKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with invalid API key
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Authorization", "Bearer invalid-key-xyz")
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// Should be unauthorized
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
t.Logf("warning: expected 401/403 for invalid key, got %d", resp.StatusCode)
}
}
func TestAuth_CacheWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request cache endpoint without auth
req := &HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/cache/health",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
if err != nil {
t.Fatalf("request failed: %v", err)
}
// Should fail with 401 or 403
if status != http.StatusUnauthorized && status != http.StatusForbidden {
t.Logf("warning: expected 401/403 for cache without auth, got %d", status)
}
}
func TestAuth_StorageWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request storage endpoint without auth
req := &HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/storage/status/QmTest",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
if err != nil {
t.Fatalf("request failed: %v", err)
}
// Should fail with 401 or 403
if status != http.StatusUnauthorized && status != http.StatusForbidden {
t.Logf("warning: expected 401/403 for storage without auth, got %d", status)
}
}
func TestAuth_RQLiteWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request rqlite endpoint without auth
req := &HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/rqlite/schema",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
if err != nil {
t.Fatalf("request failed: %v", err)
}
// Should fail with 401 or 403
if status != http.StatusUnauthorized && status != http.StatusForbidden {
t.Logf("warning: expected 401/403 for rqlite without auth, got %d", status)
}
}
func TestAuth_MalformedBearerToken(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with malformed bearer token
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
// Missing "Bearer " prefix
req.Header.Set("Authorization", "invalid-token-format")
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// Should be unauthorized
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
t.Logf("warning: expected 401/403 for malformed token, got %d", resp.StatusCode)
}
}
func TestAuth_ExpiredJWT(t *testing.T) {
// Skip if JWT is not being used
if GetJWT() == "" && GetAPIKey() == "" {
t.Skip("No JWT or API key configured")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// This test would require an expired JWT token
// For now, test with a clearly invalid JWT structure
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Authorization", "Bearer expired.jwt.token")
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// Should be unauthorized
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
t.Logf("warning: expected 401/403 for expired JWT, got %d", resp.StatusCode)
}
}
func TestAuth_EmptyBearerToken(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with empty bearer token
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Authorization", "Bearer ")
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// Should be unauthorized
if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden {
t.Logf("warning: expected 401/403 for empty token, got %d", resp.StatusCode)
}
}
func TestAuth_DuplicateAuthHeaders(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with both API key and invalid JWT
req := &HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/cache/health",
Headers: map[string]string{
"Authorization": "Bearer " + GetAPIKey(),
"X-API-Key": GetAPIKey(),
},
}
_, status, err := req.Do(ctx)
if err != nil {
t.Fatalf("request failed: %v", err)
}
// Should succeed if API key is valid
if status != http.StatusOK {
t.Logf("request with both headers returned %d", status)
}
}
func TestAuth_CaseSensitiveAPIKey(t *testing.T) {
if GetAPIKey() == "" {
t.Skip("No API key configured")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with incorrectly cased API key
apiKey := GetAPIKey()
incorrectKey := ""
for i, ch := range apiKey {
if i%2 == 0 && unicode.IsLetter(ch) {
incorrectKey += string(unicode.ToUpper(ch)) // Convert to uppercase
} else {
incorrectKey += string(ch)
}
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Authorization", "Bearer "+incorrectKey)
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// API keys should be case-sensitive
if resp.StatusCode == http.StatusOK {
t.Logf("warning: API key check may not be case-sensitive (got 200)")
}
}
func TestAuth_HealthEndpointNoAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Health endpoint at /health should not require auth
req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
client := NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("request failed: %v", err)
}
defer resp.Body.Close()
// Should succeed without auth
if resp.StatusCode != http.StatusOK {
t.Fatalf("expected 200 for /health without auth, got %d", resp.StatusCode)
}
}

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package cluster_test
import (
"bytes"
@ -10,16 +10,22 @@ import (
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/DeBrosOfficial/network/pkg/ipfs"
)
// Note: These tests connect directly to IPFS Cluster API (localhost:9094)
// and IPFS API (localhost:4501). They are for local development only.
// For production testing, use storage_http_test.go which uses gateway endpoints.
func TestIPFSCluster_Health(t *testing.T) {
e2e.SkipIfProduction(t) // Direct IPFS connection not available in production
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 10 * time.Second,
}
@ -35,12 +41,13 @@ func TestIPFSCluster_Health(t *testing.T) {
}
func TestIPFSCluster_GetPeerCount(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 10 * time.Second,
}
@ -62,12 +69,13 @@ func TestIPFSCluster_GetPeerCount(t *testing.T) {
}
func TestIPFSCluster_AddFile(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
@ -94,12 +102,13 @@ func TestIPFSCluster_AddFile(t *testing.T) {
}
func TestIPFSCluster_PinFile(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
@ -131,12 +140,13 @@ func TestIPFSCluster_PinFile(t *testing.T) {
}
func TestIPFSCluster_PinStatus(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
@ -164,7 +174,7 @@ func TestIPFSCluster_PinStatus(t *testing.T) {
}
// Give pin time to propagate
Delay(1000)
e2e.Delay(1000)
// Get status
status, err := client.PinStatus(ctx, cid)
@ -188,12 +198,13 @@ func TestIPFSCluster_PinStatus(t *testing.T) {
}
func TestIPFSCluster_UnpinFile(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
@ -226,12 +237,13 @@ func TestIPFSCluster_UnpinFile(t *testing.T) {
}
func TestIPFSCluster_GetFile(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
@ -250,10 +262,10 @@ func TestIPFSCluster_GetFile(t *testing.T) {
cid := addResult.Cid
// Give time for propagation
Delay(1000)
e2e.Delay(1000)
// Get file
rc, err := client.Get(ctx, cid, GetIPFSAPIURL())
rc, err := client.Get(ctx, cid, e2e.GetIPFSAPIURL())
if err != nil {
t.Fatalf("get file failed: %v", err)
}
@ -272,12 +284,13 @@ func TestIPFSCluster_GetFile(t *testing.T) {
}
func TestIPFSCluster_LargeFile(t *testing.T) {
e2e.SkipIfProduction(t)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 60 * time.Second,
}
@ -305,12 +318,13 @@ func TestIPFSCluster_LargeFile(t *testing.T) {
}
func TestIPFSCluster_ReplicationFactor(t *testing.T) {
e2e.SkipIfProduction(t) // Direct IPFS connection not available in production
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}
@ -340,7 +354,7 @@ func TestIPFSCluster_ReplicationFactor(t *testing.T) {
}
// Give time for replication
Delay(2000)
e2e.Delay(2000)
// Check status
status, err := client.PinStatus(ctx, cid)
@ -352,12 +366,13 @@ func TestIPFSCluster_ReplicationFactor(t *testing.T) {
}
func TestIPFSCluster_MultipleFiles(t *testing.T) {
e2e.SkipIfProduction(t) // Direct IPFS connection not available in production
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
logger := NewTestLogger(t)
logger := e2e.NewTestLogger(t)
cfg := ipfs.Config{
ClusterAPIURL: GetIPFSClusterURL(),
ClusterAPIURL: e2e.GetIPFSClusterURL(),
Timeout: 30 * time.Second,
}

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package cluster_test
import (
"context"
@ -8,25 +8,27 @@ import (
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
)
func TestLibP2P_PeerConnectivity(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Create and connect client
c := NewNetworkClient(t)
c := e2e.NewNetworkClient(t)
if err := c.Connect(); err != nil {
t.Fatalf("connect failed: %v", err)
}
defer c.Disconnect()
// Verify peer connectivity through the gateway
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/peers",
URL: e2e.GetGatewayURL() + "/v1/network/peers",
}
body, status, err := req.Do(ctx)
@ -39,7 +41,7 @@ func TestLibP2P_PeerConnectivity(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -50,30 +52,30 @@ func TestLibP2P_PeerConnectivity(t *testing.T) {
}
func TestLibP2P_BootstrapPeers(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
bootstrapPeers := GetBootstrapPeers()
bootstrapPeers := e2e.GetBootstrapPeers()
if len(bootstrapPeers) == 0 {
t.Skipf("E2E_BOOTSTRAP_PEERS not set; skipping")
}
// Create client with bootstrap peers explicitly set
c := NewNetworkClient(t)
c := e2e.NewNetworkClient(t)
if err := c.Connect(); err != nil {
t.Fatalf("connect failed: %v", err)
}
defer c.Disconnect()
// Give peer discovery time
Delay(2000)
e2e.Delay(2000)
// Verify we're connected (check via gateway status)
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/status",
URL: e2e.GetGatewayURL() + "/v1/network/status",
}
body, status, err := req.Do(ctx)
@ -86,7 +88,7 @@ func TestLibP2P_BootstrapPeers(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -96,15 +98,15 @@ func TestLibP2P_BootstrapPeers(t *testing.T) {
}
func TestLibP2P_MultipleClientConnections(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Create multiple clients
c1 := NewNetworkClient(t)
c2 := NewNetworkClient(t)
c3 := NewNetworkClient(t)
c1 := e2e.NewNetworkClient(t)
c2 := e2e.NewNetworkClient(t)
c3 := e2e.NewNetworkClient(t)
if err := c1.Connect(); err != nil {
t.Fatalf("c1 connect failed: %v", err)
@ -122,12 +124,12 @@ func TestLibP2P_MultipleClientConnections(t *testing.T) {
defer c3.Disconnect()
// Give peer discovery time
Delay(2000)
e2e.Delay(2000)
// Verify gateway sees multiple peers
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/peers",
URL: e2e.GetGatewayURL() + "/v1/network/peers",
}
body, status, err := req.Do(ctx)
@ -140,7 +142,7 @@ func TestLibP2P_MultipleClientConnections(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -151,12 +153,12 @@ func TestLibP2P_MultipleClientConnections(t *testing.T) {
}
func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
c := NewNetworkClient(t)
c := e2e.NewNetworkClient(t)
// Connect
if err := c.Connect(); err != nil {
@ -164,9 +166,9 @@ func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
}
// Verify connected via gateway
req1 := &HTTPRequest{
req1 := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/status",
URL: e2e.GetGatewayURL() + "/v1/network/status",
}
_, status1, err := req1.Do(ctx)
@ -180,7 +182,7 @@ func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
}
// Give time for disconnect to propagate
Delay(500)
e2e.Delay(500)
// Reconnect
if err := c.Connect(); err != nil {
@ -189,9 +191,9 @@ func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
defer c.Disconnect()
// Verify connected via gateway again
req2 := &HTTPRequest{
req2 := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/status",
URL: e2e.GetGatewayURL() + "/v1/network/status",
}
_, status2, err := req2.Do(ctx)
@ -201,25 +203,25 @@ func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) {
}
func TestLibP2P_PeerDiscovery(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Create client
c := NewNetworkClient(t)
c := e2e.NewNetworkClient(t)
if err := c.Connect(); err != nil {
t.Fatalf("connect failed: %v", err)
}
defer c.Disconnect()
// Give peer discovery time
Delay(3000)
e2e.Delay(3000)
// Get peer list
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/peers",
URL: e2e.GetGatewayURL() + "/v1/network/peers",
}
body, status, err := req.Do(ctx)
@ -232,7 +234,7 @@ func TestLibP2P_PeerDiscovery(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -251,22 +253,22 @@ func TestLibP2P_PeerDiscovery(t *testing.T) {
}
func TestLibP2P_PeerAddressFormat(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Create client
c := NewNetworkClient(t)
c := e2e.NewNetworkClient(t)
if err := c.Connect(); err != nil {
t.Fatalf("connect failed: %v", err)
}
defer c.Disconnect()
// Get peer list
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/peers",
URL: e2e.GetGatewayURL() + "/v1/network/peers",
}
body, status, err := req.Do(ctx)
@ -279,7 +281,7 @@ func TestLibP2P_PeerAddressFormat(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}

View File

@ -0,0 +1,556 @@
//go:build e2e
package cluster_test
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/require"
)
// =============================================================================
// STRICT NAMESPACE CLUSTER TESTS
// These tests FAIL if things don't work. No t.Skip() for expected functionality.
// =============================================================================
// TestNamespaceCluster_FullProvisioning is a STRICT test that verifies the complete
// namespace cluster provisioning flow. This test FAILS if any component doesn't work.
func TestNamespaceCluster_FullProvisioning(t *testing.T) {
// Generate unique namespace name
newNamespace := fmt.Sprintf("e2e-cluster-%d", time.Now().UnixNano())
env, err := e2e.LoadTestEnvWithNamespace(newNamespace)
require.NoError(t, err, "FATAL: Failed to create test environment for namespace %s", newNamespace)
require.NotEmpty(t, env.APIKey, "FATAL: No API key received - namespace provisioning failed")
t.Logf("Created namespace: %s", newNamespace)
t.Logf("API Key: %s...", env.APIKey[:min(20, len(env.APIKey))])
// Get cluster status to verify provisioning
t.Run("Cluster status shows ready", func(t *testing.T) {
// Query the namespace cluster status
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/namespace/status?name="+newNamespace, nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Failed to query cluster status")
defer resp.Body.Close()
bodyBytes, _ := io.ReadAll(resp.Body)
t.Logf("Cluster status response: %s", string(bodyBytes))
// If status endpoint exists and returns cluster info, verify it
if resp.StatusCode == http.StatusOK {
var result map[string]interface{}
if err := json.Unmarshal(bodyBytes, &result); err == nil {
status, _ := result["status"].(string)
if status != "" && status != "ready" && status != "default" {
t.Errorf("FAIL: Cluster status is '%s', expected 'ready'", status)
}
}
}
})
// Verify we can use the namespace for deployments
t.Run("Deployments work on namespace", func(t *testing.T) {
tarballPath := filepath.Join("../../testdata/apps/react-app")
if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
t.Skip("Test tarball not found - skipping deployment test")
}
deploymentName := fmt.Sprintf("cluster-test-%d", time.Now().Unix())
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID, "FAIL: Deployment creation failed on namespace cluster")
t.Logf("Created deployment %s (ID: %s) on namespace %s", deploymentName, deploymentID, newNamespace)
// Cleanup
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Verify deployment is accessible
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Failed to get deployment")
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode, "FAIL: Cannot retrieve deployment from namespace cluster")
})
}
// TestNamespaceCluster_RQLiteHealth verifies that namespace RQLite cluster is running
// and accepting connections. This test FAILS if RQLite is not accessible.
func TestNamespaceCluster_RQLiteHealth(t *testing.T) {
t.Run("Check namespace port range for RQLite", func(t *testing.T) {
foundRQLite := false
var healthyPorts []int
var unhealthyPorts []int
// Check first few port blocks
for portStart := 10000; portStart <= 10015; portStart += 5 {
rqlitePort := portStart // RQLite HTTP is first port in block
if isPortListening("localhost", rqlitePort) {
t.Logf("Found RQLite instance on port %d", rqlitePort)
foundRQLite = true
// Verify it responds to health check
healthURL := fmt.Sprintf("http://localhost:%d/status", rqlitePort)
healthResp, err := http.Get(healthURL)
if err == nil {
defer healthResp.Body.Close()
if healthResp.StatusCode == http.StatusOK {
healthyPorts = append(healthyPorts, rqlitePort)
t.Logf(" ✓ RQLite on port %d is healthy", rqlitePort)
} else {
unhealthyPorts = append(unhealthyPorts, rqlitePort)
t.Errorf("FAIL: RQLite on port %d returned status %d", rqlitePort, healthResp.StatusCode)
}
} else {
unhealthyPorts = append(unhealthyPorts, rqlitePort)
t.Errorf("FAIL: RQLite on port %d health check failed: %v", rqlitePort, err)
}
}
}
if !foundRQLite {
t.Log("No namespace RQLite instances found in port range 10000-10015")
t.Log("This is expected if no namespaces have been provisioned yet")
} else {
t.Logf("Summary: %d healthy, %d unhealthy RQLite instances", len(healthyPorts), len(unhealthyPorts))
require.Empty(t, unhealthyPorts, "FAIL: Some RQLite instances are unhealthy")
}
})
}
// TestNamespaceCluster_OlricHealth verifies that namespace Olric cluster is running
// and accepting connections.
func TestNamespaceCluster_OlricHealth(t *testing.T) {
t.Run("Check namespace port range for Olric", func(t *testing.T) {
foundOlric := false
foundCount := 0
// Check first few port blocks - Olric memberlist is port_start + 3
for portStart := 10000; portStart <= 10015; portStart += 5 {
olricMemberlistPort := portStart + 3
if isPortListening("localhost", olricMemberlistPort) {
t.Logf("Found Olric memberlist on port %d", olricMemberlistPort)
foundOlric = true
foundCount++
}
}
if !foundOlric {
t.Log("No namespace Olric instances found in port range 10003-10018")
t.Log("This is expected if no namespaces have been provisioned yet")
} else {
t.Logf("Found %d Olric memberlist ports accepting connections", foundCount)
}
})
}
// TestNamespaceCluster_GatewayHealth verifies that namespace Gateway instances are running.
// This test FAILS if gateway binary exists but gateways don't spawn.
func TestNamespaceCluster_GatewayHealth(t *testing.T) {
// Check if gateway binary exists
gatewayBinaryPaths := []string{
"./bin/orama",
"../bin/orama",
"/usr/local/bin/orama",
}
var gatewayBinaryExists bool
var foundPath string
for _, path := range gatewayBinaryPaths {
if _, err := os.Stat(path); err == nil {
gatewayBinaryExists = true
foundPath = path
break
}
}
if !gatewayBinaryExists {
t.Log("Gateway binary not found - namespace gateways will not spawn")
t.Log("Run 'make build' to build the gateway binary")
t.Log("Checked paths:", gatewayBinaryPaths)
// This is a FAILURE if we expect gateway to work
t.Error("FAIL: Gateway binary not found. Run 'make build' first.")
return
}
t.Logf("Gateway binary found at: %s", foundPath)
t.Run("Check namespace port range for Gateway", func(t *testing.T) {
foundGateway := false
var healthyPorts []int
var unhealthyPorts []int
// Check first few port blocks - Gateway HTTP is port_start + 4
for portStart := 10000; portStart <= 10015; portStart += 5 {
gatewayPort := portStart + 4
if isPortListening("localhost", gatewayPort) {
t.Logf("Found Gateway instance on port %d", gatewayPort)
foundGateway = true
// Verify it responds to health check
healthURL := fmt.Sprintf("http://localhost:%d/v1/health", gatewayPort)
healthResp, err := http.Get(healthURL)
if err == nil {
defer healthResp.Body.Close()
if healthResp.StatusCode == http.StatusOK {
healthyPorts = append(healthyPorts, gatewayPort)
t.Logf(" ✓ Gateway on port %d is healthy", gatewayPort)
} else {
unhealthyPorts = append(unhealthyPorts, gatewayPort)
t.Errorf("FAIL: Gateway on port %d returned status %d", gatewayPort, healthResp.StatusCode)
}
} else {
unhealthyPorts = append(unhealthyPorts, gatewayPort)
t.Errorf("FAIL: Gateway on port %d health check failed: %v", gatewayPort, err)
}
}
}
if !foundGateway {
t.Log("No namespace Gateway instances found in port range 10004-10019")
t.Log("This is expected if no namespaces have been provisioned yet")
} else {
t.Logf("Summary: %d healthy, %d unhealthy Gateway instances", len(healthyPorts), len(unhealthyPorts))
require.Empty(t, unhealthyPorts, "FAIL: Some Gateway instances are unhealthy")
}
})
}
// TestNamespaceCluster_ProvisioningCreatesProcesses creates a new namespace and
// verifies that actual processes are spawned. This is the STRICTEST test.
func TestNamespaceCluster_ProvisioningCreatesProcesses(t *testing.T) {
newNamespace := fmt.Sprintf("e2e-strict-%d", time.Now().UnixNano())
// Record ports before provisioning
portsBefore := getListeningPortsInRange(10000, 10099)
t.Logf("Ports in use before provisioning: %v", portsBefore)
// Create namespace
env, err := e2e.LoadTestEnvWithNamespace(newNamespace)
require.NoError(t, err, "FATAL: Failed to create namespace")
require.NotEmpty(t, env.APIKey, "FATAL: No API key - provisioning failed")
t.Logf("Namespace '%s' created successfully", newNamespace)
// Wait a moment for processes to fully start
time.Sleep(3 * time.Second)
// Record ports after provisioning
portsAfter := getListeningPortsInRange(10000, 10099)
t.Logf("Ports in use after provisioning: %v", portsAfter)
// Check if new ports were opened
newPorts := diffPorts(portsBefore, portsAfter)
sort.Ints(newPorts)
t.Logf("New ports opened: %v", newPorts)
t.Run("New ports allocated for namespace cluster", func(t *testing.T) {
if len(newPorts) == 0 {
// This might be OK for default namespace or if using global cluster
t.Log("No new ports detected")
t.Log("Possible reasons:")
t.Log(" - Namespace uses default cluster (expected for 'default')")
t.Log(" - Cluster already existed from previous test")
t.Log(" - Provisioning is handled differently in this environment")
} else {
t.Logf("SUCCESS: %d new ports opened for namespace cluster", len(newPorts))
// Verify the ports follow expected pattern
for _, port := range newPorts {
offset := (port - 10000) % 5
switch offset {
case 0:
t.Logf(" Port %d: RQLite HTTP", port)
case 1:
t.Logf(" Port %d: RQLite Raft", port)
case 2:
t.Logf(" Port %d: Olric HTTP", port)
case 3:
t.Logf(" Port %d: Olric Memberlist", port)
case 4:
t.Logf(" Port %d: Gateway HTTP", port)
}
}
}
})
t.Run("RQLite is accessible on allocated ports", func(t *testing.T) {
rqlitePorts := filterPortsByOffset(newPorts, 0) // RQLite HTTP is offset 0
if len(rqlitePorts) == 0 {
t.Log("No new RQLite ports detected")
return
}
for _, port := range rqlitePorts {
healthURL := fmt.Sprintf("http://localhost:%d/status", port)
resp, err := http.Get(healthURL)
require.NoError(t, err, "FAIL: RQLite on port %d is not responding", port)
resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode,
"FAIL: RQLite on port %d returned status %d", port, resp.StatusCode)
t.Logf("✓ RQLite on port %d is healthy", port)
}
})
t.Run("Olric is accessible on allocated ports", func(t *testing.T) {
olricPorts := filterPortsByOffset(newPorts, 3) // Olric Memberlist is offset 3
if len(olricPorts) == 0 {
t.Log("No new Olric ports detected")
return
}
for _, port := range olricPorts {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 2*time.Second)
require.NoError(t, err, "FAIL: Olric memberlist on port %d is not responding", port)
conn.Close()
t.Logf("✓ Olric memberlist on port %d is accepting connections", port)
}
})
}
// TestNamespaceCluster_StatusEndpoint tests the /v1/namespace/status endpoint
func TestNamespaceCluster_StatusEndpoint(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
t.Run("Status endpoint returns 404 for non-existent cluster", func(t *testing.T) {
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/namespace/status?id=non-existent-id", nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Request should not fail")
defer resp.Body.Close()
require.Equal(t, http.StatusNotFound, resp.StatusCode,
"FAIL: Should return 404 for non-existent cluster, got %d", resp.StatusCode)
})
}
// TestNamespaceCluster_CrossNamespaceAccess verifies namespace isolation
func TestNamespaceCluster_CrossNamespaceAccess(t *testing.T) {
nsA := fmt.Sprintf("ns-a-%d", time.Now().Unix())
nsB := fmt.Sprintf("ns-b-%d", time.Now().Unix())
envA, err := e2e.LoadTestEnvWithNamespace(nsA)
require.NoError(t, err, "FAIL: Cannot create namespace A")
envB, err := e2e.LoadTestEnvWithNamespace(nsB)
require.NoError(t, err, "FAIL: Cannot create namespace B")
// Verify both namespaces have different API keys
require.NotEqual(t, envA.APIKey, envB.APIKey, "FAIL: Namespaces should have different API keys")
t.Logf("Namespace A API key: %s...", envA.APIKey[:min(10, len(envA.APIKey))])
t.Logf("Namespace B API key: %s...", envB.APIKey[:min(10, len(envB.APIKey))])
t.Run("API keys are namespace-scoped", func(t *testing.T) {
// Namespace A should not see namespace B's resources
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/deployments/list", nil)
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Request failed")
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode, "Should list deployments")
var result map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
json.Unmarshal(bodyBytes, &result)
deployments, _ := result["deployments"].([]interface{})
for _, d := range deployments {
dep, ok := d.(map[string]interface{})
if !ok {
continue
}
ns, _ := dep["namespace"].(string)
require.NotEqual(t, nsB, ns,
"FAIL: Namespace A sees Namespace B deployments - isolation broken!")
}
})
}
// TestDeployment_SubdomainFormat tests deployment subdomain format
func TestDeployment_SubdomainFormat(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
tarballPath := filepath.Join("../../testdata/apps/react-app")
if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
t.Skip("Test tarball not found")
}
deploymentName := fmt.Sprintf("subdomain-test-%d", time.Now().UnixNano())
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID, "FAIL: Deployment creation failed")
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deployment has subdomain with random suffix", func(t *testing.T) {
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Failed to get deployment")
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode, "Should get deployment")
var result map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
json.Unmarshal(bodyBytes, &result)
deployment, ok := result["deployment"].(map[string]interface{})
if !ok {
deployment = result
}
subdomain, _ := deployment["subdomain"].(string)
if subdomain != "" {
require.True(t, strings.HasPrefix(subdomain, deploymentName),
"FAIL: Subdomain '%s' should start with deployment name '%s'", subdomain, deploymentName)
suffix := strings.TrimPrefix(subdomain, deploymentName+"-")
if suffix != subdomain { // There was a dash separator
require.Equal(t, 6, len(suffix),
"FAIL: Random suffix should be 6 characters, got %d (%s)", len(suffix), suffix)
}
t.Logf("Deployment subdomain: %s", subdomain)
}
})
}
// TestNamespaceCluster_PortAllocation tests port allocation correctness
func TestNamespaceCluster_PortAllocation(t *testing.T) {
t.Run("Port range is 10000-10099", func(t *testing.T) {
const portRangeStart = 10000
const portRangeEnd = 10099
const portsPerNamespace = 5
const maxNamespacesPerNode = 20
totalPorts := portRangeEnd - portRangeStart + 1
require.Equal(t, 100, totalPorts, "Port range should be 100 ports")
expectedMax := totalPorts / portsPerNamespace
require.Equal(t, maxNamespacesPerNode, expectedMax,
"Max namespaces per node calculation mismatch")
})
t.Run("Port assignments are sequential within block", func(t *testing.T) {
portStart := 10000
ports := map[string]int{
"rqlite_http": portStart + 0,
"rqlite_raft": portStart + 1,
"olric_http": portStart + 2,
"olric_memberlist": portStart + 3,
"gateway_http": portStart + 4,
}
seen := make(map[int]bool)
for name, port := range ports {
require.False(t, seen[port], "FAIL: Port %d for %s is duplicate", port, name)
seen[port] = true
}
})
}
// =============================================================================
// HELPER FUNCTIONS
// =============================================================================
func isPortListening(host string, port int) bool {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", host, port), 1*time.Second)
if err != nil {
return false
}
conn.Close()
return true
}
func getListeningPortsInRange(start, end int) []int {
var ports []int
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// Check ports concurrently for speed
results := make(chan int, end-start+1)
for port := start; port <= end; port++ {
go func(p int) {
select {
case <-ctx.Done():
results <- 0
return
default:
if isPortListening("localhost", p) {
results <- p
} else {
results <- 0
}
}
}(port)
}
for i := 0; i <= end-start; i++ {
if port := <-results; port > 0 {
ports = append(ports, port)
}
}
return ports
}
func diffPorts(before, after []int) []int {
beforeMap := make(map[int]bool)
for _, p := range before {
beforeMap[p] = true
}
var newPorts []int
for _, p := range after {
if !beforeMap[p] {
newPorts = append(newPorts, p)
}
}
return newPorts
}
func filterPortsByOffset(ports []int, offset int) []int {
var filtered []int
for _, p := range ports {
if (p-10000)%5 == offset {
filtered = append(filtered, p)
}
}
return filtered
}
func min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@ -0,0 +1,447 @@
//go:build e2e
package cluster_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNamespaceIsolation creates two namespaces once and runs all isolation
// subtests against them. This keeps namespace usage to 2 regardless of how
// many isolation scenarios we test.
func TestNamespaceIsolation(t *testing.T) {
envA, err := e2e.LoadTestEnvWithNamespace("namespace-a-" + fmt.Sprintf("%d", time.Now().Unix()))
require.NoError(t, err, "Failed to create namespace A environment")
envB, err := e2e.LoadTestEnvWithNamespace("namespace-b-" + fmt.Sprintf("%d", time.Now().Unix()))
require.NoError(t, err, "Failed to create namespace B environment")
t.Run("Deployments", func(t *testing.T) {
testNamespaceIsolationDeployments(t, envA, envB)
})
t.Run("SQLiteDatabases", func(t *testing.T) {
testNamespaceIsolationSQLiteDatabases(t, envA, envB)
})
t.Run("IPFSContent", func(t *testing.T) {
testNamespaceIsolationIPFSContent(t, envA, envB)
})
t.Run("OlricCache", func(t *testing.T) {
testNamespaceIsolationOlricCache(t, envA, envB)
})
}
func testNamespaceIsolationDeployments(t *testing.T, envA, envB *e2e.E2ETestEnv) {
tarballPath := filepath.Join("../../testdata/apps/react-app")
// Create deployment in namespace-a
deploymentNameA := "test-app-ns-a"
deploymentIDA := e2e.CreateTestDeployment(t, envA, deploymentNameA, tarballPath)
defer func() {
if !envA.SkipCleanup {
e2e.DeleteDeployment(t, envA, deploymentIDA)
}
}()
// Create deployment in namespace-b
deploymentNameB := "test-app-ns-b"
deploymentIDB := e2e.CreateTestDeployment(t, envB, deploymentNameB, tarballPath)
defer func() {
if !envB.SkipCleanup {
e2e.DeleteDeployment(t, envB, deploymentIDB)
}
}()
t.Run("Namespace-A cannot list Namespace-B deployments", func(t *testing.T) {
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/deployments/list", nil)
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
var result map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
require.NoError(t, json.Unmarshal(bodyBytes, &result), "Should decode JSON")
deployments, ok := result["deployments"].([]interface{})
require.True(t, ok, "Deployments should be an array")
// Should only see namespace-a deployments
for _, d := range deployments {
dep, ok := d.(map[string]interface{})
if !ok {
continue
}
assert.NotEqual(t, deploymentNameB, dep["name"], "Should not see namespace-b deployment")
}
t.Logf("✓ Namespace A cannot see Namespace B deployments")
})
t.Run("Namespace-A cannot access Namespace-B deployment by ID", func(t *testing.T) {
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/deployments/get?id="+deploymentIDB, nil)
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
// Should return 404 or 403
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
"Should block cross-namespace access")
t.Logf("✓ Namespace A cannot access Namespace B deployment (status: %d)", resp.StatusCode)
})
t.Run("Namespace-A cannot delete Namespace-B deployment", func(t *testing.T) {
req, _ := http.NewRequest("DELETE", envA.GatewayURL+"/v1/deployments/delete?id="+deploymentIDB, nil)
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
"Should block cross-namespace deletion")
// Verify deployment still exists for namespace-b
req2, _ := http.NewRequest("GET", envB.GatewayURL+"/v1/deployments/get?id="+deploymentIDB, nil)
req2.Header.Set("Authorization", "Bearer "+envB.APIKey)
resp2, err := envB.HTTPClient.Do(req2)
require.NoError(t, err, "Should execute request")
defer resp2.Body.Close()
assert.Equal(t, http.StatusOK, resp2.StatusCode, "Deployment should still exist in namespace B")
t.Logf("✓ Namespace A cannot delete Namespace B deployment")
})
}
func testNamespaceIsolationSQLiteDatabases(t *testing.T, envA, envB *e2e.E2ETestEnv) {
// Create database in namespace-a
dbNameA := "users-db-a"
e2e.CreateSQLiteDB(t, envA, dbNameA)
defer func() {
if !envA.SkipCleanup {
e2e.DeleteSQLiteDB(t, envA, dbNameA)
}
}()
// Create database in namespace-b
dbNameB := "users-db-b"
e2e.CreateSQLiteDB(t, envB, dbNameB)
defer func() {
if !envB.SkipCleanup {
e2e.DeleteSQLiteDB(t, envB, dbNameB)
}
}()
t.Run("Namespace-A cannot list Namespace-B databases", func(t *testing.T) {
req, _ := http.NewRequest("GET", envA.GatewayURL+"/v1/db/sqlite/list", nil)
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
var result map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
require.NoError(t, json.Unmarshal(bodyBytes, &result), "Should decode JSON")
databases, ok := result["databases"].([]interface{})
require.True(t, ok, "Databases should be an array")
for _, db := range databases {
database, ok := db.(map[string]interface{})
if !ok {
continue
}
assert.NotEqual(t, dbNameB, database["database_name"], "Should not see namespace-b database")
}
t.Logf("✓ Namespace A cannot see Namespace B databases")
})
t.Run("Namespace-A cannot query Namespace-B database", func(t *testing.T) {
reqBody := map[string]interface{}{
"database_name": dbNameB,
"query": "SELECT * FROM users",
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/db/sqlite/query", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Should block cross-namespace query")
t.Logf("✓ Namespace A cannot query Namespace B database")
})
t.Run("Namespace-A cannot backup Namespace-B database", func(t *testing.T) {
reqBody := map[string]string{"database_name": dbNameB}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/db/sqlite/backup", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Should block cross-namespace backup")
t.Logf("✓ Namespace A cannot backup Namespace B database")
})
}
func testNamespaceIsolationIPFSContent(t *testing.T, envA, envB *e2e.E2ETestEnv) {
// Upload file in namespace-a
cidA := e2e.UploadTestFile(t, envA, "test-file-a.txt", "Content from namespace A")
defer func() {
if !envA.SkipCleanup {
e2e.UnpinFile(t, envA, cidA)
}
}()
t.Run("Namespace-B cannot GET Namespace-A IPFS content", func(t *testing.T) {
req, _ := http.NewRequest("GET", envB.GatewayURL+"/v1/storage/get/"+cidA, nil)
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
resp, err := envB.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
"Should block cross-namespace IPFS GET")
t.Logf("✓ Namespace B cannot GET Namespace A IPFS content (status: %d)", resp.StatusCode)
})
t.Run("Namespace-B cannot PIN Namespace-A IPFS content", func(t *testing.T) {
reqBody := map[string]string{
"cid": cidA,
"name": "stolen-content",
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/storage/pin", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envB.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
"Should block cross-namespace PIN")
t.Logf("✓ Namespace B cannot PIN Namespace A IPFS content (status: %d)", resp.StatusCode)
})
t.Run("Namespace-B cannot UNPIN Namespace-A IPFS content", func(t *testing.T) {
req, _ := http.NewRequest("DELETE", envB.GatewayURL+"/v1/storage/unpin/"+cidA, nil)
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
resp, err := envB.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Contains(t, []int{http.StatusNotFound, http.StatusForbidden}, resp.StatusCode,
"Should block cross-namespace UNPIN")
t.Logf("✓ Namespace B cannot UNPIN Namespace A IPFS content (status: %d)", resp.StatusCode)
})
t.Run("Namespace-A can list only their own IPFS pins", func(t *testing.T) {
t.Skip("List pins endpoint not implemented yet - namespace isolation enforced at GET/PIN/UNPIN levels")
})
}
func testNamespaceIsolationOlricCache(t *testing.T, envA, envB *e2e.E2ETestEnv) {
dmap := "test-cache"
keyA := "user-session-123"
valueA := `{"user_id": "alice", "token": "secret-token-a"}`
t.Run("Namespace-A sets cache key", func(t *testing.T) {
reqBody := map[string]interface{}{
"dmap": dmap,
"key": keyA,
"value": valueA,
"ttl": "300s",
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/cache/put", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envA.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envA.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should set cache key successfully")
t.Logf("✓ Namespace A set cache key")
})
t.Run("Namespace-B cannot GET Namespace-A cache key", func(t *testing.T) {
reqBody := map[string]interface{}{
"dmap": dmap,
"key": keyA,
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envB.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
// Should return 404 (key doesn't exist in namespace-b)
assert.Equal(t, http.StatusNotFound, resp.StatusCode, "Should not find key in different namespace")
t.Logf("✓ Namespace B cannot GET Namespace A cache key")
})
t.Run("Namespace-B cannot DELETE Namespace-A cache key", func(t *testing.T) {
reqBody := map[string]string{
"dmap": dmap,
"key": keyA,
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/delete", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envB.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Contains(t, []int{http.StatusOK, http.StatusNotFound}, resp.StatusCode)
// Verify key still exists for namespace-a
reqBody2 := map[string]interface{}{
"dmap": dmap,
"key": keyA,
}
bodyBytes2, _ := json.Marshal(reqBody2)
req2, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes2))
req2.Header.Set("Authorization", "Bearer "+envA.APIKey)
req2.Header.Set("Content-Type", "application/json")
resp2, err := envA.HTTPClient.Do(req2)
require.NoError(t, err, "Should execute request")
defer resp2.Body.Close()
assert.Equal(t, http.StatusOK, resp2.StatusCode, "Key should still exist in namespace A")
var result map[string]interface{}
bodyBytes3, _ := io.ReadAll(resp2.Body)
require.NoError(t, json.Unmarshal(bodyBytes3, &result), "Should decode result")
// Parse expected JSON string for comparison
var expectedValue map[string]interface{}
json.Unmarshal([]byte(valueA), &expectedValue)
assert.Equal(t, expectedValue, result["value"], "Value should match")
t.Logf("✓ Namespace B cannot DELETE Namespace A cache key")
})
t.Run("Namespace-B can set same key name in their namespace", func(t *testing.T) {
// Same key name, different namespace should be allowed
valueB := `{"user_id": "bob", "token": "secret-token-b"}`
reqBody := map[string]interface{}{
"dmap": dmap,
"key": keyA, // Same key name as namespace-a
"value": valueB,
"ttl": "300s",
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/put", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+envB.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := envB.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should set key in namespace B")
// Verify namespace-a still has their value
reqBody2 := map[string]interface{}{
"dmap": dmap,
"key": keyA,
}
bodyBytes2, _ := json.Marshal(reqBody2)
req2, _ := http.NewRequest("POST", envA.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes2))
req2.Header.Set("Authorization", "Bearer "+envA.APIKey)
req2.Header.Set("Content-Type", "application/json")
resp2, _ := envA.HTTPClient.Do(req2)
defer resp2.Body.Close()
var resultA map[string]interface{}
bodyBytesA, _ := io.ReadAll(resp2.Body)
require.NoError(t, json.Unmarshal(bodyBytesA, &resultA), "Should decode result A")
// Parse expected JSON string for comparison
var expectedValueA map[string]interface{}
json.Unmarshal([]byte(valueA), &expectedValueA)
assert.Equal(t, expectedValueA, resultA["value"], "Namespace A value should be unchanged")
// Verify namespace-b has their different value
reqBody3 := map[string]interface{}{
"dmap": dmap,
"key": keyA,
}
bodyBytes3, _ := json.Marshal(reqBody3)
req3, _ := http.NewRequest("POST", envB.GatewayURL+"/v1/cache/get", bytes.NewReader(bodyBytes3))
req3.Header.Set("Authorization", "Bearer "+envB.APIKey)
req3.Header.Set("Content-Type", "application/json")
resp3, _ := envB.HTTPClient.Do(req3)
defer resp3.Body.Close()
var resultB map[string]interface{}
bodyBytesB, _ := io.ReadAll(resp3.Body)
require.NoError(t, json.Unmarshal(bodyBytesB, &resultB), "Should decode result B")
// Parse expected JSON string for comparison
var expectedValueB map[string]interface{}
json.Unmarshal([]byte(valueB), &expectedValueB)
assert.Equal(t, expectedValueB, resultB["value"], "Namespace B value should be different")
t.Logf("✓ Namespace B can set same key name independently")
t.Logf(" - Namespace A value: %s", valueA)
t.Logf(" - Namespace B value: %s", valueB)
})
}

View File

@ -0,0 +1,338 @@
//go:build e2e
package cluster_test
import (
"encoding/json"
"fmt"
"net"
"net/http"
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/require"
)
// =============================================================================
// STRICT OLRIC CACHE DISTRIBUTION TESTS
// These tests verify that Olric cache data is properly distributed across nodes.
// Tests FAIL if distribution doesn't work - no skips, no warnings.
// =============================================================================
// getOlricNodeAddresses returns HTTP addresses of Olric nodes
// Note: Olric HTTP port is typically on port 3320 for the main cluster
func getOlricNodeAddresses() []string {
// In dev mode, we have a single Olric instance
// In production, each node runs its own Olric instance
return []string{
"http://localhost:3320",
}
}
// TestOlric_BasicDistribution verifies cache operations work across the cluster.
func TestOlric_BasicDistribution(t *testing.T) {
// Note: Not using SkipIfMissingGateway() since LoadTestEnv() creates its own API key
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
require.NotEmpty(t, env.APIKey, "FAIL: No API key available")
dmap := fmt.Sprintf("dist_test_%d", time.Now().UnixNano())
t.Run("Put_and_get_from_same_gateway", func(t *testing.T) {
key := fmt.Sprintf("key_%d", time.Now().UnixNano())
value := fmt.Sprintf("value_%d", time.Now().UnixNano())
// Put
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
require.NoError(t, err, "FAIL: Could not put value to cache")
// Get
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not get value from cache")
require.Equal(t, value, retrieved, "FAIL: Retrieved value doesn't match")
t.Logf(" ✓ Put/Get works: %s = %s", key, value)
})
t.Run("Multiple_keys_distributed", func(t *testing.T) {
// Put multiple keys (should be distributed across partitions)
keys := make(map[string]string)
for i := 0; i < 20; i++ {
key := fmt.Sprintf("dist_key_%d_%d", i, time.Now().UnixNano())
value := fmt.Sprintf("dist_value_%d", i)
keys[key] = value
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
require.NoError(t, err, "FAIL: Could not put key %s", key)
}
t.Logf(" Put 20 keys to cache")
// Verify all keys are retrievable
for key, expectedValue := range keys {
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not get key %s", key)
require.Equal(t, expectedValue, retrieved, "FAIL: Value mismatch for key %s", key)
}
t.Logf(" ✓ All 20 keys are retrievable")
})
}
// TestOlric_ConcurrentAccess verifies cache handles concurrent operations correctly.
func TestOlric_ConcurrentAccess(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
dmap := fmt.Sprintf("concurrent_test_%d", time.Now().UnixNano())
t.Run("Concurrent_writes_to_same_key", func(t *testing.T) {
key := fmt.Sprintf("concurrent_key_%d", time.Now().UnixNano())
// Launch multiple goroutines writing to the same key
done := make(chan error, 10)
for i := 0; i < 10; i++ {
go func(idx int) {
value := fmt.Sprintf("concurrent_value_%d", idx)
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
done <- err
}(i)
}
// Wait for all writes
var errors []error
for i := 0; i < 10; i++ {
if err := <-done; err != nil {
errors = append(errors, err)
}
}
require.Empty(t, errors, "FAIL: %d concurrent writes failed: %v", len(errors), errors)
// The key should have ONE of the values (last write wins)
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not get key after concurrent writes")
require.Contains(t, retrieved, "concurrent_value_", "FAIL: Value doesn't match expected pattern")
t.Logf(" ✓ Concurrent writes succeeded, final value: %s", retrieved)
})
t.Run("Concurrent_reads_and_writes", func(t *testing.T) {
key := fmt.Sprintf("rw_key_%d", time.Now().UnixNano())
initialValue := "initial_value"
// Set initial value
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, initialValue)
require.NoError(t, err, "FAIL: Could not set initial value")
// Launch concurrent readers and writers
done := make(chan error, 20)
// 10 readers
for i := 0; i < 10; i++ {
go func() {
_, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
done <- err
}()
}
// 10 writers
for i := 0; i < 10; i++ {
go func(idx int) {
value := fmt.Sprintf("updated_value_%d", idx)
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
done <- err
}(i)
}
// Wait for all operations
var readErrors, writeErrors []error
for i := 0; i < 20; i++ {
if err := <-done; err != nil {
if i < 10 {
readErrors = append(readErrors, err)
} else {
writeErrors = append(writeErrors, err)
}
}
}
require.Empty(t, readErrors, "FAIL: %d reads failed", len(readErrors))
require.Empty(t, writeErrors, "FAIL: %d writes failed", len(writeErrors))
t.Logf(" ✓ Concurrent read/write operations succeeded")
})
}
// TestOlric_NamespaceClusterCache verifies cache works in namespace-specific clusters.
func TestOlric_NamespaceClusterCache(t *testing.T) {
// Create a new namespace
namespace := fmt.Sprintf("cache-test-%d", time.Now().UnixNano())
env, err := e2e.LoadTestEnvWithNamespace(namespace)
require.NoError(t, err, "FAIL: Could not create namespace for cache test")
require.NotEmpty(t, env.APIKey, "FAIL: No API key")
t.Logf("Created namespace %s", namespace)
dmap := fmt.Sprintf("ns_cache_%d", time.Now().UnixNano())
t.Run("Cache_operations_work_in_namespace", func(t *testing.T) {
key := fmt.Sprintf("ns_key_%d", time.Now().UnixNano())
value := fmt.Sprintf("ns_value_%d", time.Now().UnixNano())
// Put using namespace API key
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
require.NoError(t, err, "FAIL: Could not put value in namespace cache")
// Get
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not get value from namespace cache")
require.Equal(t, value, retrieved, "FAIL: Value mismatch in namespace cache")
t.Logf(" ✓ Namespace cache operations work: %s = %s", key, value)
})
// Check if namespace Olric instances are running (port 10003 offset in port blocks)
var nsOlricPorts []int
for port := 10003; port <= 10098; port += 5 {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 1*time.Second)
if err == nil {
conn.Close()
nsOlricPorts = append(nsOlricPorts, port)
}
}
if len(nsOlricPorts) > 0 {
t.Logf("Found %d namespace Olric memberlist ports: %v", len(nsOlricPorts), nsOlricPorts)
t.Run("Namespace_Olric_nodes_connected", func(t *testing.T) {
// Verify all namespace Olric nodes can be reached
for _, port := range nsOlricPorts {
conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 2*time.Second)
require.NoError(t, err, "FAIL: Cannot connect to namespace Olric on port %d", port)
conn.Close()
t.Logf(" ✓ Namespace Olric memberlist on port %d is reachable", port)
}
})
}
}
// TestOlric_DataConsistency verifies data remains consistent across operations.
func TestOlric_DataConsistency(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
dmap := fmt.Sprintf("consistency_test_%d", time.Now().UnixNano())
t.Run("Update_preserves_latest_value", func(t *testing.T) {
key := fmt.Sprintf("update_key_%d", time.Now().UnixNano())
// Write multiple times
for i := 1; i <= 5; i++ {
value := fmt.Sprintf("version_%d", i)
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
require.NoError(t, err, "FAIL: Could not update key to version %d", i)
}
// Final read should return latest version
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not read final value")
require.Equal(t, "version_5", retrieved, "FAIL: Latest version not preserved")
t.Logf(" ✓ Latest value preserved after 5 updates")
})
t.Run("Delete_removes_key", func(t *testing.T) {
key := fmt.Sprintf("delete_key_%d", time.Now().UnixNano())
value := "to_be_deleted"
// Put
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
require.NoError(t, err, "FAIL: Could not put value")
// Verify it exists
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not get value before delete")
require.Equal(t, value, retrieved)
// Delete (POST with JSON body)
deleteBody := map[string]interface{}{
"dmap": dmap,
"key": key,
}
deleteBytes, _ := json.Marshal(deleteBody)
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/cache/delete", strings.NewReader(string(deleteBytes)))
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+env.APIKey)
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Delete request failed")
resp.Body.Close()
require.True(t, resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNoContent,
"FAIL: Delete returned unexpected status %d", resp.StatusCode)
// Verify key is gone
_, err = e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.Error(t, err, "FAIL: Key should not exist after delete")
require.Contains(t, err.Error(), "not found", "FAIL: Expected 'not found' error")
t.Logf(" ✓ Delete properly removes key")
})
}
// TestOlric_TTLExpiration verifies TTL expiration works.
// NOTE: TTL is currently parsed but not applied by the cache handler (TODO in set_handler.go).
// This test is skipped until TTL support is fully implemented.
func TestOlric_TTLExpiration(t *testing.T) {
t.Skip("TTL support not yet implemented in cache handler - see set_handler.go lines 88-98")
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
dmap := fmt.Sprintf("ttl_test_%d", time.Now().UnixNano())
t.Run("Key_expires_after_TTL", func(t *testing.T) {
key := fmt.Sprintf("ttl_key_%d", time.Now().UnixNano())
value := "expires_soon"
ttlSeconds := 3
// Put with TTL (TTL is a duration string like "3s", "1m", etc.)
reqBody := map[string]interface{}{
"dmap": dmap,
"key": key,
"value": value,
"ttl": fmt.Sprintf("%ds", ttlSeconds),
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/cache/put", strings.NewReader(string(bodyBytes)))
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+env.APIKey)
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Put with TTL failed")
resp.Body.Close()
require.True(t, resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated,
"FAIL: Put returned status %d", resp.StatusCode)
// Verify key exists immediately
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Could not get key immediately after put")
require.Equal(t, value, retrieved)
t.Logf(" Key exists immediately after put")
// Wait for TTL to expire (plus buffer)
time.Sleep(time.Duration(ttlSeconds+2) * time.Second)
// Key should be gone
_, err = e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.Error(t, err, "FAIL: Key should have expired after %d seconds", ttlSeconds)
require.Contains(t, err.Error(), "not found", "FAIL: Expected 'not found' error after TTL")
t.Logf(" ✓ Key expired after %d seconds as expected", ttlSeconds)
})
}

View File

@ -0,0 +1,479 @@
//go:build e2e
package cluster_test
import (
"context"
"fmt"
"net/http"
"sync"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/require"
)
// =============================================================================
// STRICT RQLITE CLUSTER TESTS
// These tests verify that RQLite cluster operations work correctly.
// Tests FAIL if operations don't work - no skips, no warnings.
// =============================================================================
// TestRQLite_ClusterHealth verifies the RQLite cluster is healthy and operational.
func TestRQLite_ClusterHealth(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Check RQLite schema endpoint (proves cluster is reachable)
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
}
body, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Could not reach RQLite cluster")
require.Equal(t, http.StatusOK, status, "FAIL: RQLite schema endpoint returned %d: %s", status, string(body))
var schemaResp map[string]interface{}
err = e2e.DecodeJSON(body, &schemaResp)
require.NoError(t, err, "FAIL: Could not decode RQLite schema response")
// Schema endpoint should return tables array
_, hasTables := schemaResp["tables"]
require.True(t, hasTables, "FAIL: RQLite schema response missing 'tables' field")
t.Logf(" ✓ RQLite cluster is healthy and responding")
}
// TestRQLite_WriteReadConsistency verifies data written can be read back consistently.
func TestRQLite_WriteReadConsistency(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
table := e2e.GenerateTableName()
// Cleanup
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
// Create table
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)",
table,
),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err, "FAIL: Create table request failed")
require.True(t, status == http.StatusCreated || status == http.StatusOK,
"FAIL: Create table returned status %d", status)
t.Logf("Created table %s", table)
t.Run("Write_then_read_returns_same_data", func(t *testing.T) {
uniqueValue := fmt.Sprintf("test_value_%d", time.Now().UnixNano())
// Insert
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s (value) VALUES ('%s')", table, uniqueValue),
},
},
}
_, status, err := insertReq.Do(ctx)
require.NoError(t, err, "FAIL: Insert request failed")
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
// Read back
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT value FROM %s WHERE value = '%s'", table, uniqueValue),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Query request failed")
require.Equal(t, http.StatusOK, status, "FAIL: Query returned status %d", status)
var queryResp map[string]interface{}
err = e2e.DecodeJSON(body, &queryResp)
require.NoError(t, err, "FAIL: Could not decode query response")
// Verify we got our value back
count, ok := queryResp["count"].(float64)
require.True(t, ok, "FAIL: Response missing 'count' field")
require.Equal(t, float64(1), count, "FAIL: Expected 1 row, got %v", count)
t.Logf(" ✓ Written value '%s' was read back correctly", uniqueValue)
})
t.Run("Multiple_writes_all_readable", func(t *testing.T) {
// Insert multiple values
var statements []string
for i := 0; i < 10; i++ {
statements = append(statements,
fmt.Sprintf("INSERT INTO %s (value) VALUES ('batch_%d')", table, i))
}
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": statements,
},
}
_, status, err := insertReq.Do(ctx)
require.NoError(t, err, "FAIL: Batch insert failed")
require.Equal(t, http.StatusOK, status, "FAIL: Batch insert returned status %d", status)
// Count all batch rows
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) as cnt FROM %s WHERE value LIKE 'batch_%%'", table),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Count query failed")
require.Equal(t, http.StatusOK, status, "FAIL: Count query returned status %d", status)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
row := rows[0].([]interface{})
count := int(row[0].(float64))
require.Equal(t, 10, count, "FAIL: Expected 10 batch rows, got %d", count)
}
t.Logf(" ✓ All 10 batch writes are readable")
})
}
// TestRQLite_TransactionAtomicity verifies transactions are atomic.
func TestRQLite_TransactionAtomicity(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
table := e2e.GenerateTableName()
// Cleanup
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
// Create table
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT UNIQUE)",
table,
),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err, "FAIL: Create table failed")
require.True(t, status == http.StatusCreated || status == http.StatusOK,
"FAIL: Create table returned status %d", status)
t.Run("Successful_transaction_commits_all", func(t *testing.T) {
txReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s (value) VALUES ('tx_val_1')", table),
fmt.Sprintf("INSERT INTO %s (value) VALUES ('tx_val_2')", table),
fmt.Sprintf("INSERT INTO %s (value) VALUES ('tx_val_3')", table),
},
},
}
_, status, err := txReq.Do(ctx)
require.NoError(t, err, "FAIL: Transaction request failed")
require.Equal(t, http.StatusOK, status, "FAIL: Transaction returned status %d", status)
// Verify all 3 rows exist
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE value LIKE 'tx_val_%%'", table),
},
}
body, _, _ := queryReq.Do(ctx)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
row := rows[0].([]interface{})
count := int(row[0].(float64))
require.Equal(t, 3, count, "FAIL: Transaction didn't commit all 3 rows - got %d", count)
}
t.Logf(" ✓ Transaction committed all 3 rows atomically")
})
t.Run("Updates_preserve_consistency", func(t *testing.T) {
// Update a value
updateReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("UPDATE %s SET value = 'tx_val_1_updated' WHERE value = 'tx_val_1'", table),
},
},
}
_, status, err := updateReq.Do(ctx)
require.NoError(t, err, "FAIL: Update request failed")
require.Equal(t, http.StatusOK, status, "FAIL: Update returned status %d", status)
// Verify update took effect
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT value FROM %s WHERE value = 'tx_val_1_updated'", table),
},
}
body, _, _ := queryReq.Do(ctx)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
count, _ := queryResp["count"].(float64)
require.Equal(t, float64(1), count, "FAIL: Update didn't take effect")
t.Logf(" ✓ Update preserved consistency")
})
}
// TestRQLite_ConcurrentWrites verifies the cluster handles concurrent writes correctly.
func TestRQLite_ConcurrentWrites(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
table := e2e.GenerateTableName()
// Cleanup
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
// Create table
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, worker INTEGER, seq INTEGER)",
table,
),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err, "FAIL: Create table failed")
require.True(t, status == http.StatusCreated || status == http.StatusOK,
"FAIL: Create table returned status %d", status)
t.Run("Concurrent_inserts_all_succeed", func(t *testing.T) {
numWorkers := 5
insertsPerWorker := 10
expectedTotal := numWorkers * insertsPerWorker
var wg sync.WaitGroup
errChan := make(chan error, numWorkers*insertsPerWorker)
for w := 0; w < numWorkers; w++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
for i := 0; i < insertsPerWorker; i++ {
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s (worker, seq) VALUES (%d, %d)", table, workerID, i),
},
},
}
_, status, err := insertReq.Do(ctx)
if err != nil {
errChan <- fmt.Errorf("worker %d insert %d failed: %w", workerID, i, err)
return
}
if status != http.StatusOK {
errChan <- fmt.Errorf("worker %d insert %d got status %d", workerID, i, status)
return
}
}
}(w)
}
wg.Wait()
close(errChan)
// Collect errors
var errors []error
for err := range errChan {
errors = append(errors, err)
}
require.Empty(t, errors, "FAIL: %d concurrent inserts failed: %v", len(errors), errors)
// Verify total count
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s", table),
},
}
body, _, _ := queryReq.Do(ctx)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
row := rows[0].([]interface{})
count := int(row[0].(float64))
require.Equal(t, expectedTotal, count,
"FAIL: Expected %d total rows from concurrent inserts, got %d", expectedTotal, count)
}
t.Logf(" ✓ All %d concurrent inserts succeeded", expectedTotal)
})
}
// TestRQLite_NamespaceClusterOperations verifies RQLite works in namespace clusters.
func TestRQLite_NamespaceClusterOperations(t *testing.T) {
// Create a new namespace
namespace := fmt.Sprintf("rqlite-test-%d", time.Now().UnixNano())
env, err := e2e.LoadTestEnvWithNamespace(namespace)
require.NoError(t, err, "FAIL: Could not create namespace for RQLite test")
require.NotEmpty(t, env.APIKey, "FAIL: No API key - namespace provisioning failed")
t.Logf("Created namespace %s", namespace)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
table := e2e.GenerateTableName()
// Cleanup
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
Headers: map[string]string{"Authorization": "Bearer " + env.APIKey},
}
dropReq.Do(context.Background())
}()
t.Run("Namespace_RQLite_create_insert_query", func(t *testing.T) {
// Create table in namespace cluster
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/create-table",
Headers: map[string]string{"Authorization": "Bearer " + env.APIKey},
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT)",
table,
),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err, "FAIL: Create table in namespace failed")
require.True(t, status == http.StatusCreated || status == http.StatusOK,
"FAIL: Create table returned status %d", status)
// Insert data
uniqueValue := fmt.Sprintf("ns_value_%d", time.Now().UnixNano())
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/transaction",
Headers: map[string]string{"Authorization": "Bearer " + env.APIKey},
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s (value) VALUES ('%s')", table, uniqueValue),
},
},
}
_, status, err = insertReq.Do(ctx)
require.NoError(t, err, "FAIL: Insert in namespace failed")
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
// Query data
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/query",
Headers: map[string]string{"Authorization": "Bearer " + env.APIKey},
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT value FROM %s WHERE value = '%s'", table, uniqueValue),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Query in namespace failed")
require.Equal(t, http.StatusOK, status, "FAIL: Query returned status %d", status)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
count, _ := queryResp["count"].(float64)
require.Equal(t, float64(1), count, "FAIL: Data not found in namespace cluster")
t.Logf(" ✓ Namespace RQLite operations work correctly")
})
}

View File

@ -0,0 +1,177 @@
//go:build e2e
package cluster
import (
"context"
"encoding/json"
"fmt"
"net/http"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRQLite_ReadConsistencyLevels tests that different consistency levels work.
func TestRQLite_ReadConsistencyLevels(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
gatewayURL := e2e.GetGatewayURL()
table := e2e.GenerateTableName()
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
// Create table
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, val TEXT)", table),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err)
require.True(t, status == http.StatusOK || status == http.StatusCreated, "create table got %d", status)
// Insert data
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s(val) VALUES ('consistency-test')", table),
},
},
}
_, status, err = insertReq.Do(ctx)
require.NoError(t, err)
require.Equal(t, http.StatusOK, status)
t.Run("Default consistency read", func(t *testing.T) {
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT * FROM %s", table),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, status)
t.Logf("Default read: %s", string(body))
})
t.Run("Strong consistency read", func(t *testing.T) {
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/query?level=strong",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT * FROM %s", table),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, status)
t.Logf("Strong read: %s", string(body))
})
t.Run("Weak consistency read", func(t *testing.T) {
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/query?level=weak",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT * FROM %s", table),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, status)
t.Logf("Weak read: %s", string(body))
})
}
// TestRQLite_WriteAfterMultipleReads verifies write-read cycles stay consistent.
func TestRQLite_WriteAfterMultipleReads(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
gatewayURL := e2e.GetGatewayURL()
table := e2e.GenerateTableName()
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, counter INTEGER DEFAULT 0)", table),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err)
require.True(t, status == http.StatusOK || status == http.StatusCreated)
// Write-read cycle 10 times
for i := 1; i <= 10; i++ {
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s(counter) VALUES (%d)", table, i),
},
},
}
_, status, err := insertReq.Do(ctx)
require.NoError(t, err, "insert %d failed", i)
require.Equal(t, http.StatusOK, status, "insert %d got status %d", i, status)
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) as cnt FROM %s", table),
},
}
body, _, _ := queryReq.Do(ctx)
t.Logf("Iteration %d: %s", i, string(body))
}
// Final verification
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: gatewayURL + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) as cnt FROM %s", table),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err)
require.Equal(t, http.StatusOK, status)
var result map[string]interface{}
json.Unmarshal(body, &result)
t.Logf("Final count result: %s", string(body))
}

171
e2e/config.go Normal file
View File

@ -0,0 +1,171 @@
//go:build e2e
package e2e
import (
"os"
"path/filepath"
"testing"
"gopkg.in/yaml.v2"
)
// E2EConfig holds the configuration for E2E tests
type E2EConfig struct {
// Mode can be "local" or "production"
Mode string `yaml:"mode"`
// BaseDomain is the domain used for deployment routing (e.g., "dbrs.space" or "orama.network")
BaseDomain string `yaml:"base_domain"`
// Servers is a list of production servers (only used when mode=production)
Servers []ServerConfig `yaml:"servers"`
// Nameservers is a list of nameserver hostnames (e.g., ["ns1.dbrs.space", "ns2.dbrs.space"])
Nameservers []string `yaml:"nameservers"`
// APIKey is the API key for production testing (auto-discovered if empty)
APIKey string `yaml:"api_key"`
}
// ServerConfig holds configuration for a single production server
type ServerConfig struct {
Name string `yaml:"name"`
IP string `yaml:"ip"`
User string `yaml:"user"`
Password string `yaml:"password"`
IsNameserver bool `yaml:"is_nameserver"`
}
// DefaultConfig returns the default configuration for local development
func DefaultConfig() *E2EConfig {
return &E2EConfig{
Mode: "local",
BaseDomain: "orama.network",
Servers: []ServerConfig{},
Nameservers: []string{},
APIKey: "",
}
}
// LoadE2EConfig loads the E2E test configuration from e2e/config.yaml
// Falls back to defaults if the file doesn't exist
func LoadE2EConfig() (*E2EConfig, error) {
// Try multiple locations for the config file
configPaths := []string{
"config.yaml", // Relative to e2e directory (when running from e2e/)
"e2e/config.yaml", // Relative to project root
"../e2e/config.yaml", // From subdirectory within e2e/
}
// Also try absolute path based on working directory
if cwd, err := os.Getwd(); err == nil {
configPaths = append(configPaths, filepath.Join(cwd, "config.yaml"))
configPaths = append(configPaths, filepath.Join(cwd, "e2e", "config.yaml"))
// Go up one level if we're in a subdirectory
configPaths = append(configPaths, filepath.Join(cwd, "..", "config.yaml"))
}
var configData []byte
var readErr error
for _, path := range configPaths {
data, err := os.ReadFile(path)
if err == nil {
configData = data
break
}
readErr = err
}
// If no config file found, return defaults
if configData == nil {
// Check if running in production mode via environment variable
if os.Getenv("E2E_MODE") == "production" {
return nil, readErr // Config file required for production mode
}
return DefaultConfig(), nil
}
var cfg E2EConfig
if err := yaml.Unmarshal(configData, &cfg); err != nil {
return nil, err
}
// Apply defaults for empty values
if cfg.Mode == "" {
cfg.Mode = "local"
}
if cfg.BaseDomain == "" {
cfg.BaseDomain = "orama.network"
}
return &cfg, nil
}
// IsProductionMode returns true if running in production mode
func IsProductionMode() bool {
// Check environment variable first
if os.Getenv("E2E_MODE") == "production" {
return true
}
cfg, err := LoadE2EConfig()
if err != nil {
return false
}
return cfg.Mode == "production"
}
// IsLocalMode returns true if running in local mode
func IsLocalMode() bool {
return !IsProductionMode()
}
// SkipIfLocal skips the test if running in local mode
// Use this for tests that require real production infrastructure
func SkipIfLocal(t *testing.T) {
t.Helper()
if IsLocalMode() {
t.Skip("Skipping: requires production environment (set mode: production in e2e/config.yaml)")
}
}
// SkipIfProduction skips the test if running in production mode
// Use this for tests that should only run locally
func SkipIfProduction(t *testing.T) {
t.Helper()
if IsProductionMode() {
t.Skip("Skipping: local-only test")
}
}
// GetServerIPs returns a list of all server IP addresses from config
func GetServerIPs(cfg *E2EConfig) []string {
if cfg == nil {
return nil
}
ips := make([]string, 0, len(cfg.Servers))
for _, server := range cfg.Servers {
if server.IP != "" {
ips = append(ips, server.IP)
}
}
return ips
}
// GetNameserverServers returns servers configured as nameservers
func GetNameserverServers(cfg *E2EConfig) []ServerConfig {
if cfg == nil {
return nil
}
var nameservers []ServerConfig
for _, server := range cfg.Servers {
if server.IsNameserver {
nameservers = append(nameservers, server)
}
}
return nameservers
}

45
e2e/config.yaml.example Normal file
View File

@ -0,0 +1,45 @@
# E2E Test Configuration
#
# Copy this file to config.yaml and fill in your values.
# config.yaml is git-ignored and should contain your actual credentials.
#
# Usage:
# cp config.yaml.example config.yaml
# # Edit config.yaml with your server credentials
# go test -v -tags e2e ./e2e/...
# Test mode: "local" or "production"
# - local: Tests run against `make dev` cluster on localhost
# - production: Tests run against real VPS servers
mode: local
# Base domain for deployment routing
# - Local: orama.network (default)
# - Production: dbrs.space (or your custom domain)
base_domain: orama.network
# Production servers (only used when mode=production)
# Add your VPS servers here with their credentials
servers:
# Example:
# - name: vps-1
# ip: 1.2.3.4
# user: ubuntu
# password: "your-password-here"
# is_nameserver: true
# - name: vps-2
# ip: 5.6.7.8
# user: ubuntu
# password: "another-password"
# is_nameserver: false
# Nameserver hostnames (for DNS tests in production)
# These should match your NS records
nameservers:
# Example:
# - ns1.yourdomain.com
# - ns2.yourdomain.com
# API key for production testing
# Leave empty to auto-discover from RQLite or create fresh key
api_key: ""

View File

@ -0,0 +1,223 @@
//go:build e2e
package deployments_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"sync"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDeploy_InvalidTarball verifies that uploading an invalid/corrupt tarball
// returns a clean error (not a 500 or panic).
func TestDeploy_InvalidTarball(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("invalid-tar-%d", time.Now().Unix())
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(deploymentName + "\r\n")
// Write invalid tarball data (random bytes, not a real gzip)
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.WriteString("this is not a valid tarball content at all!!!")
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
respBody, _ := io.ReadAll(resp.Body)
t.Logf("Status: %d, Body: %s", resp.StatusCode, string(respBody))
// Should return an error, not 2xx (ideally 400, but server currently returns 500)
assert.True(t, resp.StatusCode >= 400,
"Invalid tarball should return error (got %d)", resp.StatusCode)
}
// TestDeploy_EmptyTarball verifies that uploading an empty file returns an error.
func TestDeploy_EmptyTarball(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("empty-tar-%d", time.Now().Unix())
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(deploymentName + "\r\n")
// Empty tarball
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
respBody, _ := io.ReadAll(resp.Body)
t.Logf("Status: %d, Body: %s", resp.StatusCode, string(respBody))
assert.True(t, resp.StatusCode >= 400,
"Empty tarball should return error (got %d)", resp.StatusCode)
}
// TestDeploy_MissingName verifies that deploying without a name returns an error.
func TestDeploy_MissingName(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
tarballPath := filepath.Join("../../testdata/apps/react-app")
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
// No name field
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
// Create tarball from directory for the "no name" test
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
if err != nil {
t.Skip("Failed to create tarball from test app")
}
body.Write(tarData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.True(t, resp.StatusCode >= 400,
"Missing name should return error (got %d)", resp.StatusCode)
}
// TestDeploy_ConcurrentSameName verifies that deploying two apps with the same
// name concurrently doesn't cause data corruption.
func TestDeploy_ConcurrentSameName(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("concurrent-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var wg sync.WaitGroup
results := make([]int, 2)
ids := make([]string, 2)
// Pre-create tarball once for both goroutines
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
if err != nil {
t.Skip("Failed to create tarball from test app")
}
for i := 0; i < 2; i++ {
wg.Add(1)
go func(idx int) {
defer wg.Done()
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(deploymentName + "\r\n")
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(tarData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
results[idx] = resp.StatusCode
var result map[string]interface{}
json.NewDecoder(resp.Body).Decode(&result)
if id, ok := result["deployment_id"].(string); ok {
ids[idx] = id
} else if id, ok := result["id"].(string); ok {
ids[idx] = id
}
}(i)
}
wg.Wait()
t.Logf("Concurrent deploy results: status1=%d status2=%d id1=%s id2=%s",
results[0], results[1], ids[0], ids[1])
// At least one should succeed
successCount := 0
for _, status := range results {
if status == http.StatusCreated {
successCount++
}
}
assert.GreaterOrEqual(t, successCount, 1,
"At least one concurrent deploy should succeed")
// Cleanup
for _, id := range ids {
if id != "" {
e2e.DeleteDeployment(t, env, id)
}
}
}
func readFileBytes(path string) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return io.ReadAll(f)
}

View File

@ -0,0 +1,308 @@
//go:build e2e
package deployments_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGoBackendWithSQLite tests Go backend deployment with hosted SQLite connectivity
// 1. Create hosted SQLite database
// 2. Deploy Go backend with DATABASE_NAME env var
// 3. POST /api/users → verify insert
// 4. GET /api/users → verify read
// 5. Cleanup
func TestGoBackendWithSQLite(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("go-sqlite-test-%d", time.Now().Unix())
dbName := fmt.Sprintf("test-db-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/go-api")
var deploymentID string
// Cleanup after test
defer func() {
if !env.SkipCleanup {
if deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
// Delete the test database
deleteSQLiteDB(t, env, dbName)
}
}()
t.Run("Create SQLite database", func(t *testing.T) {
e2e.CreateSQLiteDB(t, env, dbName)
t.Logf("Created database: %s", dbName)
})
t.Run("Deploy Go backend with DATABASE_NAME", func(t *testing.T) {
deploymentID = createGoDeployment(t, env, deploymentName, tarballPath, map[string]string{
"DATABASE_NAME": dbName,
"GATEWAY_URL": env.GatewayURL,
"API_KEY": env.APIKey,
})
require.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
t.Logf("Created Go deployment: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Wait for deployment to become healthy", func(t *testing.T) {
healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
require.True(t, healthy, "Deployment should become healthy")
t.Logf("Deployment is healthy")
})
t.Run("Test health endpoint", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Health check should return 200")
body, _ := io.ReadAll(resp.Body)
var health map[string]interface{}
require.NoError(t, json.Unmarshal(body, &health))
assert.Contains(t, []string{"healthy", "ok"}, health["status"])
t.Logf("Health response: %+v", health)
})
t.Run("POST /api/notes - create note", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
noteData := map[string]string{
"title": "Test Note",
"content": "This is a test note",
}
body, _ := json.Marshal(noteData)
req, err := http.NewRequest("POST", env.GatewayURL+"/api/notes", bytes.NewBuffer(body))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/json")
req.Host = domain
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusCreated, resp.StatusCode, "Should create note successfully")
var note map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&note))
assert.Equal(t, "Test Note", note["title"])
assert.Equal(t, "This is a test note", note["content"])
t.Logf("Created note: %+v", note)
})
t.Run("GET /api/notes - list notes", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/notes")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
var notes []map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&notes))
assert.GreaterOrEqual(t, len(notes), 1, "Should have at least one note")
found := false
for _, note := range notes {
if note["title"] == "Test Note" {
found = true
break
}
}
assert.True(t, found, "Test note should be in the list")
t.Logf("Notes count: %d", len(notes))
})
t.Run("DELETE /api/notes - delete note", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
// First get the note ID
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/notes")
defer resp.Body.Close()
var notes []map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&notes))
var noteID int
for _, note := range notes {
if note["title"] == "Test Note" {
noteID = int(note["id"].(float64))
break
}
}
require.NotZero(t, noteID, "Should find test note ID")
req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/api/notes/%d", env.GatewayURL, noteID), nil)
require.NoError(t, err)
req.Host = domain
deleteResp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer deleteResp.Body.Close()
assert.Equal(t, http.StatusOK, deleteResp.StatusCode, "Should delete note successfully")
t.Logf("Deleted note ID: %d", noteID)
})
}
// createGoDeployment creates a Go backend deployment with environment variables
func createGoDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string, envVars map[string]string) string {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
if err != nil {
t.Fatalf("failed to stat tarball path: %v", err)
}
if info.IsDir() {
// Build Go binary for linux/amd64, then tar it
tmpDir, err := os.MkdirTemp("", "go-deploy-*")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
binaryPath := filepath.Join(tmpDir, "app")
buildCmd := exec.Command("go", "build", "-o", binaryPath, ".")
buildCmd.Dir = tarballPath
buildCmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=amd64", "CGO_ENABLED=0")
if out, err := buildCmd.CombinedOutput(); err != nil {
t.Fatalf("failed to build Go app: %v\n%s", err, string(out))
}
fileData, err = exec.Command("tar", "-czf", "-", "-C", tmpDir, ".").Output()
if err != nil {
t.Fatalf("failed to create tarball: %v", err)
}
} else {
file, err := os.Open(tarballPath)
if err != nil {
t.Fatalf("failed to open tarball: %v", err)
}
defer file.Close()
fileData, _ = io.ReadAll(file)
}
// Create multipart form
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
// Write name field
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
// Write environment variables
for key, value := range envVars {
body.WriteString("--" + boundary + "\r\n")
body.WriteString(fmt.Sprintf("Content-Disposition: form-data; name=\"env_%s\"\r\n\r\n", key))
body.WriteString(value + "\r\n")
}
// Write tarball file
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/go/upload", body)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to execute request: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
if id, ok := result["deployment_id"].(string); ok {
return id
}
if id, ok := result["id"].(string); ok {
return id
}
t.Fatalf("Deployment response missing id field: %+v", result)
return ""
}
// deleteSQLiteDB deletes a SQLite database
func deleteSQLiteDB(t *testing.T, env *e2e.E2ETestEnv, dbName string) {
t.Helper()
req, err := http.NewRequest("DELETE", env.GatewayURL+"/v1/db/"+dbName, nil)
if err != nil {
t.Logf("warning: failed to create delete request: %v", err)
return
}
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Logf("warning: failed to delete database: %v", err)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Logf("warning: delete database returned status %d", resp.StatusCode)
}
}

View File

@ -0,0 +1,264 @@
//go:build e2e
package deployments_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNextJSDeployment_SSR tests Next.js deployment with SSR and API routes
// 1. Deploy Next.js app
// 2. Test SSR page (verify server-rendered HTML)
// 3. Test API routes (/api/hello, /api/data)
// 4. Test static assets
// 5. Cleanup
func TestNextJSDeployment_SSR(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("nextjs-ssr-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/nextjs-ssr.tar.gz")
var deploymentID string
// Check if tarball exists
if _, err := os.Stat(tarballPath); os.IsNotExist(err) {
t.Skip("Next.js SSR tarball not found at " + tarballPath)
}
// Cleanup after test
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy Next.js SSR app", func(t *testing.T) {
deploymentID = createNextJSDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
t.Logf("Created Next.js deployment: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Wait for deployment to become healthy", func(t *testing.T) {
healthy := e2e.WaitForHealthy(t, env, deploymentID, 120*time.Second)
require.True(t, healthy, "Deployment should become healthy")
t.Logf("Deployment is healthy")
})
t.Run("Verify deployment in database", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
assert.Equal(t, deploymentName, deployment["name"], "Deployment name should match")
deploymentType, ok := deployment["type"].(string)
require.True(t, ok, "Type should be a string")
assert.Contains(t, deploymentType, "nextjs", "Type should be nextjs")
t.Logf("Deployment type: %s", deploymentType)
})
t.Run("Test SSR page - verify server-rendered HTML", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "SSR page should return 200")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err, "Should read response body")
bodyStr := string(body)
// Verify HTML is server-rendered (contains actual content, not just loading state)
assert.Contains(t, bodyStr, "Orama Network Next.js Test", "Should contain app title")
assert.Contains(t, bodyStr, "Server-Side Rendering Test", "Should contain SSR test marker")
assert.Contains(t, resp.Header.Get("Content-Type"), "text/html", "Should be HTML content")
t.Logf("SSR page loaded successfully")
t.Logf("Content-Type: %s", resp.Header.Get("Content-Type"))
})
t.Run("Test API route - /api/hello", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/hello")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "API route should return 200")
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Should decode JSON response")
assert.Contains(t, result["message"], "Hello", "Should contain hello message")
assert.NotEmpty(t, result["timestamp"], "Should have timestamp")
t.Logf("API /hello response: %+v", result)
})
t.Run("Test API route - /api/data", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/api/data")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "API data route should return 200")
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Should decode JSON response")
// Just verify it returns valid JSON
t.Logf("API /data response: %+v", result)
})
t.Run("Test static asset - _next directory", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
// First, get the main page to find the actual static asset path
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
bodyStr := string(body)
// Look for _next/static references in the HTML
if strings.Contains(bodyStr, "_next/static") {
t.Logf("Found _next/static references in HTML")
// Try to fetch a common static chunk
// The exact path depends on Next.js build output
// We'll just verify the _next directory structure is accessible
chunkResp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/_next/static/chunks/main.js")
defer chunkResp.Body.Close()
// It's OK if specific files don't exist (they have hashed names)
// Just verify we don't get a 500 error
assert.NotEqual(t, http.StatusInternalServerError, chunkResp.StatusCode,
"Static asset request should not cause server error")
t.Logf("Static asset request status: %d", chunkResp.StatusCode)
} else {
t.Logf("No _next/static references found (may be using different bundling)")
}
})
t.Run("Test 404 handling", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/nonexistent-page-xyz")
defer resp.Body.Close()
// Next.js should handle 404 gracefully
// Could be 404 or 200 depending on catch-all routes
assert.Contains(t, []int{200, 404}, resp.StatusCode,
"Should return either 200 (catch-all) or 404")
t.Logf("404 handling: status=%d", resp.StatusCode)
})
}
// createNextJSDeployment creates a Next.js deployment
func createNextJSDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
t.Helper()
file, err := os.Open(tarballPath)
if err != nil {
t.Fatalf("failed to open tarball: %v", err)
}
defer file.Close()
// Create multipart form
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
// Write name field
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
// Write ssr field (enable SSR mode)
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"ssr\"\r\n\r\n")
body.WriteString("true\r\n")
// Write tarball file
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
fileData, _ := io.ReadAll(file)
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/nextjs/upload", body)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
// Use a longer timeout for large Next.js uploads (can be 50MB+)
uploadClient := e2e.NewHTTPClient(5 * time.Minute)
resp, err := uploadClient.Do(req)
if err != nil {
t.Fatalf("failed to execute request: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
if id, ok := result["deployment_id"].(string); ok {
return id
}
if id, ok := result["id"].(string); ok {
return id
}
t.Fatalf("Deployment response missing id field: %+v", result)
return ""
}

View File

@ -0,0 +1,203 @@
//go:build e2e
package deployments_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNodeJSDeployment_FullFlow(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("test-nodejs-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/node-api")
var deploymentID string
// Cleanup after test
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Upload Node.js backend", func(t *testing.T) {
deploymentID = createNodeJSDeployment(t, env, deploymentName, tarballPath)
assert.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Wait for deployment to become healthy", func(t *testing.T) {
healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
assert.True(t, healthy, "Deployment should become healthy within timeout")
t.Logf("Deployment is healthy")
})
t.Run("Test health endpoint", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
// Get the deployment URLs (can be array of strings or map)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
// Test via Host header (localhost testing)
resp := e2e.TestDeploymentWithHostHeader(t, env, extractDomain(nodeURL), "/health")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Health check should return 200")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
var health map[string]interface{}
require.NoError(t, json.Unmarshal(body, &health))
assert.Contains(t, []string{"healthy", "ok"}, health["status"],
"Health status should be 'healthy' or 'ok'")
t.Logf("Health check passed: %v", health)
})
t.Run("Test API endpoint", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
// Test health endpoint (node-api app serves /health)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
var result map[string]interface{}
require.NoError(t, json.Unmarshal(body, &result))
assert.NotEmpty(t, result["service"])
t.Logf("API endpoint response: %v", result)
})
}
func createNodeJSDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
if err != nil {
t.Fatalf("Failed to stat tarball path: %v", err)
}
if info.IsDir() {
// Create tarball from directory
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
require.NoError(t, err, "Failed to create tarball from %s", tarballPath)
fileData = tarData
} else {
file, err := os.Open(tarballPath)
require.NoError(t, err, "Failed to open tarball: %s", tarballPath)
defer file.Close()
fileData, _ = io.ReadAll(file)
}
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/nodejs/upload", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result))
if id, ok := result["deployment_id"].(string); ok {
return id
}
if id, ok := result["id"].(string); ok {
return id
}
t.Fatalf("Deployment response missing id field: %+v", result)
return ""
}
// extractNodeURL gets the node URL from deployment response
// Handles both array of strings and map formats
func extractNodeURL(t *testing.T, deployment map[string]interface{}) string {
t.Helper()
// Try as array of strings first (new format)
if urls, ok := deployment["urls"].([]interface{}); ok && len(urls) > 0 {
if url, ok := urls[0].(string); ok {
return url
}
}
// Try as map (legacy format)
if urls, ok := deployment["urls"].(map[string]interface{}); ok {
if url, ok := urls["node"].(string); ok {
return url
}
}
return ""
}
func extractDomain(url string) string {
// Extract domain from URL like "https://myapp.node-xyz.dbrs.space"
// Remove protocol
domain := url
if len(url) > 8 && url[:8] == "https://" {
domain = url[8:]
} else if len(url) > 7 && url[:7] == "http://" {
domain = url[7:]
}
// Remove trailing slash
if len(domain) > 0 && domain[len(domain)-1] == '/' {
domain = domain[:len(domain)-1]
}
return domain
}

View File

@ -0,0 +1,357 @@
//go:build e2e
package deployments_test
import (
"bytes"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestStaticReplica_CreatedOnDeploy verifies that deploying a static app
// creates replica records on a second node.
func TestStaticReplica_CreatedOnDeploy(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("replica-static-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy static app", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Wait for replica setup", func(t *testing.T) {
// Static replicas should set up quickly (IPFS content)
time.Sleep(10 * time.Second)
})
t.Run("Deployment has replica records", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
// Check that replicas field exists and has entries
replicas, ok := deployment["replicas"].([]interface{})
if !ok {
// Replicas might be in a nested structure or separate endpoint
t.Logf("Deployment response: %+v", deployment)
// Try querying replicas via the deployment details
homeNodeID, _ := deployment["home_node_id"].(string)
require.NotEmpty(t, homeNodeID, "Deployment should have a home_node_id")
t.Logf("Home node: %s", homeNodeID)
// If replicas aren't in the response, that's still okay — we verify
// via DNS and cross-node serving below
t.Log("Replica records not in deployment response; will verify via DNS/serving")
return
}
assert.GreaterOrEqual(t, len(replicas), 1, "Should have at least 1 replica")
t.Logf("Found %d replica records", len(replicas))
for i, r := range replicas {
if replica, ok := r.(map[string]interface{}); ok {
t.Logf(" Replica %d: node=%s status=%s", i, replica["node_id"], replica["status"])
}
}
})
t.Run("Static content served via gateway", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Static content should be served (got %d: %s)", resp.StatusCode, string(body))
t.Logf("Served via gateway: status=%d", resp.StatusCode)
})
}
// TestDynamicReplica_CreatedOnDeploy verifies that deploying a dynamic (Node.js) app
// creates a replica process on a second node.
func TestDynamicReplica_CreatedOnDeploy(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("replica-nodejs-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/node-api")
var deploymentID string
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy Node.js backend", func(t *testing.T) {
deploymentID = createNodeJSDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Wait for deployment and replica", func(t *testing.T) {
healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
assert.True(t, healthy, "Deployment should become healthy")
// Extra wait for async replica setup
time.Sleep(15 * time.Second)
})
t.Run("Dynamic app served from both nodes", func(t *testing.T) {
e2e.SkipIfLocal(t)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
if nodeURL == "" {
t.Skip("No node URL in deployment")
}
domain := extractDomain(nodeURL)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Dynamic app should be served via gateway (got %d: %s)", resp.StatusCode, string(body))
t.Logf("Served via gateway: status=%d body=%s", resp.StatusCode, string(body))
})
}
// TestReplica_UpdatePropagation verifies that updating a deployment propagates to replicas.
func TestReplica_UpdatePropagation(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
e2e.SkipIfLocal(t)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
deploymentName := fmt.Sprintf("replica-update-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy v1", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
time.Sleep(10 * time.Second) // Wait for replica
})
var v1CID string
t.Run("Record v1 CID", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
v1CID, _ = deployment["content_cid"].(string)
require.NotEmpty(t, v1CID)
t.Logf("v1 CID: %s", v1CID)
})
t.Run("Update to v2", func(t *testing.T) {
updateStaticDeployment(t, env, deploymentName, tarballPath)
time.Sleep(10 * time.Second) // Wait for update + replica propagation
})
t.Run("All nodes serve updated version", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
v2CID, _ := deployment["content_cid"].(string)
// v2 CID might be same (same tarball) but version should increment
version, _ := deployment["version"].(float64)
assert.Equal(t, float64(2), version, "Should be version 2")
t.Logf("v2 CID: %s, version: %v", v2CID, version)
// Verify via gateway
dep := e2e.GetDeployment(t, env, deploymentID)
depCID, _ := dep["content_cid"].(string)
assert.Equal(t, v2CID, depCID, "CID should match after update")
})
}
// TestReplica_RollbackPropagation verifies rollback propagates to replica nodes.
func TestReplica_RollbackPropagation(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
e2e.SkipIfLocal(t)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
deploymentName := fmt.Sprintf("replica-rollback-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy v1 and update to v2", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
time.Sleep(10 * time.Second)
updateStaticDeployment(t, env, deploymentName, tarballPath)
time.Sleep(10 * time.Second)
})
var v1CID string
t.Run("Get v1 CID from versions", func(t *testing.T) {
versions := listVersions(t, env, deploymentName)
if len(versions) > 0 {
v1CID, _ = versions[0]["content_cid"].(string)
}
if v1CID == "" {
// Fall back: v1 CID from current deployment
deployment := e2e.GetDeployment(t, env, deploymentID)
v1CID, _ = deployment["content_cid"].(string)
}
t.Logf("v1 CID for rollback comparison: %s", v1CID)
})
t.Run("Rollback to v1", func(t *testing.T) {
rollbackDeployment(t, env, deploymentName, 1)
time.Sleep(10 * time.Second) // Wait for rollback + replica propagation
})
t.Run("All nodes have rolled-back CID", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
currentCID, _ := deployment["content_cid"].(string)
t.Logf("Post-rollback CID: %s", currentCID)
assert.Equal(t, v1CID, currentCID, "CID should match v1 after rollback")
})
}
// TestReplica_TeardownOnDelete verifies that deleting a deployment removes replicas.
func TestReplica_TeardownOnDelete(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
e2e.SkipIfLocal(t)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
deploymentName := fmt.Sprintf("replica-delete-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
time.Sleep(10 * time.Second) // Wait for replica
// Get the domain before deletion
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
domain := ""
if nodeURL != "" {
domain = extractDomain(nodeURL)
}
t.Run("Delete deployment", func(t *testing.T) {
e2e.DeleteDeployment(t, env, deploymentID)
time.Sleep(10 * time.Second) // Wait for teardown propagation
})
t.Run("Deployment no longer served on any node", func(t *testing.T) {
if domain == "" {
t.Skip("No domain to test")
}
req, err := http.NewRequest("GET", env.GatewayURL+"/", nil)
require.NoError(t, err)
req.Host = domain
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Logf("Connection failed (expected after deletion)")
return
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode == http.StatusOK {
assert.NotContains(t, string(body), "<div id=\"root\">",
"Deleted deployment should not be served")
}
t.Logf("status=%d (expected non-200)", resp.StatusCode)
})
}
// updateStaticDeployment updates an existing static deployment.
func updateStaticDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
require.NoError(t, err)
if info.IsDir() {
fileData, err = exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
require.NoError(t, err)
} else {
file, err := os.Open(tarballPath)
require.NoError(t, err)
defer file.Close()
fileData, _ = io.ReadAll(file)
}
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/update", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Update failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
}

View File

@ -0,0 +1,232 @@
//go:build e2e
package deployments_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDeploymentRollback_FullFlow tests the complete rollback workflow:
// 1. Deploy v1
// 2. Update to v2
// 3. Verify v2 content
// 4. Rollback to v1
// 5. Verify v1 content is restored
func TestDeploymentRollback_FullFlow(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("rollback-test-%d", time.Now().Unix())
tarballPathV1 := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
// Cleanup after test
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy v1", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPathV1)
require.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
t.Logf("Created deployment v1: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Verify v1 deployment", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
version, ok := deployment["version"].(float64)
require.True(t, ok, "Version should be a number")
assert.Equal(t, float64(1), version, "Initial version should be 1")
contentCID, ok := deployment["content_cid"].(string)
require.True(t, ok, "Content CID should be a string")
assert.NotEmpty(t, contentCID, "Content CID should not be empty")
t.Logf("v1 version: %v, CID: %s", version, contentCID)
})
var v1CID string
t.Run("Save v1 CID", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
v1CID = deployment["content_cid"].(string)
t.Logf("Saved v1 CID: %s", v1CID)
})
t.Run("Update to v2", func(t *testing.T) {
// Update the deployment with the same tarball (simulates a new version)
updateDeployment(t, env, deploymentName, tarballPathV1)
// Wait for update to complete
time.Sleep(2 * time.Second)
})
t.Run("Verify v2 deployment", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
version, ok := deployment["version"].(float64)
require.True(t, ok, "Version should be a number")
assert.Equal(t, float64(2), version, "Version should be 2 after update")
t.Logf("v2 version: %v", version)
})
t.Run("List deployment versions", func(t *testing.T) {
versions := listVersions(t, env, deploymentName)
t.Logf("Available versions: %+v", versions)
// Should have at least 2 versions in history
assert.GreaterOrEqual(t, len(versions), 1, "Should have version history")
})
t.Run("Rollback to v1", func(t *testing.T) {
rollbackDeployment(t, env, deploymentName, 1)
// Wait for rollback to complete
time.Sleep(2 * time.Second)
})
t.Run("Verify rollback succeeded", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
version, ok := deployment["version"].(float64)
require.True(t, ok, "Version should be a number")
// Note: Version number increases even on rollback (it's a new deployment version)
// But the content_cid should be the same as v1
t.Logf("Post-rollback version: %v", version)
contentCID, ok := deployment["content_cid"].(string)
require.True(t, ok, "Content CID should be a string")
assert.Equal(t, v1CID, contentCID, "Content CID should match v1 after rollback")
t.Logf("Rollback verified - content CID matches v1: %s", contentCID)
})
}
// updateDeployment updates an existing static deployment
func updateDeployment(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
require.NoError(t, err)
if info.IsDir() {
fileData, err = exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
require.NoError(t, err)
} else {
file, err := os.Open(tarballPath)
require.NoError(t, err, "Failed to open tarball")
defer file.Close()
fileData, _ = io.ReadAll(file)
}
// Create multipart form
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
// Write name field
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
// Write tarball file
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/update", body)
require.NoError(t, err, "Failed to create request")
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Failed to execute request")
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Update failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Failed to decode response")
t.Logf("Update response: %+v", result)
}
// listVersions lists available versions for a deployment
func listVersions(t *testing.T, env *e2e.E2ETestEnv, name string) []map[string]interface{} {
t.Helper()
req, err := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/versions?name="+name, nil)
require.NoError(t, err, "Failed to create request")
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Failed to execute request")
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Logf("List versions returned status %d: %s", resp.StatusCode, string(bodyBytes))
return nil
}
var result struct {
Versions []map[string]interface{} `json:"versions"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
t.Logf("Failed to decode versions: %v", err)
return nil
}
return result.Versions
}
// rollbackDeployment triggers a rollback to a specific version
func rollbackDeployment(t *testing.T, env *e2e.E2ETestEnv, name string, targetVersion int) {
t.Helper()
reqBody := map[string]interface{}{
"name": name,
"version": targetVersion,
}
bodyBytes, _ := json.Marshal(reqBody)
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/rollback", bytes.NewBuffer(bodyBytes))
require.NoError(t, err, "Failed to create request")
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Failed to execute request")
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Rollback failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result), "Failed to decode response")
t.Logf("Rollback response: %+v", result)
}

View File

@ -0,0 +1,210 @@
//go:build e2e
package deployments_test
import (
"fmt"
"io"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestStaticDeployment_FullFlow(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("test-static-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
// Cleanup after test
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Upload static tarball", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
assert.NotEmpty(t, deploymentID, "Deployment ID should not be empty")
t.Logf("✓ Created deployment: %s (ID: %s)", deploymentName, deploymentID)
})
t.Run("Verify deployment in database", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
assert.Equal(t, deploymentName, deployment["name"], "Deployment name should match")
assert.NotEmpty(t, deployment["content_cid"], "Content CID should not be empty")
// Status might be "deploying" or "active" depending on timing
status, ok := deployment["status"].(string)
require.True(t, ok, "Status should be a string")
assert.Contains(t, []string{"deploying", "active"}, status, "Status should be deploying or active")
t.Logf("✓ Deployment verified in database")
t.Logf(" - Name: %s", deployment["name"])
t.Logf(" - Status: %s", status)
t.Logf(" - CID: %s", deployment["content_cid"])
})
t.Run("Verify DNS record creation", func(t *testing.T) {
// Wait for deployment to become active
time.Sleep(2 * time.Second)
// Get the actual domain from deployment response
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
require.NotEmpty(t, nodeURL, "Deployment should have a URL")
expectedDomain := extractDomain(nodeURL)
// Make request with Host header (localhost testing)
resp := e2e.TestDeploymentWithHostHeader(t, env, expectedDomain, "/")
defer resp.Body.Close()
// Should return 200 with React app HTML
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should return 200 OK")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err, "Should read response body")
bodyStr := string(body)
// Verify React app content
assert.Contains(t, bodyStr, "<div id=\"root\">", "Should contain React root div")
assert.Contains(t, resp.Header.Get("Content-Type"), "text/html", "Content-Type should be text/html")
t.Logf("✓ Domain routing works")
t.Logf(" - Domain: %s", expectedDomain)
t.Logf(" - Status: %d", resp.StatusCode)
t.Logf(" - Content-Type: %s", resp.Header.Get("Content-Type"))
})
t.Run("Verify static assets serve correctly", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
require.NotEmpty(t, nodeURL, "Deployment should have a URL")
expectedDomain := extractDomain(nodeURL)
// Test CSS file (exact path depends on Vite build output)
// We'll just test a few common asset paths
assetPaths := []struct {
path string
contentType string
}{
{"/index.html", "text/html"},
// Note: Asset paths with hashes change on each build
// We'll test what we can
}
for _, asset := range assetPaths {
resp := e2e.TestDeploymentWithHostHeader(t, env, expectedDomain, asset.path)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
assert.Contains(t, resp.Header.Get("Content-Type"), asset.contentType,
"Content-Type should be %s for %s", asset.contentType, asset.path)
t.Logf("✓ Asset served correctly: %s (%s)", asset.path, asset.contentType)
}
}
})
t.Run("Verify SPA fallback routing", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
require.NotEmpty(t, nodeURL, "Deployment should have a URL")
expectedDomain := extractDomain(nodeURL)
// Request unknown route (should return index.html for SPA)
resp := e2e.TestDeploymentWithHostHeader(t, env, expectedDomain, "/about/team")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "SPA fallback should return 200")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err, "Should read response body")
assert.Contains(t, string(body), "<div id=\"root\">", "Should return index.html for unknown paths")
t.Logf("✓ SPA fallback routing works")
})
t.Run("List deployments", func(t *testing.T) {
req, err := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/list", nil)
require.NoError(t, err, "Should create request")
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "List deployments should return 200")
var result map[string]interface{}
require.NoError(t, e2e.DecodeJSON(mustReadAll(t, resp.Body), &result), "Should decode JSON")
deployments, ok := result["deployments"].([]interface{})
require.True(t, ok, "Deployments should be an array")
assert.GreaterOrEqual(t, len(deployments), 1, "Should have at least one deployment")
// Find our deployment
found := false
for _, d := range deployments {
dep, ok := d.(map[string]interface{})
if !ok {
continue
}
if dep["name"] == deploymentName {
found = true
t.Logf("✓ Found deployment in list: %s", deploymentName)
break
}
}
assert.True(t, found, "Deployment should be in list")
})
t.Run("Delete deployment", func(t *testing.T) {
e2e.DeleteDeployment(t, env, deploymentID)
// Verify deletion - allow time for replication
time.Sleep(3 * time.Second)
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
t.Logf("Delete verification response: status=%d body=%s", resp.StatusCode, string(body))
// After deletion, either 404 (not found) or 200 with empty/error response is acceptable
if resp.StatusCode == http.StatusOK {
// If 200, check if the deployment is actually gone
t.Logf("Got 200 - this may indicate soft delete or eventual consistency")
}
t.Logf("✓ Deployment deleted successfully")
// Clear deploymentID so cleanup doesn't try to delete again
deploymentID = ""
})
}
func mustReadAll(t *testing.T, r io.Reader) []byte {
t.Helper()
data, err := io.ReadAll(r)
require.NoError(t, err, "Should read all data")
return data
}

View File

@ -15,6 +15,7 @@ import (
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
@ -40,6 +41,73 @@ var (
cacheMutex sync.RWMutex
)
// createAPIKeyWithProvisioning creates an API key for a namespace, handling async provisioning
// For non-default namespaces, this may trigger cluster provisioning and wait for it to complete.
func createAPIKeyWithProvisioning(gatewayURL, wallet, namespace string, timeout time.Duration) (string, error) {
httpClient := NewHTTPClient(10 * time.Second)
makeRequest := func() (*http.Response, []byte, error) {
reqBody := map[string]string{
"wallet": wallet,
"namespace": namespace,
}
bodyBytes, _ := json.Marshal(reqBody)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, "POST", gatewayURL+"/v1/auth/simple-key", bytes.NewReader(bodyBytes))
if err != nil {
return nil, nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := httpClient.Do(req)
if err != nil {
return nil, nil, fmt.Errorf("request failed: %w", err)
}
respBody, _ := io.ReadAll(resp.Body)
resp.Body.Close()
return resp, respBody, nil
}
startTime := time.Now()
for {
if time.Since(startTime) > timeout {
return "", fmt.Errorf("timeout waiting for namespace provisioning")
}
resp, respBody, err := makeRequest()
if err != nil {
return "", err
}
// If we got 200, extract the API key
if resp.StatusCode == http.StatusOK {
var apiKeyResp map[string]interface{}
if err := json.Unmarshal(respBody, &apiKeyResp); err != nil {
return "", fmt.Errorf("failed to decode API key response: %w", err)
}
apiKey, ok := apiKeyResp["api_key"].(string)
if !ok || apiKey == "" {
return "", fmt.Errorf("API key not found in response")
}
return apiKey, nil
}
// If we got 202 Accepted, provisioning is in progress
if resp.StatusCode == http.StatusAccepted {
// Wait and retry - the cluster is being provisioned
time.Sleep(5 * time.Second)
continue
}
// Any other status is an error
return "", fmt.Errorf("API key creation failed with status %d: %s", resp.StatusCode, string(respBody))
}
}
// loadGatewayConfig loads gateway configuration from ~/.orama/gateway.yaml
func loadGatewayConfig() (map[string]interface{}, error) {
configPath, err := config.DefaultPath("gateway.yaml")
@ -80,6 +148,90 @@ func loadNodeConfig(filename string) (map[string]interface{}, error) {
return cfg, nil
}
// loadActiveEnvironment reads ~/.orama/environments.json and returns the active environment's gateway URL.
func loadActiveEnvironment() (string, error) {
homeDir, err := os.UserHomeDir()
if err != nil {
return "", err
}
data, err := os.ReadFile(filepath.Join(homeDir, ".orama", "environments.json"))
if err != nil {
return "", err
}
var envConfig struct {
Environments []struct {
Name string `json:"name"`
GatewayURL string `json:"gateway_url"`
} `json:"environments"`
ActiveEnvironment string `json:"active_environment"`
}
if err := json.Unmarshal(data, &envConfig); err != nil {
return "", err
}
for _, env := range envConfig.Environments {
if env.Name == envConfig.ActiveEnvironment {
return env.GatewayURL, nil
}
}
return "", fmt.Errorf("active environment %q not found", envConfig.ActiveEnvironment)
}
// loadCredentialAPIKey reads ~/.orama/credentials.json and returns the API key for the given gateway URL.
func loadCredentialAPIKey(gatewayURL string) (string, error) {
homeDir, err := os.UserHomeDir()
if err != nil {
return "", err
}
data, err := os.ReadFile(filepath.Join(homeDir, ".orama", "credentials.json"))
if err != nil {
return "", err
}
// credentials.json v2 format: gateways -> url -> credentials[] array
var store struct {
Gateways map[string]json.RawMessage `json:"gateways"`
}
if err := json.Unmarshal(data, &store); err != nil {
return "", err
}
raw, ok := store.Gateways[gatewayURL]
if !ok {
return "", fmt.Errorf("no credentials for gateway %s", gatewayURL)
}
// Try v2 format: { "credentials": [...], "default_index": 0 }
var v2 struct {
Credentials []struct {
APIKey string `json:"api_key"`
Namespace string `json:"namespace"`
} `json:"credentials"`
DefaultIndex int `json:"default_index"`
}
if err := json.Unmarshal(raw, &v2); err == nil && len(v2.Credentials) > 0 {
idx := v2.DefaultIndex
if idx >= len(v2.Credentials) {
idx = 0
}
return v2.Credentials[idx].APIKey, nil
}
// Try v1 format: direct Credentials object { "api_key": "..." }
var v1 struct {
APIKey string `json:"api_key"`
}
if err := json.Unmarshal(raw, &v1); err == nil && v1.APIKey != "" {
return v1.APIKey, nil
}
return "", fmt.Errorf("no API key found in credentials for %s", gatewayURL)
}
// GetGatewayURL returns the gateway base URL from config
func GetGatewayURL() string {
cacheMutex.RLock()
@ -89,7 +241,13 @@ func GetGatewayURL() string {
}
cacheMutex.RUnlock()
// Check environment variable first
// Check environment variables first (ORAMA_GATEWAY_URL takes precedence)
if envURL := os.Getenv("ORAMA_GATEWAY_URL"); envURL != "" {
cacheMutex.Lock()
gatewayURLCache = envURL
cacheMutex.Unlock()
return envURL
}
if envURL := os.Getenv("GATEWAY_URL"); envURL != "" {
cacheMutex.Lock()
gatewayURLCache = envURL
@ -97,6 +255,14 @@ func GetGatewayURL() string {
return envURL
}
// Try to load from orama active environment (~/.orama/environments.json)
if envURL, err := loadActiveEnvironment(); err == nil && envURL != "" {
cacheMutex.Lock()
gatewayURLCache = envURL
cacheMutex.Unlock()
return envURL
}
// Try to load from gateway config
gwCfg, err := loadGatewayConfig()
if err == nil {
@ -153,7 +319,16 @@ func queryAPIKeyFromRQLite() (string, error) {
return envKey, nil
}
// 2. Build database path from bootstrap/node config
// 2. If ORAMA_GATEWAY_URL is set (production mode), query the remote RQLite HTTP API
if gatewayURL := os.Getenv("ORAMA_GATEWAY_URL"); gatewayURL != "" {
apiKey, err := queryAPIKeyFromRemoteRQLite(gatewayURL)
if err == nil && apiKey != "" {
return apiKey, nil
}
// Fall through to local database check if remote fails
}
// 3. Build database path from bootstrap/node config (for local development)
homeDir, err := os.UserHomeDir()
if err != nil {
return "", fmt.Errorf("failed to get home directory: %w", err)
@ -210,7 +385,61 @@ func queryAPIKeyFromRQLite() (string, error) {
return "", fmt.Errorf("failed to retrieve API key from any SQLite database")
}
// GetAPIKey returns the gateway API key from rqlite or cache
// queryAPIKeyFromRemoteRQLite queries the remote RQLite HTTP API for an API key
func queryAPIKeyFromRemoteRQLite(gatewayURL string) (string, error) {
// Parse the gateway URL to extract the host
parsed, err := url.Parse(gatewayURL)
if err != nil {
return "", fmt.Errorf("failed to parse gateway URL: %w", err)
}
// RQLite HTTP API runs on port 5001 (not the gateway port 6001)
rqliteURL := fmt.Sprintf("http://%s:5001/db/query", parsed.Hostname())
// Create request body
reqBody := `["SELECT key FROM api_keys LIMIT 1"]`
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodPost, rqliteURL, strings.NewReader(reqBody))
if err != nil {
return "", fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", fmt.Errorf("failed to query rqlite: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("rqlite returned status %d", resp.StatusCode)
}
// Parse response
var result struct {
Results []struct {
Columns []string `json:"columns"`
Values [][]interface{} `json:"values"`
} `json:"results"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", fmt.Errorf("failed to decode response: %w", err)
}
if len(result.Results) > 0 && len(result.Results[0].Values) > 0 && len(result.Results[0].Values[0]) > 0 {
if apiKey, ok := result.Results[0].Values[0][0].(string); ok && apiKey != "" {
return apiKey, nil
}
}
return "", fmt.Errorf("no API key found in rqlite")
}
// GetAPIKey returns the gateway API key from credentials.json, env vars, or rqlite
func GetAPIKey() string {
cacheMutex.RLock()
if apiKeyCache != "" {
@ -219,7 +448,24 @@ func GetAPIKey() string {
}
cacheMutex.RUnlock()
// Query rqlite for API key
// 1. Check env var
if envKey := os.Getenv("DEBROS_API_KEY"); envKey != "" {
cacheMutex.Lock()
apiKeyCache = envKey
cacheMutex.Unlock()
return envKey
}
// 2. Try credentials.json for the active gateway
gatewayURL := GetGatewayURL()
if apiKey, err := loadCredentialAPIKey(gatewayURL); err == nil && apiKey != "" {
cacheMutex.Lock()
apiKeyCache = apiKey
cacheMutex.Unlock()
return apiKey
}
// 3. Fall back to querying rqlite directly
apiKey, err := queryAPIKeyFromRQLite()
if err != nil {
return ""
@ -966,3 +1212,559 @@ func (p *WSPubSubClientPair) Close() {
p.Subscriber.Close()
}
}
// ============================================================================
// Deployment Testing Helpers
// ============================================================================
// E2ETestEnv holds the environment configuration for deployment E2E tests
type E2ETestEnv struct {
GatewayURL string
APIKey string
Namespace string
BaseDomain string // Domain for deployment routing (e.g., "dbrs.space")
Config *E2EConfig // Full E2E configuration (for production tests)
HTTPClient *http.Client
SkipCleanup bool
}
// BuildDeploymentDomain returns the full domain for a deployment name
// Format: {name}.{baseDomain} (e.g., "myapp.dbrs.space")
func (env *E2ETestEnv) BuildDeploymentDomain(deploymentName string) string {
return fmt.Sprintf("%s.%s", deploymentName, env.BaseDomain)
}
// LoadTestEnv loads the test environment from environment variables and config file
// If ORAMA_API_KEY is not set, it creates a fresh API key for the default test namespace
func LoadTestEnv() (*E2ETestEnv, error) {
// Load E2E config (for base_domain and production settings)
cfg, err := LoadE2EConfig()
if err != nil {
// If config loading fails in production mode, that's an error
if IsProductionMode() {
return nil, fmt.Errorf("failed to load e2e config: %w", err)
}
// For local mode, use defaults
cfg = DefaultConfig()
}
gatewayURL := os.Getenv("ORAMA_GATEWAY_URL")
if gatewayURL == "" {
gatewayURL = GetGatewayURL()
}
// Check if API key is provided via environment variable, config, or credentials.json
apiKey := os.Getenv("ORAMA_API_KEY")
if apiKey == "" && cfg.APIKey != "" {
apiKey = cfg.APIKey
}
if apiKey == "" {
apiKey = GetAPIKey() // Reads from credentials.json or rqlite
}
namespace := os.Getenv("ORAMA_NAMESPACE")
// If still no API key, create a fresh one for a default test namespace
if apiKey == "" {
if namespace == "" {
namespace = "default-test-ns"
}
// Generate a unique wallet address for this namespace
wallet := fmt.Sprintf("0x%x", []byte(namespace+fmt.Sprintf("%d", time.Now().UnixNano())))
if len(wallet) < 42 {
wallet = wallet + strings.Repeat("0", 42-len(wallet))
}
if len(wallet) > 42 {
wallet = wallet[:42]
}
// Create an API key for this namespace (handles async provisioning for non-default namespaces)
var err error
apiKey, err = createAPIKeyWithProvisioning(gatewayURL, wallet, namespace, 2*time.Minute)
if err != nil {
return nil, fmt.Errorf("failed to create API key for namespace %s: %w", namespace, err)
}
} else if namespace == "" {
namespace = GetClientNamespace()
}
skipCleanup := os.Getenv("ORAMA_SKIP_CLEANUP") == "true"
return &E2ETestEnv{
GatewayURL: gatewayURL,
APIKey: apiKey,
Namespace: namespace,
BaseDomain: cfg.BaseDomain,
Config: cfg,
HTTPClient: NewHTTPClient(30 * time.Second),
SkipCleanup: skipCleanup,
}, nil
}
// LoadTestEnvWithNamespace loads test environment with a specific namespace
// It creates a new API key for the specified namespace to ensure proper isolation
func LoadTestEnvWithNamespace(namespace string) (*E2ETestEnv, error) {
// Load E2E config (for base_domain and production settings)
cfg, err := LoadE2EConfig()
if err != nil {
cfg = DefaultConfig()
}
gatewayURL := os.Getenv("ORAMA_GATEWAY_URL")
if gatewayURL == "" {
gatewayURL = GetGatewayURL()
}
skipCleanup := os.Getenv("ORAMA_SKIP_CLEANUP") == "true"
// Generate a unique wallet address for this namespace
// Using namespace as part of the wallet address for uniqueness
wallet := fmt.Sprintf("0x%x", []byte(namespace+fmt.Sprintf("%d", time.Now().UnixNano())))
if len(wallet) < 42 {
wallet = wallet + strings.Repeat("0", 42-len(wallet))
}
if len(wallet) > 42 {
wallet = wallet[:42]
}
// Create an API key for this namespace (handles async provisioning for non-default namespaces)
apiKey, err := createAPIKeyWithProvisioning(gatewayURL, wallet, namespace, 2*time.Minute)
if err != nil {
return nil, fmt.Errorf("failed to create API key for namespace %s: %w", namespace, err)
}
return &E2ETestEnv{
GatewayURL: gatewayURL,
APIKey: apiKey,
Namespace: namespace,
BaseDomain: cfg.BaseDomain,
Config: cfg,
HTTPClient: NewHTTPClient(30 * time.Second),
SkipCleanup: skipCleanup,
}, nil
}
// tarballFromDir creates a .tar.gz in memory from a directory.
func tarballFromDir(dirPath string) ([]byte, error) {
var buf bytes.Buffer
cmd := exec.Command("tar", "-czf", "-", "-C", dirPath, ".")
cmd.Stdout = &buf
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("tar failed: %w", err)
}
return buf.Bytes(), nil
}
// CreateTestDeployment creates a test deployment and returns its ID.
// tarballPath can be a .tar.gz file or a directory (which will be tarred automatically).
func CreateTestDeployment(t *testing.T, env *E2ETestEnv, name, tarballPath string) string {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
if err != nil {
t.Fatalf("failed to stat tarball path: %v", err)
}
if info.IsDir() {
// Create tarball from directory
fileData, err = tarballFromDir(tarballPath)
if err != nil {
t.Fatalf("failed to create tarball from dir: %v", err)
}
} else {
fileData, err = os.ReadFile(tarballPath)
if err != nil {
t.Fatalf("failed to read tarball: %v", err)
}
}
// Create multipart form
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
// Write name field
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
// NOTE: We intentionally do NOT send subdomain field
// This ensures only node-specific domains are created: {name}.node-{id}.domain
// Subdomain should only be sent if explicitly requested for custom domains
// Write tarball file
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to upload deployment: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
// Try both "id" and "deployment_id" field names
if id, ok := result["deployment_id"].(string); ok {
return id
}
if id, ok := result["id"].(string); ok {
return id
}
t.Fatalf("deployment response missing id field: %+v", result)
return ""
}
// DeleteDeployment deletes a deployment by ID
func DeleteDeployment(t *testing.T, env *E2ETestEnv, deploymentID string) {
t.Helper()
req, _ := http.NewRequest("DELETE", env.GatewayURL+"/v1/deployments/delete?id="+deploymentID, nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Logf("warning: failed to delete deployment: %v", err)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Logf("warning: delete deployment returned status %d", resp.StatusCode)
}
}
// GetDeployment retrieves deployment metadata by ID
func GetDeployment(t *testing.T, env *E2ETestEnv, deploymentID string) map[string]interface{} {
t.Helper()
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/get?id="+deploymentID, nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to get deployment: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("get deployment failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var deployment map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&deployment); err != nil {
t.Fatalf("failed to decode deployment: %v", err)
}
return deployment
}
// CreateSQLiteDB creates a SQLite database for a namespace
func CreateSQLiteDB(t *testing.T, env *E2ETestEnv, dbName string) {
t.Helper()
reqBody := map[string]string{"database_name": dbName}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/db/sqlite/create", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+env.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to create database: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("create database failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
}
// DeleteSQLiteDB deletes a SQLite database
func DeleteSQLiteDB(t *testing.T, env *E2ETestEnv, dbName string) {
t.Helper()
reqBody := map[string]string{"database_name": dbName}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("DELETE", env.GatewayURL+"/v1/db/sqlite/delete", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+env.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Logf("warning: failed to delete database: %v", err)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Logf("warning: delete database returned status %d", resp.StatusCode)
}
}
// ExecuteSQLQuery executes a SQL query on a database
func ExecuteSQLQuery(t *testing.T, env *E2ETestEnv, dbName, query string) map[string]interface{} {
t.Helper()
reqBody := map[string]interface{}{
"database_name": dbName,
"query": query,
}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/db/sqlite/query", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+env.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to execute query: %v", err)
}
defer resp.Body.Close()
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
t.Fatalf("failed to decode query response: %v", err)
}
if errMsg, ok := result["error"].(string); ok && errMsg != "" {
t.Fatalf("SQL query failed: %s", errMsg)
}
return result
}
// QuerySQLite executes a SELECT query and returns rows
func QuerySQLite(t *testing.T, env *E2ETestEnv, dbName, query string) []map[string]interface{} {
t.Helper()
result := ExecuteSQLQuery(t, env, dbName, query)
rows, ok := result["rows"].([]interface{})
if !ok {
return []map[string]interface{}{}
}
columns, _ := result["columns"].([]interface{})
var results []map[string]interface{}
for _, row := range rows {
rowData, ok := row.([]interface{})
if !ok {
continue
}
rowMap := make(map[string]interface{})
for i, col := range columns {
if i < len(rowData) {
rowMap[col.(string)] = rowData[i]
}
}
results = append(results, rowMap)
}
return results
}
// UploadTestFile uploads a file to IPFS and returns the CID
func UploadTestFile(t *testing.T, env *E2ETestEnv, filename, content string) string {
t.Helper()
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString(fmt.Sprintf("Content-Disposition: form-data; name=\"file\"; filename=\"%s\"\r\n", filename))
body.WriteString("Content-Type: text/plain\r\n\r\n")
body.WriteString(content)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/storage/upload", body)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to upload file: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("upload file failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
t.Fatalf("failed to decode upload response: %v", err)
}
cid, ok := result["cid"].(string)
if !ok {
t.Fatalf("CID not found in response")
}
return cid
}
// UnpinFile unpins a file from IPFS
func UnpinFile(t *testing.T, env *E2ETestEnv, cid string) {
t.Helper()
reqBody := map[string]string{"cid": cid}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/storage/unpin", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+env.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Logf("warning: failed to unpin file: %v", err)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Logf("warning: unpin file returned status %d", resp.StatusCode)
}
}
// TestDeploymentWithHostHeader tests a deployment by setting the Host header
func TestDeploymentWithHostHeader(t *testing.T, env *E2ETestEnv, host, path string) *http.Response {
t.Helper()
req, err := http.NewRequest("GET", env.GatewayURL+path, nil)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Host = host
resp, err := env.HTTPClient.Do(req)
if err != nil {
t.Fatalf("failed to test deployment: %v", err)
}
return resp
}
// PutToOlric stores a key-value pair in Olric via the gateway HTTP API
func PutToOlric(gatewayURL, apiKey, dmap, key, value string) error {
reqBody := map[string]interface{}{
"dmap": dmap,
"key": key,
"value": value,
}
bodyBytes, _ := json.Marshal(reqBody)
req, err := http.NewRequest("POST", gatewayURL+"/v1/cache/put", strings.NewReader(string(bodyBytes)))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+apiKey)
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("put failed with status %d: %s", resp.StatusCode, string(body))
}
return nil
}
// GetFromOlric retrieves a value from Olric via the gateway HTTP API
func GetFromOlric(gatewayURL, apiKey, dmap, key string) (string, error) {
reqBody := map[string]interface{}{
"dmap": dmap,
"key": key,
}
bodyBytes, _ := json.Marshal(reqBody)
req, err := http.NewRequest("POST", gatewayURL+"/v1/cache/get", strings.NewReader(string(bodyBytes)))
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", "Bearer "+apiKey)
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return "", fmt.Errorf("key not found")
}
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("get failed with status %d: %s", resp.StatusCode, string(body))
}
body, _ := io.ReadAll(resp.Body)
var result map[string]interface{}
if err := json.Unmarshal(body, &result); err != nil {
return "", err
}
if value, ok := result["value"].(string); ok {
return value, nil
}
if value, ok := result["value"]; ok {
return fmt.Sprintf("%v", value), nil
}
return "", fmt.Errorf("value not found in response")
}
// WaitForHealthy waits for a deployment to become healthy
func WaitForHealthy(t *testing.T, env *E2ETestEnv, deploymentID string, timeout time.Duration) bool {
t.Helper()
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
deployment := GetDeployment(t, env, deploymentID)
if status, ok := deployment["status"].(string); ok && status == "active" {
return true
}
time.Sleep(1 * time.Second)
}
return false
}

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package integration_test
import (
"context"
@ -10,16 +10,18 @@ import (
"sync/atomic"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
)
// TestCache_ConcurrentWrites tests concurrent cache writes
func TestCache_ConcurrentWrites(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
numGoroutines := 10
var wg sync.WaitGroup
var errorCount int32
@ -32,9 +34,9 @@ func TestCache_ConcurrentWrites(t *testing.T) {
key := fmt.Sprintf("key-%d", idx)
value := fmt.Sprintf("value-%d", idx)
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -56,9 +58,9 @@ func TestCache_ConcurrentWrites(t *testing.T) {
}
// Verify all values exist
scanReq := &HTTPRequest{
scanReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/scan",
URL: e2e.GetGatewayURL() + "/v1/cache/scan",
Body: map[string]interface{}{
"dmap": dmap,
},
@ -70,7 +72,7 @@ func TestCache_ConcurrentWrites(t *testing.T) {
}
var scanResp map[string]interface{}
if err := DecodeJSON(body, &scanResp); err != nil {
if err := e2e.DecodeJSON(body, &scanResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -82,19 +84,19 @@ func TestCache_ConcurrentWrites(t *testing.T) {
// TestCache_ConcurrentReads tests concurrent cache reads
func TestCache_ConcurrentReads(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "shared-key"
value := "shared-value"
// Put value first
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -117,9 +119,9 @@ func TestCache_ConcurrentReads(t *testing.T) {
go func() {
defer wg.Done()
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -133,7 +135,7 @@ func TestCache_ConcurrentReads(t *testing.T) {
}
var getResp map[string]interface{}
if err := DecodeJSON(body, &getResp); err != nil {
if err := e2e.DecodeJSON(body, &getResp); err != nil {
atomic.AddInt32(&errorCount, 1)
return
}
@ -153,12 +155,12 @@ func TestCache_ConcurrentReads(t *testing.T) {
// TestCache_ConcurrentDeleteAndWrite tests concurrent delete and write
func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
var wg sync.WaitGroup
var errorCount int32
@ -174,9 +176,9 @@ func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
key := fmt.Sprintf("key-%d", idx)
value := fmt.Sprintf("value-%d", idx)
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -201,9 +203,9 @@ func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
key := fmt.Sprintf("key-%d", idx)
deleteReq := &HTTPRequest{
deleteReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/delete",
URL: e2e.GetGatewayURL() + "/v1/cache/delete",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -226,21 +228,32 @@ func TestCache_ConcurrentDeleteAndWrite(t *testing.T) {
// TestRQLite_ConcurrentInserts tests concurrent database inserts
func TestRQLite_ConcurrentInserts(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
table := GenerateTableName()
table := e2e.GenerateTableName()
// Cleanup table after test
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
schema := fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
table,
)
// Create table
createReq := &HTTPRequest{
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": schema,
},
@ -261,9 +274,9 @@ func TestRQLite_ConcurrentInserts(t *testing.T) {
go func(idx int) {
defer wg.Done()
txReq := &HTTPRequest{
txReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/transaction",
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, idx),
@ -285,9 +298,9 @@ func TestRQLite_ConcurrentInserts(t *testing.T) {
}
// Verify count
queryReq := &HTTPRequest{
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/query",
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
},
@ -299,7 +312,7 @@ func TestRQLite_ConcurrentInserts(t *testing.T) {
}
var countResp map[string]interface{}
if err := DecodeJSON(body, &countResp); err != nil {
if err := e2e.DecodeJSON(body, &countResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -314,21 +327,32 @@ func TestRQLite_ConcurrentInserts(t *testing.T) {
// TestRQLite_LargeBatchTransaction tests a large transaction with many statements
func TestRQLite_LargeBatchTransaction(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
table := GenerateTableName()
table := e2e.GenerateTableName()
// Cleanup table after test
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
schema := fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT)",
table,
)
// Create table
createReq := &HTTPRequest{
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": schema,
},
@ -348,9 +372,9 @@ func TestRQLite_LargeBatchTransaction(t *testing.T) {
})
}
txReq := &HTTPRequest{
txReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/transaction",
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"ops": ops,
},
@ -362,9 +386,9 @@ func TestRQLite_LargeBatchTransaction(t *testing.T) {
}
// Verify count
queryReq := &HTTPRequest{
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/query",
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
},
@ -376,7 +400,7 @@ func TestRQLite_LargeBatchTransaction(t *testing.T) {
}
var countResp map[string]interface{}
if err := DecodeJSON(body, &countResp); err != nil {
if err := e2e.DecodeJSON(body, &countResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -390,19 +414,19 @@ func TestRQLite_LargeBatchTransaction(t *testing.T) {
// TestCache_TTLExpiryWithSleep tests TTL expiry with a controlled sleep
func TestCache_TTLExpiryWithSleep(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "ttl-expiry-key"
value := "ttl-expiry-value"
// Put value with 2 second TTL
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -417,9 +441,9 @@ func TestCache_TTLExpiryWithSleep(t *testing.T) {
}
// Verify exists immediately
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -432,7 +456,7 @@ func TestCache_TTLExpiryWithSleep(t *testing.T) {
}
// Sleep for TTL duration + buffer
Delay(2500)
e2e.Delay(2500)
// Try to get after TTL expires
_, status, err = getReq.Do(ctx)
@ -443,21 +467,21 @@ func TestCache_TTLExpiryWithSleep(t *testing.T) {
// TestCache_ConcurrentWriteAndDelete tests concurrent writes and deletes on same key
func TestCache_ConcurrentWriteAndDelete(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "contested-key"
// Alternate between writes and deletes
numIterations := 5
for i := 0; i < numIterations; i++ {
// Write
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -471,9 +495,9 @@ func TestCache_ConcurrentWriteAndDelete(t *testing.T) {
}
// Read
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -486,9 +510,9 @@ func TestCache_ConcurrentWriteAndDelete(t *testing.T) {
}
// Delete
deleteReq := &HTTPRequest{
deleteReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/delete",
URL: e2e.GetGatewayURL() + "/v1/cache/delete",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,

View File

@ -0,0 +1,462 @@
//go:build e2e
package integration_test
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/require"
)
// =============================================================================
// STRICT DATA PERSISTENCE TESTS
// These tests verify that data is properly persisted and survives operations.
// Tests FAIL if data is lost or corrupted.
// =============================================================================
// TestRQLite_DataPersistence verifies that RQLite data is persisted through the gateway.
func TestRQLite_DataPersistence(t *testing.T) {
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
tableName := fmt.Sprintf("persist_test_%d", time.Now().UnixNano())
// Cleanup
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": tableName},
}
dropReq.Do(context.Background())
}()
// Create table
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT, version INTEGER)",
tableName,
),
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not create table")
require.True(t, status == http.StatusCreated || status == http.StatusOK,
"FAIL: Create table returned status %d", status)
t.Run("Data_survives_multiple_writes", func(t *testing.T) {
// Insert initial data
var statements []string
for i := 1; i <= 10; i++ {
statements = append(statements,
fmt.Sprintf("INSERT INTO %s (value, version) VALUES ('item_%d', %d)", tableName, i, i))
}
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{"statements": statements},
}
_, status, err := insertReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not insert rows")
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
// Verify all data exists
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not count rows")
require.Equal(t, http.StatusOK, status, "FAIL: Count query returned status %d", status)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
row := rows[0].([]interface{})
count := int(row[0].(float64))
require.Equal(t, 10, count, "FAIL: Expected 10 rows, got %d", count)
}
// Update data
updateReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("UPDATE %s SET version = version + 100 WHERE version <= 5", tableName),
},
},
}
_, status, err = updateReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not update rows")
require.Equal(t, http.StatusOK, status, "FAIL: Update returned status %d", status)
// Verify updates persisted
queryUpdatedReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE version > 100", tableName),
},
}
body, status, err = queryUpdatedReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not count updated rows")
require.Equal(t, http.StatusOK, status, "FAIL: Count updated query returned status %d", status)
e2e.DecodeJSON(body, &queryResp)
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
row := rows[0].([]interface{})
count := int(row[0].(float64))
require.Equal(t, 5, count, "FAIL: Expected 5 updated rows, got %d", count)
}
t.Logf(" ✓ Data persists through multiple write operations")
})
t.Run("Deletes_are_persisted", func(t *testing.T) {
// Delete some rows
deleteReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("DELETE FROM %s WHERE version > 100", tableName),
},
},
}
_, status, err := deleteReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not delete rows")
require.Equal(t, http.StatusOK, status, "FAIL: Delete returned status %d", status)
// Verify deletes persisted
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) FROM %s", tableName),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not count remaining rows")
require.Equal(t, http.StatusOK, status, "FAIL: Count query returned status %d", status)
var queryResp map[string]interface{}
e2e.DecodeJSON(body, &queryResp)
if rows, ok := queryResp["rows"].([]interface{}); ok && len(rows) > 0 {
row := rows[0].([]interface{})
count := int(row[0].(float64))
require.Equal(t, 5, count, "FAIL: Expected 5 rows after delete, got %d", count)
}
t.Logf(" ✓ Deletes are properly persisted")
})
}
// TestRQLite_DataFilesExist verifies RQLite data files are created on disk.
func TestRQLite_DataFilesExist(t *testing.T) {
homeDir, err := os.UserHomeDir()
require.NoError(t, err, "FAIL: Could not get home directory")
// Check for RQLite data directories
dataLocations := []string{
filepath.Join(homeDir, ".orama", "node-1", "rqlite"),
filepath.Join(homeDir, ".orama", "node-2", "rqlite"),
filepath.Join(homeDir, ".orama", "node-3", "rqlite"),
filepath.Join(homeDir, ".orama", "node-4", "rqlite"),
filepath.Join(homeDir, ".orama", "node-5", "rqlite"),
}
foundDataDirs := 0
for _, dataDir := range dataLocations {
if _, err := os.Stat(dataDir); err == nil {
foundDataDirs++
t.Logf(" ✓ Found RQLite data directory: %s", dataDir)
// Check for Raft log files
entries, _ := os.ReadDir(dataDir)
for _, entry := range entries {
t.Logf(" - %s", entry.Name())
}
}
}
require.Greater(t, foundDataDirs, 0,
"FAIL: No RQLite data directories found - data may not be persisted")
t.Logf(" Found %d RQLite data directories", foundDataDirs)
}
// TestOlric_DataPersistence verifies Olric cache data persistence.
// Note: Olric is an in-memory cache, so this tests data survival during runtime.
func TestOlric_DataPersistence(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
dmap := fmt.Sprintf("persist_cache_%d", time.Now().UnixNano())
t.Run("Cache_data_survives_multiple_operations", func(t *testing.T) {
// Put multiple keys
keys := make(map[string]string)
for i := 0; i < 10; i++ {
key := fmt.Sprintf("persist_key_%d", i)
value := fmt.Sprintf("persist_value_%d", i)
keys[key] = value
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
require.NoError(t, err, "FAIL: Could not put key %s", key)
}
// Perform other operations
err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, "other_key", "other_value")
require.NoError(t, err, "FAIL: Could not put other key")
// Verify original keys still exist
for key, expectedValue := range keys {
retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
require.NoError(t, err, "FAIL: Key %s not found after other operations", key)
require.Equal(t, expectedValue, retrieved, "FAIL: Value mismatch for key %s", key)
}
t.Logf(" ✓ Cache data survives multiple operations")
})
}
// TestNamespaceCluster_DataPersistence verifies namespace-specific data is isolated and persisted.
func TestNamespaceCluster_DataPersistence(t *testing.T) {
// Create namespace
namespace := fmt.Sprintf("persist-ns-%d", time.Now().UnixNano())
env, err := e2e.LoadTestEnvWithNamespace(namespace)
require.NoError(t, err, "FAIL: Could not create namespace")
t.Logf("Created namespace: %s", namespace)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
t.Run("Namespace_data_is_isolated", func(t *testing.T) {
// Create data via gateway API
tableName := fmt.Sprintf("ns_data_%d", time.Now().UnixNano())
req := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/create-table",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"schema": fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, value TEXT)", tableName),
},
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Could not create table in namespace")
require.True(t, status == http.StatusOK || status == http.StatusCreated,
"FAIL: Create table returned status %d", status)
// Insert data
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/transaction",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s (value) VALUES ('ns_test_value')", tableName),
},
},
}
_, status, err = insertReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not insert into namespace table")
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
// Verify data exists
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/rqlite/query",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT value FROM %s", tableName),
},
}
body, status, err := queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Could not query namespace table")
require.Equal(t, http.StatusOK, status, "FAIL: Query returned status %d", status)
var queryResp map[string]interface{}
json.Unmarshal(body, &queryResp)
count, _ := queryResp["count"].(float64)
require.Equal(t, float64(1), count, "FAIL: Expected 1 row in namespace table")
t.Logf(" ✓ Namespace data is isolated and persisted")
})
}
// TestIPFS_DataPersistence verifies IPFS content is persisted and pinned.
// Note: Detailed IPFS tests are in storage_http_test.go. This test uses the helper from env.go.
func TestIPFS_DataPersistence(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
t.Run("Uploaded_content_persists", func(t *testing.T) {
// Use helper function to upload content via multipart form
content := fmt.Sprintf("persistent content %d", time.Now().UnixNano())
cid := e2e.UploadTestFile(t, env, "persist_test.txt", content)
require.NotEmpty(t, cid, "FAIL: No CID returned from upload")
t.Logf(" Uploaded content with CID: %s", cid)
// Verify content can be retrieved
getReq := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: env.GatewayURL + "/v1/storage/get/" + cid,
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
}
respBody, status, err := getReq.Do(ctx)
require.NoError(t, err, "FAIL: Get content failed")
require.Equal(t, http.StatusOK, status, "FAIL: Get returned status %d", status)
require.Contains(t, string(respBody), "persistent content",
"FAIL: Retrieved content doesn't match uploaded content")
t.Logf(" ✓ IPFS content persists and is retrievable")
})
}
// TestSQLite_DataPersistence verifies per-deployment SQLite databases persist.
func TestSQLite_DataPersistence(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "FAIL: Could not load test environment")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
dbName := fmt.Sprintf("persist_db_%d", time.Now().UnixNano())
t.Run("SQLite_database_persists", func(t *testing.T) {
// Create database
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/db/sqlite/create",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"database_name": dbName,
},
}
_, status, err := createReq.Do(ctx)
require.NoError(t, err, "FAIL: Create database failed")
require.True(t, status == http.StatusOK || status == http.StatusCreated,
"FAIL: Create returned status %d", status)
t.Logf(" Created SQLite database: %s", dbName)
// Create table and insert data
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/db/sqlite/query",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"database_name": dbName,
"query": "CREATE TABLE IF NOT EXISTS test_table (id INTEGER PRIMARY KEY, data TEXT)",
},
}
_, status, err = queryReq.Do(ctx)
require.NoError(t, err, "FAIL: Create table failed")
require.Equal(t, http.StatusOK, status, "FAIL: Create table returned status %d", status)
// Insert data
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/db/sqlite/query",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"database_name": dbName,
"query": "INSERT INTO test_table (data) VALUES ('persistent_data')",
},
}
_, status, err = insertReq.Do(ctx)
require.NoError(t, err, "FAIL: Insert failed")
require.Equal(t, http.StatusOK, status, "FAIL: Insert returned status %d", status)
// Verify data persists
selectReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: env.GatewayURL + "/v1/db/sqlite/query",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
Body: map[string]interface{}{
"database_name": dbName,
"query": "SELECT data FROM test_table",
},
}
body, status, err := selectReq.Do(ctx)
require.NoError(t, err, "FAIL: Select failed")
require.Equal(t, http.StatusOK, status, "FAIL: Select returned status %d", status)
require.Contains(t, string(body), "persistent_data",
"FAIL: Data not found in SQLite database")
t.Logf(" ✓ SQLite database data persists")
})
t.Run("SQLite_database_listed", func(t *testing.T) {
// List databases to verify it was persisted
listReq := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: env.GatewayURL + "/v1/db/sqlite/list",
Headers: map[string]string{
"Authorization": "Bearer " + env.APIKey,
},
}
body, status, err := listReq.Do(ctx)
require.NoError(t, err, "FAIL: List databases failed")
require.Equal(t, http.StatusOK, status, "FAIL: List returned status %d", status)
require.Contains(t, string(body), dbName,
"FAIL: Created database not found in list")
t.Logf(" ✓ SQLite database appears in list")
})
}

View File

@ -0,0 +1,356 @@
//go:build e2e
package integration_test
import (
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDomainRouting_BasicRouting(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("test-routing-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for deployment to be active
time.Sleep(2 * time.Second)
// Get deployment details for debugging
deployment := e2e.GetDeployment(t, env, deploymentID)
t.Logf("Deployment created: ID=%s, CID=%s, Name=%s, Status=%s",
deploymentID, deployment["content_cid"], deployment["name"], deployment["status"])
t.Run("Standard domain resolves", func(t *testing.T) {
// Domain format: {deploymentName}.{baseDomain}
domain := env.BuildDeploymentDomain(deploymentName)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should return 200 OK")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err, "Should read response body")
assert.Contains(t, string(body), "<div id=\"root\">", "Should serve React app")
assert.Contains(t, resp.Header.Get("Content-Type"), "text/html", "Content-Type should be HTML")
t.Logf("✓ Standard domain routing works: %s", domain)
})
t.Run("Non-debros domain passes through", func(t *testing.T) {
// Request with non-debros domain should not route to deployment
resp := e2e.TestDeploymentWithHostHeader(t, env, "example.com", "/")
defer resp.Body.Close()
// Should either return 404 or pass to default handler
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
"Non-debros domain should not route to deployment")
t.Logf("✓ Non-debros domains correctly pass through (status: %d)", resp.StatusCode)
})
t.Run("API paths bypass domain routing", func(t *testing.T) {
// /v1/* paths should bypass domain routing and use API key auth
domain := env.BuildDeploymentDomain(deploymentName)
req, _ := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/list", nil)
req.Host = domain
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Should execute request")
defer resp.Body.Close()
// Should return API response, not deployment content
assert.Equal(t, http.StatusOK, resp.StatusCode, "API endpoint should work")
var result map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
err = json.Unmarshal(bodyBytes, &result)
// Should be JSON API response
assert.NoError(t, err, "Should decode JSON (API response)")
assert.NotNil(t, result["deployments"], "Should have deployments field")
t.Logf("✓ API paths correctly bypass domain routing")
})
t.Run("Well-known paths bypass domain routing", func(t *testing.T) {
domain := env.BuildDeploymentDomain(deploymentName)
// /.well-known/ paths should bypass (used for ACME challenges, etc.)
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/.well-known/acme-challenge/test")
defer resp.Body.Close()
// Should not serve deployment content
// Exact status depends on implementation, but shouldn't be deployment content
body, _ := io.ReadAll(resp.Body)
bodyStr := string(body)
// Shouldn't contain React app content
if resp.StatusCode == http.StatusOK {
assert.NotContains(t, bodyStr, "<div id=\"root\">",
"Well-known paths should not serve deployment content")
}
t.Logf("✓ Well-known paths bypass routing (status: %d)", resp.StatusCode)
})
}
func TestDomainRouting_MultipleDeployments(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
tarballPath := filepath.Join("../../testdata/apps/react-app")
// Create multiple deployments
deployment1Name := fmt.Sprintf("test-multi-1-%d", time.Now().Unix())
deployment2Name := fmt.Sprintf("test-multi-2-%d", time.Now().Unix())
deployment1ID := e2e.CreateTestDeployment(t, env, deployment1Name, tarballPath)
time.Sleep(1 * time.Second)
deployment2ID := e2e.CreateTestDeployment(t, env, deployment2Name, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deployment1ID)
e2e.DeleteDeployment(t, env, deployment2ID)
}
}()
time.Sleep(2 * time.Second)
t.Run("Each deployment routes independently", func(t *testing.T) {
domain1 := env.BuildDeploymentDomain(deployment1Name)
domain2 := env.BuildDeploymentDomain(deployment2Name)
// Test deployment 1
resp1 := e2e.TestDeploymentWithHostHeader(t, env, domain1, "/")
defer resp1.Body.Close()
assert.Equal(t, http.StatusOK, resp1.StatusCode, "Deployment 1 should serve")
// Test deployment 2
resp2 := e2e.TestDeploymentWithHostHeader(t, env, domain2, "/")
defer resp2.Body.Close()
assert.Equal(t, http.StatusOK, resp2.StatusCode, "Deployment 2 should serve")
t.Logf("✓ Multiple deployments route independently")
t.Logf(" - Domain 1: %s", domain1)
t.Logf(" - Domain 2: %s", domain2)
})
t.Run("Wrong domain returns 404", func(t *testing.T) {
// Request with non-existent deployment subdomain
fakeDeploymentDomain := env.BuildDeploymentDomain(fmt.Sprintf("nonexistent-deployment-%d", time.Now().Unix()))
resp := e2e.TestDeploymentWithHostHeader(t, env, fakeDeploymentDomain, "/")
defer resp.Body.Close()
assert.Equal(t, http.StatusNotFound, resp.StatusCode,
"Non-existent deployment should return 404")
t.Logf("✓ Non-existent deployment returns 404")
})
}
func TestDomainRouting_ContentTypes(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("test-content-types-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(2 * time.Second)
domain := env.BuildDeploymentDomain(deploymentName)
contentTypeTests := []struct {
path string
shouldHave string
description string
}{
{"/", "text/html", "HTML root"},
{"/index.html", "text/html", "HTML file"},
}
for _, test := range contentTypeTests {
t.Run(test.description, func(t *testing.T) {
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, test.path)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
contentType := resp.Header.Get("Content-Type")
assert.Contains(t, contentType, test.shouldHave,
"Content-Type for %s should contain %s", test.path, test.shouldHave)
t.Logf("✓ %s: %s", test.description, contentType)
} else {
t.Logf("⚠ %s returned status %d", test.path, resp.StatusCode)
}
})
}
}
func TestDomainRouting_SPAFallback(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("test-spa-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(2 * time.Second)
domain := env.BuildDeploymentDomain(deploymentName)
t.Run("Unknown paths fall back to index.html", func(t *testing.T) {
unknownPaths := []string{
"/about",
"/users/123",
"/settings/profile",
"/some/deep/nested/path",
}
for _, path := range unknownPaths {
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, path)
body, _ := io.ReadAll(resp.Body)
resp.Body.Close()
// Should return index.html for SPA routing
assert.Equal(t, http.StatusOK, resp.StatusCode,
"SPA fallback should return 200 for %s", path)
assert.Contains(t, string(body), "<div id=\"root\">",
"SPA fallback should return index.html for %s", path)
}
t.Logf("✓ SPA fallback routing verified for %d paths", len(unknownPaths))
})
}
// TestDeployment_DomainFormat verifies that deployment URLs use the correct format:
// - CORRECT: {name}-{random}.{baseDomain} (e.g., "myapp-f3o4if.dbrs.space")
// - WRONG: {name}.node-{shortID}.{baseDomain} (should NOT exist)
func TestDeployment_DomainFormat(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("format-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for deployment
time.Sleep(2 * time.Second)
t.Run("Deployment URL has correct format", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
// Get the deployment URLs
urls, ok := deployment["urls"].([]interface{})
if !ok || len(urls) == 0 {
// Fall back to single url field
if url, ok := deployment["url"].(string); ok && url != "" {
urls = []interface{}{url}
}
}
// Get the subdomain from deployment response
subdomain, _ := deployment["subdomain"].(string)
t.Logf("Deployment subdomain: %s", subdomain)
t.Logf("Deployment URLs: %v", urls)
foundCorrectFormat := false
for _, u := range urls {
urlStr, ok := u.(string)
if !ok {
continue
}
// URL should start with https://{name}-
expectedPrefix := fmt.Sprintf("https://%s-", deploymentName)
if strings.HasPrefix(urlStr, expectedPrefix) {
foundCorrectFormat = true
}
// URL should contain base domain
assert.Contains(t, urlStr, env.BaseDomain,
"URL should contain base domain %s", env.BaseDomain)
// URL should NOT contain node identifier pattern
assert.NotContains(t, urlStr, ".node-",
"URL should NOT have node identifier (got: %s)", urlStr)
}
if len(urls) > 0 {
assert.True(t, foundCorrectFormat, "Should find URL with correct domain format (https://{name}-{random}.{baseDomain})")
}
t.Logf("✓ Domain format verification passed")
t.Logf(" - Format: {name}-{random}.{baseDomain}")
})
t.Run("Domain resolves via Host header", func(t *testing.T) {
// Get the actual subdomain from the deployment
deployment := e2e.GetDeployment(t, env, deploymentID)
subdomain, _ := deployment["subdomain"].(string)
if subdomain == "" {
t.Skip("No subdomain set, skipping host header test")
}
domain := subdomain + "." + env.BaseDomain
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Domain %s should resolve successfully", domain)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Contains(t, string(body), "<div id=\"root\">",
"Should serve deployment content")
t.Logf("✓ Domain %s resolves correctly", domain)
})
}

View File

@ -0,0 +1,278 @@
//go:build e2e
package integration_test
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestFullStack_GoAPI_SQLite(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
appName := fmt.Sprintf("fullstack-app-%d", time.Now().Unix())
backendName := appName + "-backend"
dbName := appName + "-db"
var backendID string
defer func() {
if !env.SkipCleanup {
if backendID != "" {
e2e.DeleteDeployment(t, env, backendID)
}
e2e.DeleteSQLiteDB(t, env, dbName)
}
}()
// Step 1: Create SQLite database
t.Run("Create SQLite database", func(t *testing.T) {
e2e.CreateSQLiteDB(t, env, dbName)
// Create users table
query := `CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
email TEXT UNIQUE NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)`
e2e.ExecuteSQLQuery(t, env, dbName, query)
// Insert test data
insertQuery := `INSERT INTO users (name, email) VALUES ('Alice', 'alice@example.com')`
result := e2e.ExecuteSQLQuery(t, env, dbName, insertQuery)
assert.NotNil(t, result, "Should execute INSERT successfully")
t.Logf("✓ Database created with users table")
})
// Step 2: Deploy Go backend (this would normally connect to SQLite)
// Note: For now we test the Go backend deployment without actual DB connection
// as that requires environment variable injection during deployment
t.Run("Deploy Go backend", func(t *testing.T) {
tarballPath := filepath.Join("../../testdata/apps/go-api")
// Note: In a real implementation, we would pass DATABASE_NAME env var
// For now, we just test the deployment mechanism
backendID = e2e.CreateTestDeployment(t, env, backendName, tarballPath)
assert.NotEmpty(t, backendID, "Backend deployment ID should not be empty")
t.Logf("✓ Go backend deployed: %s", backendName)
// Wait for deployment to become active
time.Sleep(3 * time.Second)
})
// Step 3: Test database operations
t.Run("Test database CRUD operations", func(t *testing.T) {
// INSERT
insertQuery := `INSERT INTO users (name, email) VALUES ('Bob', 'bob@example.com')`
e2e.ExecuteSQLQuery(t, env, dbName, insertQuery)
// SELECT
users := e2e.QuerySQLite(t, env, dbName, "SELECT * FROM users ORDER BY id")
require.GreaterOrEqual(t, len(users), 2, "Should have at least 2 users")
assert.Equal(t, "Alice", users[0]["name"], "First user should be Alice")
assert.Equal(t, "Bob", users[1]["name"], "Second user should be Bob")
t.Logf("✓ Database CRUD operations work")
t.Logf(" - Found %d users", len(users))
// UPDATE
updateQuery := `UPDATE users SET email = 'alice.new@example.com' WHERE name = 'Alice'`
result := e2e.ExecuteSQLQuery(t, env, dbName, updateQuery)
rowsAffected, ok := result["rows_affected"].(float64)
require.True(t, ok, "Should have rows_affected")
assert.Equal(t, float64(1), rowsAffected, "Should update 1 row")
// Verify update
updated := e2e.QuerySQLite(t, env, dbName, "SELECT email FROM users WHERE name = 'Alice'")
require.Len(t, updated, 1, "Should find Alice")
assert.Equal(t, "alice.new@example.com", updated[0]["email"], "Email should be updated")
t.Logf("✓ UPDATE operation verified")
// DELETE
deleteQuery := `DELETE FROM users WHERE name = 'Bob'`
result = e2e.ExecuteSQLQuery(t, env, dbName, deleteQuery)
rowsAffected, ok = result["rows_affected"].(float64)
require.True(t, ok, "Should have rows_affected")
assert.Equal(t, float64(1), rowsAffected, "Should delete 1 row")
// Verify deletion
remaining := e2e.QuerySQLite(t, env, dbName, "SELECT * FROM users")
assert.Equal(t, 1, len(remaining), "Should have 1 user remaining")
t.Logf("✓ DELETE operation verified")
})
// Step 4: Test backend API endpoints (if deployment is active)
t.Run("Test backend API endpoints", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, backendID)
status, ok := deployment["status"].(string)
if !ok || status != "active" {
t.Skip("Backend deployment not active, skipping API tests")
return
}
backendDomain := env.BuildDeploymentDomain(backendName)
// Test health endpoint
resp := e2e.TestDeploymentWithHostHeader(t, env, backendDomain, "/health")
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
var health map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
require.NoError(t, json.Unmarshal(bodyBytes, &health), "Should decode health response")
assert.Equal(t, "healthy", health["status"], "Status should be healthy")
assert.Equal(t, "go-backend-test", health["service"], "Service name should match")
t.Logf("✓ Backend health check passed")
} else {
t.Logf("⚠ Health check returned status %d (deployment may still be starting)", resp.StatusCode)
}
// Test users API endpoint
resp2 := e2e.TestDeploymentWithHostHeader(t, env, backendDomain, "/api/users")
defer resp2.Body.Close()
if resp2.StatusCode == http.StatusOK {
var usersResp map[string]interface{}
bodyBytes, _ := io.ReadAll(resp2.Body)
require.NoError(t, json.Unmarshal(bodyBytes, &usersResp), "Should decode users response")
users, ok := usersResp["users"].([]interface{})
require.True(t, ok, "Should have users array")
assert.GreaterOrEqual(t, len(users), 3, "Should have test users")
t.Logf("✓ Backend API endpoint works")
t.Logf(" - Users endpoint returned %d users", len(users))
} else {
t.Logf("⚠ Users API returned status %d (deployment may still be starting)", resp2.StatusCode)
}
})
// Step 5: Test database backup
t.Run("Test database backup", func(t *testing.T) {
reqBody := map[string]string{"database_name": dbName}
bodyBytes, _ := json.Marshal(reqBody)
req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/db/sqlite/backup", bytes.NewReader(bodyBytes))
req.Header.Set("Authorization", "Bearer "+env.APIKey)
req.Header.Set("Content-Type", "application/json")
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err, "Should execute backup request")
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
var result map[string]interface{}
bodyBytes, _ := io.ReadAll(resp.Body)
require.NoError(t, json.Unmarshal(bodyBytes, &result), "Should decode backup response")
backupCID, ok := result["backup_cid"].(string)
require.True(t, ok, "Should have backup CID")
assert.NotEmpty(t, backupCID, "Backup CID should not be empty")
t.Logf("✓ Database backup created")
t.Logf(" - CID: %s", backupCID)
} else {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Logf("⚠ Backup returned status %d: %s", resp.StatusCode, string(bodyBytes))
}
})
// Step 6: Test concurrent database queries
t.Run("Test concurrent database reads", func(t *testing.T) {
// WAL mode should allow concurrent reads — run sequentially to avoid t.Fatal in goroutines
for i := 0; i < 5; i++ {
users := e2e.QuerySQLite(t, env, dbName, "SELECT * FROM users")
assert.GreaterOrEqual(t, len(users), 0, "Should query successfully")
}
t.Logf("✓ Sequential reads successful")
})
}
func TestFullStack_StaticSite_SQLite(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
appName := fmt.Sprintf("fullstack-static-%d", time.Now().Unix())
frontendName := appName + "-frontend"
dbName := appName + "-db"
var frontendID string
defer func() {
if !env.SkipCleanup {
if frontendID != "" {
e2e.DeleteDeployment(t, env, frontendID)
}
e2e.DeleteSQLiteDB(t, env, dbName)
}
}()
t.Run("Deploy static site and create database", func(t *testing.T) {
// Create database
e2e.CreateSQLiteDB(t, env, dbName)
e2e.ExecuteSQLQuery(t, env, dbName, "CREATE TABLE page_views (id INTEGER PRIMARY KEY, page TEXT, count INTEGER)")
e2e.ExecuteSQLQuery(t, env, dbName, "INSERT INTO page_views (page, count) VALUES ('home', 0)")
// Deploy frontend
tarballPath := filepath.Join("../../testdata/apps/react-app")
frontendID = e2e.CreateTestDeployment(t, env, frontendName, tarballPath)
assert.NotEmpty(t, frontendID, "Frontend deployment should succeed")
t.Logf("✓ Static site deployed with SQLite backend")
// Wait for deployment
time.Sleep(2 * time.Second)
})
t.Run("Test frontend serving and database interaction", func(t *testing.T) {
frontendDomain := env.BuildDeploymentDomain(frontendName)
// Test frontend
resp := e2e.TestDeploymentWithHostHeader(t, env, frontendDomain, "/")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Frontend should serve")
body, _ := io.ReadAll(resp.Body)
assert.Contains(t, string(body), "<div id=\"root\">", "Should contain React app")
// Simulate page view tracking
e2e.ExecuteSQLQuery(t, env, dbName, "UPDATE page_views SET count = count + 1 WHERE page = 'home'")
// Verify count
views := e2e.QuerySQLite(t, env, dbName, "SELECT count FROM page_views WHERE page = 'home'")
require.Len(t, views, 1, "Should have page view record")
count, ok := views[0]["count"].(float64)
require.True(t, ok, "Count should be a number")
assert.Equal(t, float64(1), count, "Page view count should be incremented")
t.Logf("✓ Full stack integration verified")
t.Logf(" - Frontend: %s", frontendDomain)
t.Logf(" - Database: %s", dbName)
t.Logf(" - Page views tracked: %.0f", count)
})
}

View File

@ -0,0 +1,127 @@
//go:build e2e
package integration
import (
"fmt"
"io"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestIPFS_ContentPinnedOnMultipleNodes verifies that deploying a static app
// makes the IPFS content available across multiple nodes.
func TestIPFS_ContentPinnedOnMultipleNodes(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
deploymentName := fmt.Sprintf("ipfs-pin-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(15 * time.Second) // Wait for IPFS content replication
deployment := e2e.GetDeployment(t, env, deploymentID)
contentCID, _ := deployment["content_cid"].(string)
require.NotEmpty(t, contentCID, "Deployment should have a content CID")
t.Run("Content served via gateway", func(t *testing.T) {
// Extract domain from deployment URLs
urls, _ := deployment["urls"].([]interface{})
require.NotEmpty(t, urls, "Deployment should have URLs")
urlStr, _ := urls[0].(string)
domain := urlStr
if len(urlStr) > 8 && urlStr[:8] == "https://" {
domain = urlStr[8:]
} else if len(urlStr) > 7 && urlStr[:7] == "http://" {
domain = urlStr[7:]
}
if len(domain) > 0 && domain[len(domain)-1] == '/' {
domain = domain[:len(domain)-1]
}
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
t.Logf("status=%d, body=%d bytes", resp.StatusCode, len(body))
assert.Equal(t, http.StatusOK, resp.StatusCode,
"IPFS content should be served via gateway (CID: %s)", contentCID)
})
}
// TestIPFS_LargeFileDeployment verifies that deploying an app with larger
// static assets works correctly.
func TestIPFS_LargeFileDeployment(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("ipfs-large-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
// The react-vite tarball is our largest test asset
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(5 * time.Second)
t.Run("Deployment has valid CID", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
contentCID, _ := deployment["content_cid"].(string)
assert.NotEmpty(t, contentCID, "Should have a content CID")
assert.True(t, len(contentCID) > 10, "CID should be a valid IPFS hash")
t.Logf("Content CID: %s", contentCID)
})
t.Run("Static content serves correctly", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
urls, ok := deployment["urls"].([]interface{})
if !ok || len(urls) == 0 {
t.Skip("No URLs in deployment")
}
nodeURL, _ := urls[0].(string)
domain := nodeURL
if len(nodeURL) > 8 && nodeURL[:8] == "https://" {
domain = nodeURL[8:]
} else if len(nodeURL) > 7 && nodeURL[:7] == "http://" {
domain = nodeURL[7:]
}
if len(domain) > 0 && domain[len(domain)-1] == '/' {
domain = domain[:len(domain)-1]
}
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Greater(t, len(body), 100, "Response should have substantial content")
})
}

View File

@ -0,0 +1,142 @@
//go:build e2e && production
package production
import (
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestCrossNode_ProxyRouting tests that requests routed through the gateway
// are served correctly for a deployment.
func TestCrossNode_ProxyRouting(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
if len(env.Config.Servers) < 2 {
t.Skip("Cross-node testing requires at least 2 servers in config")
}
deploymentName := fmt.Sprintf("proxy-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for deployment to be active
time.Sleep(3 * time.Second)
domain := env.BuildDeploymentDomain(deploymentName)
t.Logf("Testing routing for: %s", domain)
t.Run("Request via gateway succeeds", func(t *testing.T) {
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Request should return 200 (got %d: %s)", resp.StatusCode, string(body))
assert.Contains(t, string(body), "<div id=\"root\">",
"Should serve deployment content")
})
}
// TestCrossNode_APIConsistency tests that API responses are consistent
func TestCrossNode_APIConsistency(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("consistency-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for replication
time.Sleep(5 * time.Second)
t.Run("Deployment list contains our deployment", func(t *testing.T) {
req, err := http.NewRequest("GET", env.GatewayURL+"/v1/deployments/list", nil)
require.NoError(t, err)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result))
deployments, ok := result["deployments"].([]interface{})
require.True(t, ok, "Response should have deployments array")
t.Logf("Gateway reports %d deployments", len(deployments))
found := false
for _, d := range deployments {
dep, _ := d.(map[string]interface{})
if dep["name"] == deploymentName {
found = true
break
}
}
assert.True(t, found, "Our deployment should be in the list")
})
}
// TestCrossNode_DeploymentGetConsistency tests that deployment details are correct
func TestCrossNode_DeploymentGetConsistency(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("get-consistency-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for replication
time.Sleep(5 * time.Second)
t.Run("Deployment details are correct", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
cid, _ := deployment["content_cid"].(string)
assert.NotEmpty(t, cid, "Should have a content CID")
name, _ := deployment["name"].(string)
assert.Equal(t, deploymentName, name, "Name should match")
t.Logf("Deployment: name=%s, cid=%s, status=%s", name, cid, deployment["status"])
})
}

View File

@ -0,0 +1,333 @@
//go:build e2e && production
package production
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDNS_MultipleARecords verifies that deploying with replicas creates
// multiple A records (one per node) for DNS round-robin.
func TestDNS_MultipleARecords(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
deploymentName := fmt.Sprintf("dns-multi-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for replica setup and DNS propagation
time.Sleep(15 * time.Second)
t.Run("DNS returns multiple IPs", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
subdomain, _ := deployment["subdomain"].(string)
if subdomain == "" {
subdomain = deploymentName
}
fqdn := fmt.Sprintf("%s.%s", subdomain, env.BaseDomain)
// Query nameserver directly
nameserverIP := env.Config.Servers[0].IP
resolver := &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{Timeout: 10 * time.Second}
return d.Dial("udp", nameserverIP+":53")
},
}
ctx := context.Background()
ips, err := resolver.LookupHost(ctx, fqdn)
if err != nil {
t.Logf("DNS lookup failed for %s: %v", fqdn, err)
t.Log("Trying net.LookupHost instead...")
ips, err = net.LookupHost(fqdn)
}
if err != nil {
t.Logf("DNS lookup failed: %v (DNS may not be propagated yet)", err)
t.Skip("DNS not yet propagated")
}
t.Logf("DNS returned %d IPs for %s: %v", len(ips), fqdn, ips)
assert.GreaterOrEqual(t, len(ips), 2,
"Should have at least 2 A records (home + replica)")
// Verify returned IPs are from our server list
serverIPs := e2e.GetServerIPs(env.Config)
for _, ip := range ips {
assert.Contains(t, serverIPs, ip,
"DNS IP %s should be one of our servers", ip)
}
})
}
// TestDNS_CleanupOnDelete verifies that deleting a deployment removes all
// DNS records (both home and replica A records).
func TestDNS_CleanupOnDelete(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("dns-cleanup-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
// Wait for DNS
time.Sleep(10 * time.Second)
// Get subdomain before deletion
deployment := e2e.GetDeployment(t, env, deploymentID)
subdomain, _ := deployment["subdomain"].(string)
if subdomain == "" {
subdomain = deploymentName
}
fqdn := fmt.Sprintf("%s.%s", subdomain, env.BaseDomain)
// Verify DNS works before deletion
t.Run("DNS resolves before deletion", func(t *testing.T) {
nodeURL := extractNodeURLProd(t, deployment)
if nodeURL == "" {
t.Skip("No URL to test")
}
domain := extractDomainProd(nodeURL)
req, _ := http.NewRequest("GET", env.GatewayURL+"/", nil)
req.Host = domain
resp, err := env.HTTPClient.Do(req)
if err == nil {
resp.Body.Close()
t.Logf("Pre-delete: status=%d", resp.StatusCode)
}
})
// Delete
e2e.DeleteDeployment(t, env, deploymentID)
time.Sleep(10 * time.Second)
t.Run("DNS records removed after deletion", func(t *testing.T) {
ips, err := net.LookupHost(fqdn)
if err != nil {
t.Logf("DNS lookup failed (expected): %v", err)
return // Good — no records
}
// If we still get IPs, they might be cached. Log and warn.
if len(ips) > 0 {
t.Logf("WARNING: DNS still returns %d IPs after deletion (may be cached): %v", len(ips), ips)
}
})
}
// TestDNS_CustomSubdomain verifies that deploying with a custom subdomain
// creates DNS records using the custom name.
func TestDNS_CustomSubdomain(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("dns-custom-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := createDeploymentWithSubdomain(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(10 * time.Second)
t.Run("Deployment has subdomain with random suffix", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
subdomain, _ := deployment["subdomain"].(string)
require.NotEmpty(t, subdomain, "Deployment should have a subdomain")
t.Logf("Subdomain: %s", subdomain)
// Verify the subdomain starts with the deployment name
assert.Contains(t, subdomain, deploymentName[:10],
"Subdomain should relate to deployment name")
})
}
// TestDNS_RedeployPreservesSubdomain verifies that updating a deployment
// does not change the subdomain/DNS.
func TestDNS_RedeployPreservesSubdomain(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
deploymentName := fmt.Sprintf("dns-preserve-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(5 * time.Second)
// Get original subdomain
deployment := e2e.GetDeployment(t, env, deploymentID)
originalSubdomain, _ := deployment["subdomain"].(string)
originalURLs := deployment["urls"]
t.Logf("Original subdomain: %s, urls: %v", originalSubdomain, originalURLs)
// Update
updateStaticDeploymentProd(t, env, deploymentName, tarballPath)
time.Sleep(5 * time.Second)
// Verify subdomain unchanged
t.Run("Subdomain unchanged after update", func(t *testing.T) {
updated := e2e.GetDeployment(t, env, deploymentID)
updatedSubdomain, _ := updated["subdomain"].(string)
assert.Equal(t, originalSubdomain, updatedSubdomain,
"Subdomain should not change after update")
t.Logf("After update: subdomain=%s", updatedSubdomain)
})
}
func createDeploymentWithSubdomain(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
require.NoError(t, err)
if info.IsDir() {
fileData, err = exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
require.NoError(t, err)
} else {
file, err := os.Open(tarballPath)
require.NoError(t, err)
defer file.Close()
fileData, _ = io.ReadAll(file)
}
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/upload", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Upload failed: status=%d body=%s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
json.NewDecoder(resp.Body).Decode(&result)
if id, ok := result["deployment_id"].(string); ok {
return id
}
if id, ok := result["id"].(string); ok {
return id
}
t.Fatalf("No id in response: %+v", result)
return ""
}
func updateStaticDeploymentProd(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
require.NoError(t, err)
if info.IsDir() {
fileData, err = exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
require.NoError(t, err)
} else {
file, err := os.Open(tarballPath)
require.NoError(t, err)
defer file.Close()
fileData, _ = io.ReadAll(file)
}
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/static/update", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Update failed: status=%d body=%s", resp.StatusCode, string(bodyBytes))
}
}

View File

@ -0,0 +1,121 @@
//go:build e2e && production
package production
import (
"context"
"fmt"
"net"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDNS_DeploymentResolution tests that deployed applications are resolvable via DNS
// This test requires production mode as it performs real DNS lookups
func TestDNS_DeploymentResolution(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("dns-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for DNS propagation
domain := env.BuildDeploymentDomain(deploymentName)
t.Logf("Testing DNS resolution for: %s", domain)
t.Run("DNS resolves to valid server IP", func(t *testing.T) {
// Allow some time for DNS propagation
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
var ips []string
var err error
// Poll for DNS resolution
for {
select {
case <-ctx.Done():
t.Fatalf("DNS resolution timeout for %s", domain)
default:
ips, err = net.LookupHost(domain)
if err == nil && len(ips) > 0 {
goto resolved
}
time.Sleep(2 * time.Second)
}
}
resolved:
t.Logf("DNS resolved: %s -> %v", domain, ips)
assert.NotEmpty(t, ips, "Should have IP addresses")
// Verify resolved IP is one of our servers
validIPs := e2e.GetServerIPs(env.Config)
if len(validIPs) > 0 {
found := false
for _, ip := range ips {
for _, validIP := range validIPs {
if ip == validIP {
found = true
break
}
}
}
assert.True(t, found, "Resolved IP should be one of our servers: %v (valid: %v)", ips, validIPs)
}
})
}
// TestDNS_BaseDomainResolution tests that the base domain resolves correctly
func TestDNS_BaseDomainResolution(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
t.Run("Base domain resolves", func(t *testing.T) {
ips, err := net.LookupHost(env.BaseDomain)
require.NoError(t, err, "Base domain %s should resolve", env.BaseDomain)
assert.NotEmpty(t, ips, "Should have IP addresses")
t.Logf("✓ Base domain %s resolves to: %v", env.BaseDomain, ips)
})
}
// TestDNS_WildcardResolution tests wildcard DNS for arbitrary subdomains
func TestDNS_WildcardResolution(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
t.Run("Wildcard subdomain resolves", func(t *testing.T) {
// Test with a random subdomain that doesn't exist as a deployment
randomSubdomain := fmt.Sprintf("random-test-%d.%s", time.Now().UnixNano(), env.BaseDomain)
ips, err := net.LookupHost(randomSubdomain)
if err != nil {
// DNS may not support wildcard - that's OK for some setups
t.Logf("⚠ Wildcard DNS not configured (this may be expected): %v", err)
t.Skip("Wildcard DNS not configured")
return
}
assert.NotEmpty(t, ips, "Wildcard subdomain should resolve")
t.Logf("✓ Wildcard subdomain resolves: %s -> %v", randomSubdomain, ips)
})
}

View File

@ -0,0 +1,234 @@
//go:build e2e && production
package production
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFailover_HomeNodeDown verifies that when the home node's deployment process
// is down, requests still succeed via the replica node.
func TestFailover_HomeNodeDown(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
if len(env.Config.Servers) < 2 {
t.Skip("Failover testing requires at least 2 servers")
}
// Deploy a Node.js backend so we have a process to stop
deploymentName := fmt.Sprintf("failover-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/node-api")
deploymentID := createNodeJSDeploymentProd(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for deployment and replica
healthy := e2e.WaitForHealthy(t, env, deploymentID, 90*time.Second)
require.True(t, healthy, "Deployment should become healthy")
time.Sleep(20 * time.Second) // Wait for async replica setup
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURLProd(t, deployment)
require.NotEmpty(t, nodeURL)
domain := extractDomainProd(nodeURL)
t.Run("Deployment serves via gateway", func(t *testing.T) {
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/health")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Deployment should be served via gateway (got %d: %s)", resp.StatusCode, string(body))
t.Logf("Gateway response: status=%d body=%s", resp.StatusCode, string(body))
})
}
// TestFailover_5xxRetry verifies that if one node returns a gateway error,
// the middleware retries on the next replica.
func TestFailover_5xxRetry(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
// Deploy a static app (always works via IPFS, no process to crash)
deploymentName := fmt.Sprintf("retry-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
time.Sleep(10 * time.Second)
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURLProd(t, deployment)
if nodeURL == "" {
t.Skip("No node URL")
}
domain := extractDomainProd(nodeURL)
t.Run("Deployment serves successfully", func(t *testing.T) {
resp := e2e.TestDeploymentWithHostHeader(t, env, domain, "/")
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
assert.Equal(t, http.StatusOK, resp.StatusCode,
"Static content should be served (got %d: %s)", resp.StatusCode, string(body))
})
}
// TestFailover_CrossNodeProxyTimeout verifies that cross-node proxy fails fast
// (within a reasonable timeout) rather than hanging.
func TestFailover_CrossNodeProxyTimeout(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
if len(env.Config.Servers) < 2 {
t.Skip("Requires at least 2 servers")
}
// Make a request to a non-existent deployment — should fail fast
domain := fmt.Sprintf("nonexistent-%d.%s", time.Now().Unix(), env.BaseDomain)
start := time.Now()
req, _ := http.NewRequest("GET", env.GatewayURL+"/", nil)
req.Host = domain
resp, err := env.HTTPClient.Do(req)
elapsed := time.Since(start)
if err != nil {
t.Logf("Request failed in %v: %v", elapsed, err)
} else {
resp.Body.Close()
t.Logf("Got status %d in %v", resp.StatusCode, elapsed)
}
// Should respond within 15 seconds (our proxy timeout is 5s)
assert.Less(t, elapsed.Seconds(), 15.0,
"Request to non-existent deployment should fail fast, took %v", elapsed)
}
func createNodeJSDeploymentProd(t *testing.T, env *e2e.E2ETestEnv, name, tarballPath string) string {
t.Helper()
var fileData []byte
info, err := os.Stat(tarballPath)
require.NoError(t, err, "Failed to stat: %s", tarballPath)
if info.IsDir() {
tarData, err := exec.Command("tar", "-czf", "-", "-C", tarballPath, ".").Output()
require.NoError(t, err, "Failed to create tarball from %s", tarballPath)
fileData = tarData
} else {
file, err := os.Open(tarballPath)
require.NoError(t, err, "Failed to open tarball: %s", tarballPath)
defer file.Close()
fileData, _ = io.ReadAll(file)
}
body := &bytes.Buffer{}
boundary := "----WebKitFormBoundary7MA4YWxkTrZu0gW"
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"name\"\r\n\r\n")
body.WriteString(name + "\r\n")
body.WriteString("--" + boundary + "\r\n")
body.WriteString("Content-Disposition: form-data; name=\"tarball\"; filename=\"app.tar.gz\"\r\n")
body.WriteString("Content-Type: application/gzip\r\n\r\n")
body.Write(fileData)
body.WriteString("\r\n--" + boundary + "--\r\n")
req, err := http.NewRequest("POST", env.GatewayURL+"/v1/deployments/nodejs/upload", body)
require.NoError(t, err)
req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bodyBytes, _ := io.ReadAll(resp.Body)
t.Fatalf("Deployment upload failed with status %d: %s", resp.StatusCode, string(bodyBytes))
}
var result map[string]interface{}
require.NoError(t, json.NewDecoder(resp.Body).Decode(&result))
if id, ok := result["deployment_id"].(string); ok {
return id
}
if id, ok := result["id"].(string); ok {
return id
}
t.Fatalf("Deployment response missing id: %+v", result)
return ""
}
func extractNodeURLProd(t *testing.T, deployment map[string]interface{}) string {
t.Helper()
if urls, ok := deployment["urls"].([]interface{}); ok && len(urls) > 0 {
if url, ok := urls[0].(string); ok {
return url
}
}
if urls, ok := deployment["urls"].(map[string]interface{}); ok {
if url, ok := urls["node"].(string); ok {
return url
}
}
return ""
}
func extractDomainProd(url string) string {
domain := url
if len(url) > 8 && url[:8] == "https://" {
domain = url[8:]
} else if len(url) > 7 && url[:7] == "http://" {
domain = url[7:]
}
if len(domain) > 0 && domain[len(domain)-1] == '/' {
domain = domain[:len(domain)-1]
}
return domain
}

View File

@ -0,0 +1,191 @@
//go:build e2e && production
package production
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestHTTPS_CertificateValid tests that HTTPS works with a valid certificate
func TestHTTPS_CertificateValid(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("https-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
deploymentID := e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
defer func() {
if !env.SkipCleanup {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
// Wait for deployment and certificate provisioning
time.Sleep(5 * time.Second)
domain := env.BuildDeploymentDomain(deploymentName)
httpsURL := fmt.Sprintf("https://%s", domain)
t.Run("HTTPS connection with certificate verification", func(t *testing.T) {
// Create client that DOES verify certificates
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
// Do NOT skip verification - we want to test real certs
InsecureSkipVerify: false,
},
},
}
req, err := http.NewRequest("GET", httpsURL+"/", nil)
require.NoError(t, err)
resp, err := client.Do(req)
if err != nil {
// Certificate might not be ready yet, or domain might not resolve
t.Logf("⚠ HTTPS request failed (this may be expected if certs are still provisioning): %v", err)
t.Skip("HTTPS not available or certificate not ready")
return
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
t.Logf("HTTPS returned %d (deployment may not be routed yet): %s", resp.StatusCode, string(body))
}
// Check TLS connection state
if resp.TLS != nil {
t.Logf("✓ HTTPS works with valid certificate")
t.Logf(" - Domain: %s", domain)
t.Logf(" - TLS Version: %x", resp.TLS.Version)
t.Logf(" - Cipher Suite: %x", resp.TLS.CipherSuite)
if len(resp.TLS.PeerCertificates) > 0 {
cert := resp.TLS.PeerCertificates[0]
t.Logf(" - Certificate Subject: %s", cert.Subject)
t.Logf(" - Certificate Issuer: %s", cert.Issuer)
t.Logf(" - Valid Until: %s", cert.NotAfter)
}
}
})
}
// TestHTTPS_CertificateDetails tests certificate properties
func TestHTTPS_CertificateDetails(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
t.Run("Base domain certificate", func(t *testing.T) {
httpsURL := fmt.Sprintf("https://%s", env.BaseDomain)
// Connect and get certificate info
conn, err := tls.Dial("tcp", env.BaseDomain+":443", &tls.Config{
InsecureSkipVerify: true, // We just want to inspect the cert
})
if err != nil {
t.Logf("⚠ Could not connect to %s:443: %v", env.BaseDomain, err)
t.Skip("HTTPS not available on base domain")
return
}
defer conn.Close()
certs := conn.ConnectionState().PeerCertificates
require.NotEmpty(t, certs, "Should have certificates")
cert := certs[0]
t.Logf("Certificate for %s:", env.BaseDomain)
t.Logf(" - Subject: %s", cert.Subject)
t.Logf(" - DNS Names: %v", cert.DNSNames)
t.Logf(" - Valid From: %s", cert.NotBefore)
t.Logf(" - Valid Until: %s", cert.NotAfter)
t.Logf(" - Issuer: %s", cert.Issuer)
// Check that certificate covers our domain
coversDomain := false
for _, name := range cert.DNSNames {
if name == env.BaseDomain || name == "*."+env.BaseDomain {
coversDomain = true
break
}
}
assert.True(t, coversDomain, "Certificate should cover %s", env.BaseDomain)
// Check certificate is not expired
assert.True(t, time.Now().Before(cert.NotAfter), "Certificate should not be expired")
assert.True(t, time.Now().After(cert.NotBefore), "Certificate should be valid now")
// Make actual HTTPS request to verify it works
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: false,
},
},
}
resp, err := client.Get(httpsURL)
if err != nil {
t.Logf("⚠ HTTPS request failed: %v", err)
} else {
resp.Body.Close()
t.Logf("✓ HTTPS request succeeded with status %d", resp.StatusCode)
}
})
}
// TestHTTPS_HTTPRedirect tests that HTTP requests are redirected to HTTPS
func TestHTTPS_HTTPRedirect(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
t.Run("HTTP redirects to HTTPS", func(t *testing.T) {
// Create client that doesn't follow redirects
client := &http.Client{
Timeout: 30 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
httpURL := fmt.Sprintf("http://%s", env.BaseDomain)
resp, err := client.Get(httpURL)
if err != nil {
t.Logf("⚠ HTTP request failed: %v", err)
t.Skip("HTTP not available or redirects not configured")
return
}
defer resp.Body.Close()
// Check for redirect
if resp.StatusCode >= 300 && resp.StatusCode < 400 {
location := resp.Header.Get("Location")
t.Logf("✓ HTTP redirects to: %s (status %d)", location, resp.StatusCode)
assert.Contains(t, location, "https://", "Should redirect to HTTPS")
} else if resp.StatusCode == http.StatusOK {
// HTTP might just serve content directly in some configurations
t.Logf("⚠ HTTP returned 200 instead of redirect (HTTPS redirect may not be configured)")
} else {
t.Logf("HTTP returned status %d", resp.StatusCode)
}
})
}

View File

@ -0,0 +1,204 @@
//go:build e2e && production
package production
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestHTTPS_ExternalAccess tests that deployed apps are accessible via HTTPS
// from the public internet with valid SSL certificates.
//
// This test requires:
// - Orama deployed on a VPS with a real domain
// - DNS properly configured
// - Run with: go test -v -tags "e2e production" -run TestHTTPS ./e2e/production/...
func TestHTTPS_ExternalAccess(t *testing.T) {
// Skip if not configured for external testing
externalURL := os.Getenv("ORAMA_EXTERNAL_URL")
if externalURL == "" {
t.Skip("ORAMA_EXTERNAL_URL not set - skipping external HTTPS test")
}
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("https-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
// Cleanup after test
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy static app", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
t.Logf("Created deployment: %s (ID: %s)", deploymentName, deploymentID)
})
var deploymentDomain string
t.Run("Get deployment domain", func(t *testing.T) {
deployment := e2e.GetDeployment(t, env, deploymentID)
nodeURL := extractNodeURL(t, deployment)
require.NotEmpty(t, nodeURL, "Deployment should have node URL")
deploymentDomain = extractDomain(nodeURL)
t.Logf("Deployment domain: %s", deploymentDomain)
})
t.Run("Wait for DNS propagation", func(t *testing.T) {
// Poll DNS until the domain resolves
deadline := time.Now().Add(2 * time.Minute)
for time.Now().Before(deadline) {
ips, err := net.LookupHost(deploymentDomain)
if err == nil && len(ips) > 0 {
t.Logf("DNS resolved: %s -> %v", deploymentDomain, ips)
return
}
t.Logf("DNS not yet resolved, waiting...")
time.Sleep(5 * time.Second)
}
t.Fatalf("DNS did not resolve within timeout for %s", deploymentDomain)
})
t.Run("Test HTTPS access with valid certificate", func(t *testing.T) {
// Create HTTP client that DOES verify certificates
// (no InsecureSkipVerify - we want to test real SSL)
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
// Use default verification (validates certificate)
InsecureSkipVerify: false,
},
},
}
url := fmt.Sprintf("https://%s/", deploymentDomain)
t.Logf("Testing HTTPS: %s", url)
resp, err := client.Get(url)
require.NoError(t, err, "HTTPS request should succeed with valid certificate")
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode, "Should return 200 OK")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Verify it's our React app
assert.Contains(t, string(body), "<div id=\"root\">", "Should serve React app")
t.Logf("HTTPS test passed: %s returned %d", url, resp.StatusCode)
})
t.Run("Verify SSL certificate details", func(t *testing.T) {
conn, err := tls.Dial("tcp", deploymentDomain+":443", nil)
require.NoError(t, err, "TLS dial should succeed")
defer conn.Close()
state := conn.ConnectionState()
require.NotEmpty(t, state.PeerCertificates, "Should have peer certificates")
cert := state.PeerCertificates[0]
t.Logf("Certificate subject: %s", cert.Subject)
t.Logf("Certificate issuer: %s", cert.Issuer)
t.Logf("Certificate valid from: %s to %s", cert.NotBefore, cert.NotAfter)
// Verify certificate is not expired
assert.True(t, time.Now().After(cert.NotBefore), "Certificate should be valid (not before)")
assert.True(t, time.Now().Before(cert.NotAfter), "Certificate should be valid (not expired)")
// Verify domain matches
err = cert.VerifyHostname(deploymentDomain)
assert.NoError(t, err, "Certificate should be valid for domain %s", deploymentDomain)
})
}
// TestHTTPS_DomainFormat verifies deployment URL format
func TestHTTPS_DomainFormat(t *testing.T) {
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
deploymentName := fmt.Sprintf("domain-test-%d", time.Now().Unix())
tarballPath := filepath.Join("../../testdata/apps/react-app")
var deploymentID string
// Cleanup after test
defer func() {
if !env.SkipCleanup && deploymentID != "" {
e2e.DeleteDeployment(t, env, deploymentID)
}
}()
t.Run("Deploy app and verify domain format", func(t *testing.T) {
deploymentID = e2e.CreateTestDeployment(t, env, deploymentName, tarballPath)
require.NotEmpty(t, deploymentID)
deployment := e2e.GetDeployment(t, env, deploymentID)
t.Logf("Deployment URLs: %+v", deployment["urls"])
// Get deployment URL (handles both array and map formats)
deploymentURL := extractNodeURL(t, deployment)
assert.NotEmpty(t, deploymentURL, "Should have deployment URL")
// URL should be simple format: {name}.{baseDomain} (NOT {name}.node-{shortID}.{baseDomain})
if deploymentURL != "" {
assert.NotContains(t, deploymentURL, ".node-", "URL should NOT contain node identifier (simplified format)")
assert.Contains(t, deploymentURL, deploymentName, "URL should contain deployment name")
t.Logf("Deployment URL: %s", deploymentURL)
}
})
}
func extractNodeURL(t *testing.T, deployment map[string]interface{}) string {
t.Helper()
if urls, ok := deployment["urls"].([]interface{}); ok && len(urls) > 0 {
if url, ok := urls[0].(string); ok {
return url
}
}
if urls, ok := deployment["urls"].(map[string]interface{}); ok {
if url, ok := urls["node"].(string); ok {
return url
}
}
return ""
}
func extractDomain(url string) string {
domain := url
if len(url) > 8 && url[:8] == "https://" {
domain = url[8:]
} else if len(url) > 7 && url[:7] == "http://" {
domain = url[7:]
}
if len(domain) > 0 && domain[len(domain)-1] == '/' {
domain = domain[:len(domain)-1]
}
return domain
}

View File

@ -0,0 +1,99 @@
//go:build e2e && production
package production
import (
"fmt"
"io"
"net/http"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMiddleware_NonExistentDeployment verifies that requests to a non-existent
// deployment return 404 (not 502 or hang).
func TestMiddleware_NonExistentDeployment(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
domain := fmt.Sprintf("does-not-exist-%d.%s", time.Now().Unix(), env.BaseDomain)
req, _ := http.NewRequest("GET", env.GatewayURL+"/", nil)
req.Host = domain
start := time.Now()
resp, err := env.HTTPClient.Do(req)
elapsed := time.Since(start)
if err != nil {
t.Logf("Request failed in %v: %v", elapsed, err)
// Connection refused or timeout is acceptable
assert.Less(t, elapsed.Seconds(), 15.0, "Should fail fast")
return
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
t.Logf("Status: %d, elapsed: %v, body: %s", resp.StatusCode, elapsed, string(body))
// Should be 404 or 502, not 200
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
"Non-existent deployment should not return 200")
assert.Less(t, elapsed.Seconds(), 15.0, "Should respond fast")
}
// TestMiddleware_InternalAPIAuthRejection verifies that internal replica API
// endpoints reject requests without the proper internal auth header.
func TestMiddleware_InternalAPIAuthRejection(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err)
t.Run("No auth header rejected", func(t *testing.T) {
req, _ := http.NewRequest("POST",
env.GatewayURL+"/v1/internal/deployments/replica/setup", nil)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
// Should be rejected (401 or 403)
assert.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"Internal API without auth should be rejected (got %d)", resp.StatusCode)
})
t.Run("Wrong auth header rejected", func(t *testing.T) {
req, _ := http.NewRequest("POST",
env.GatewayURL+"/v1/internal/deployments/replica/setup", nil)
req.Header.Set("X-Orama-Internal-Auth", "wrong-token")
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden || resp.StatusCode == http.StatusBadRequest,
"Internal API with wrong auth should be rejected (got %d)", resp.StatusCode)
})
t.Run("Regular API key does not grant internal access", func(t *testing.T) {
req, _ := http.NewRequest("POST",
env.GatewayURL+"/v1/internal/deployments/replica/setup", nil)
req.Header.Set("Authorization", "Bearer "+env.APIKey)
resp, err := env.HTTPClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
// The request may pass auth but fail on bad body — 400 is acceptable
// But it should NOT succeed with 200
assert.NotEqual(t, http.StatusOK, resp.StatusCode,
"Regular API key should not fully authenticate internal endpoints")
})
}

View File

@ -0,0 +1,181 @@
//go:build e2e && production
package production
import (
"context"
"net"
"strings"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNameserver_NSRecords tests that NS records are properly configured for the domain
func TestNameserver_NSRecords(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
if len(env.Config.Nameservers) == 0 {
t.Skip("No nameservers configured in e2e/config.yaml")
}
t.Run("NS records exist for base domain", func(t *testing.T) {
nsRecords, err := net.LookupNS(env.BaseDomain)
require.NoError(t, err, "Should be able to look up NS records for %s", env.BaseDomain)
require.NotEmpty(t, nsRecords, "Should have NS records")
t.Logf("Found %d NS records for %s:", len(nsRecords), env.BaseDomain)
for _, ns := range nsRecords {
t.Logf(" - %s", ns.Host)
}
// Verify our nameservers are listed
for _, expected := range env.Config.Nameservers {
found := false
for _, ns := range nsRecords {
// Trim trailing dot for comparison
nsHost := strings.TrimSuffix(ns.Host, ".")
if nsHost == expected || nsHost == expected+"." {
found = true
break
}
}
assert.True(t, found, "NS records should include %s", expected)
}
})
}
// TestNameserver_GlueRecords tests that glue records point to correct IPs
func TestNameserver_GlueRecords(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
if len(env.Config.Nameservers) == 0 {
t.Skip("No nameservers configured in e2e/config.yaml")
}
nameserverServers := e2e.GetNameserverServers(env.Config)
if len(nameserverServers) == 0 {
t.Skip("No servers marked as nameservers in config")
}
t.Run("Glue records resolve to correct IPs", func(t *testing.T) {
for i, ns := range env.Config.Nameservers {
ips, err := net.LookupHost(ns)
require.NoError(t, err, "Nameserver %s should resolve", ns)
require.NotEmpty(t, ips, "Nameserver %s should have IP addresses", ns)
t.Logf("Nameserver %s resolves to: %v", ns, ips)
// If we have the expected IP, verify it matches
if i < len(nameserverServers) {
expectedIP := nameserverServers[i].IP
found := false
for _, ip := range ips {
if ip == expectedIP {
found = true
break
}
}
assert.True(t, found, "Glue record for %s should point to %s (got %v)", ns, expectedIP, ips)
}
}
})
}
// TestNameserver_CoreDNSResponds tests that our CoreDNS servers respond to queries
func TestNameserver_CoreDNSResponds(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
nameserverServers := e2e.GetNameserverServers(env.Config)
if len(nameserverServers) == 0 {
t.Skip("No servers marked as nameservers in config")
}
t.Run("CoreDNS servers respond to queries", func(t *testing.T) {
for _, server := range nameserverServers {
t.Run(server.Name, func(t *testing.T) {
// Create a custom resolver that queries this specific server
resolver := &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{
Timeout: 5 * time.Second,
}
return d.DialContext(ctx, "udp", server.IP+":53")
},
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Query the base domain
ips, err := resolver.LookupHost(ctx, env.BaseDomain)
if err != nil {
// Log the error but don't fail - server might be configured differently
t.Logf("⚠ CoreDNS at %s (%s) query error: %v", server.Name, server.IP, err)
return
}
t.Logf("✓ CoreDNS at %s (%s) responded: %s -> %v", server.Name, server.IP, env.BaseDomain, ips)
assert.NotEmpty(t, ips, "CoreDNS should return IP addresses")
})
}
})
}
// TestNameserver_QueryLatency tests DNS query latency from our nameservers
func TestNameserver_QueryLatency(t *testing.T) {
e2e.SkipIfLocal(t)
env, err := e2e.LoadTestEnv()
require.NoError(t, err, "Failed to load test environment")
nameserverServers := e2e.GetNameserverServers(env.Config)
if len(nameserverServers) == 0 {
t.Skip("No servers marked as nameservers in config")
}
t.Run("DNS query latency is acceptable", func(t *testing.T) {
for _, server := range nameserverServers {
resolver := &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{
Timeout: 5 * time.Second,
}
return d.DialContext(ctx, "udp", server.IP+":53")
},
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
start := time.Now()
_, err := resolver.LookupHost(ctx, env.BaseDomain)
latency := time.Since(start)
if err != nil {
t.Logf("⚠ Query to %s failed: %v", server.Name, err)
continue
}
t.Logf("DNS latency from %s (%s): %v", server.Name, server.IP, latency)
// DNS queries should be fast (under 500ms is reasonable)
assert.Less(t, latency, 500*time.Millisecond,
"DNS query to %s should complete in under 500ms", server.Name)
}
})
}

View File

@ -0,0 +1,148 @@
//go:build e2e
package shared
import (
"net/http"
"testing"
"time"
"github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestAuth_ExpiredOrInvalidJWT verifies that an expired/invalid JWT token is rejected.
func TestAuth_ExpiredOrInvalidJWT(t *testing.T) {
e2e.SkipIfMissingGateway(t)
gatewayURL := e2e.GetGatewayURL()
// Craft an obviously invalid JWT
invalidJWT := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwiZXhwIjoxfQ.invalid"
req, err := http.NewRequest("GET", gatewayURL+"/v1/deployments/list", nil)
require.NoError(t, err)
req.Header.Set("Authorization", "Bearer "+invalidJWT)
client := e2e.NewHTTPClient(10 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Invalid JWT should return 401")
}
// TestAuth_EmptyAPIKey verifies that an empty API key is rejected.
func TestAuth_EmptyAPIKey(t *testing.T) {
e2e.SkipIfMissingGateway(t)
gatewayURL := e2e.GetGatewayURL()
req, err := http.NewRequest("GET", gatewayURL+"/v1/deployments/list", nil)
require.NoError(t, err)
req.Header.Set("Authorization", "Bearer ")
client := e2e.NewHTTPClient(10 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"Empty API key should return 401")
}
// TestAuth_SQLInjectionInAPIKey verifies that SQL injection in the API key
// does not bypass authentication.
func TestAuth_SQLInjectionInAPIKey(t *testing.T) {
e2e.SkipIfMissingGateway(t)
gatewayURL := e2e.GetGatewayURL()
injectionAttempts := []string{
"' OR '1'='1",
"'; DROP TABLE api_keys; --",
"\" OR \"1\"=\"1",
"admin'--",
}
for _, attempt := range injectionAttempts {
t.Run(attempt, func(t *testing.T) {
req, _ := http.NewRequest("GET", gatewayURL+"/v1/deployments/list", nil)
req.Header.Set("Authorization", "Bearer "+attempt)
client := e2e.NewHTTPClient(10 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusUnauthorized, resp.StatusCode,
"SQL injection attempt should be rejected")
})
}
}
// TestAuth_NamespaceScopedAccess verifies that an API key for one namespace
// cannot access another namespace's deployments.
func TestAuth_NamespaceScopedAccess(t *testing.T) {
// Create two environments with different namespaces
env1, err := e2e.LoadTestEnvWithNamespace("auth-test-ns1")
if err != nil {
t.Skip("Could not create namespace env1: " + err.Error())
}
env2, err := e2e.LoadTestEnvWithNamespace("auth-test-ns2")
if err != nil {
t.Skip("Could not create namespace env2: " + err.Error())
}
t.Run("Namespace 1 key cannot list namespace 2 deployments", func(t *testing.T) {
// Use env1's API key to query env2's gateway
// The namespace should be scoped to the API key
req, _ := http.NewRequest("GET", env2.GatewayURL+"/v1/deployments/list", nil)
req.Header.Set("Authorization", "Bearer "+env1.APIKey)
req.Header.Set("X-Namespace", "auth-test-ns2")
resp, err := env1.HTTPClient.Do(req)
if err != nil {
t.Skip("Gateway unreachable")
}
defer resp.Body.Close()
// The API should either reject (403) or return only ns1's deployments
t.Logf("Cross-namespace access returned: %d", resp.StatusCode)
if resp.StatusCode == http.StatusOK {
t.Log("API returned 200 — namespace isolation may be enforced at data level")
}
})
}
// TestAuth_PublicEndpointsNoAuth verifies that health/status endpoints
// don't require authentication.
func TestAuth_PublicEndpointsNoAuth(t *testing.T) {
e2e.SkipIfMissingGateway(t)
gatewayURL := e2e.GetGatewayURL()
client := e2e.NewHTTPClient(10 * time.Second)
publicPaths := []string{
"/v1/health",
"/v1/status",
}
for _, path := range publicPaths {
t.Run(path, func(t *testing.T) {
req, _ := http.NewRequest("GET", gatewayURL+path, nil)
// No auth header
resp, err := client.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode,
"%s should be accessible without auth", path)
})
}
}

View File

@ -0,0 +1,333 @@
//go:build e2e
package shared_test
import (
"context"
"net/http"
"testing"
"time"
"unicode"
e2e "github.com/DeBrosOfficial/network/e2e"
"github.com/stretchr/testify/require"
)
// =============================================================================
// STRICT AUTHENTICATION NEGATIVE TESTS
// These tests verify that authentication is properly enforced.
// Tests FAIL if unauthenticated/invalid requests are allowed through.
// =============================================================================
func TestAuth_MissingAPIKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request protected endpoint without auth headers
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// STRICT: Must reject requests without authentication
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"FAIL: Protected endpoint allowed request without auth - expected 401/403, got %d", resp.StatusCode)
t.Logf(" ✓ Missing API key correctly rejected with status %d", resp.StatusCode)
}
func TestAuth_InvalidAPIKey(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with invalid API key
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
req.Header.Set("Authorization", "Bearer invalid-key-xyz-123456789")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// STRICT: Must reject invalid API keys
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"FAIL: Invalid API key was accepted - expected 401/403, got %d", resp.StatusCode)
t.Logf(" ✓ Invalid API key correctly rejected with status %d", resp.StatusCode)
}
func TestAuth_CacheWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request cache endpoint without auth
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/cache/health",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Request failed")
// STRICT: Cache endpoint must require authentication
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
"FAIL: Cache endpoint accessible without auth - expected 401/403, got %d", status)
t.Logf(" ✓ Cache endpoint correctly requires auth (status %d)", status)
}
func TestAuth_StorageWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request storage endpoint without auth
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/storage/status/QmTest",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Request failed")
// STRICT: Storage endpoint must require authentication
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
"FAIL: Storage endpoint accessible without auth - expected 401/403, got %d", status)
t.Logf(" ✓ Storage endpoint correctly requires auth (status %d)", status)
}
func TestAuth_RQLiteWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request rqlite endpoint without auth
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Request failed")
// STRICT: RQLite endpoint must require authentication
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
"FAIL: RQLite endpoint accessible without auth - expected 401/403, got %d", status)
t.Logf(" ✓ RQLite endpoint correctly requires auth (status %d)", status)
}
func TestAuth_MalformedBearerToken(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with malformed bearer token (missing "Bearer " prefix)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
req.Header.Set("Authorization", "invalid-token-format-no-bearer")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// STRICT: Must reject malformed authorization headers
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"FAIL: Malformed auth header accepted - expected 401/403, got %d", resp.StatusCode)
t.Logf(" ✓ Malformed bearer token correctly rejected (status %d)", resp.StatusCode)
}
func TestAuth_ExpiredJWT(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Test with a clearly invalid JWT structure
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
req.Header.Set("Authorization", "Bearer expired.jwt.token.invalid")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// STRICT: Must reject invalid/expired JWT tokens
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"FAIL: Invalid JWT accepted - expected 401/403, got %d", resp.StatusCode)
t.Logf(" ✓ Invalid JWT correctly rejected (status %d)", resp.StatusCode)
}
func TestAuth_EmptyBearerToken(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with empty bearer token
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
req.Header.Set("Authorization", "Bearer ")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// STRICT: Must reject empty bearer tokens
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"FAIL: Empty bearer token accepted - expected 401/403, got %d", resp.StatusCode)
t.Logf(" ✓ Empty bearer token correctly rejected (status %d)", resp.StatusCode)
}
func TestAuth_DuplicateAuthHeaders(t *testing.T) {
if e2e.GetAPIKey() == "" {
t.Skip("No API key configured")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request with both valid API key in Authorization header
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/cache/health",
Headers: map[string]string{
"Authorization": "Bearer " + e2e.GetAPIKey(),
"X-API-Key": e2e.GetAPIKey(),
},
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Request failed")
// Should succeed since we have a valid API key
require.Equal(t, http.StatusOK, status,
"FAIL: Valid API key rejected when multiple auth headers present - got %d", status)
t.Logf(" ✓ Duplicate auth headers with valid key succeeds (status %d)", status)
}
func TestAuth_CaseSensitiveAPIKey(t *testing.T) {
apiKey := e2e.GetAPIKey()
if apiKey == "" {
t.Skip("No API key configured")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Create incorrectly cased API key
incorrectKey := ""
for i, ch := range apiKey {
if i%2 == 0 && unicode.IsLetter(ch) {
if unicode.IsLower(ch) {
incorrectKey += string(unicode.ToUpper(ch))
} else {
incorrectKey += string(unicode.ToLower(ch))
}
} else {
incorrectKey += string(ch)
}
}
// Skip if the key didn't change (no letters)
if incorrectKey == apiKey {
t.Skip("API key has no letters to change case")
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/cache/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
req.Header.Set("Authorization", "Bearer "+incorrectKey)
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// STRICT: API keys MUST be case-sensitive
require.True(t, resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden,
"FAIL: API key check is not case-sensitive - modified key accepted with status %d", resp.StatusCode)
t.Logf(" ✓ Case-modified API key correctly rejected (status %d)", resp.StatusCode)
}
func TestAuth_HealthEndpointNoAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Health endpoint at /v1/health should NOT require auth
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/health", nil)
require.NoError(t, err, "FAIL: Could not create request")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// Health endpoint should be publicly accessible
require.Equal(t, http.StatusOK, resp.StatusCode,
"FAIL: Health endpoint should not require auth - expected 200, got %d", resp.StatusCode)
t.Logf(" ✓ Health endpoint correctly accessible without auth")
}
func TestAuth_StatusEndpointNoAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Status endpoint at /v1/status should NOT require auth
req, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/status", nil)
require.NoError(t, err, "FAIL: Could not create request")
client := e2e.NewHTTPClient(30 * time.Second)
resp, err := client.Do(req)
require.NoError(t, err, "FAIL: Request failed")
defer resp.Body.Close()
// Status endpoint should be publicly accessible
require.Equal(t, http.StatusOK, resp.StatusCode,
"FAIL: Status endpoint should not require auth - expected 200, got %d", resp.StatusCode)
t.Logf(" ✓ Status endpoint correctly accessible without auth")
}
func TestAuth_DeploymentsWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request deployments endpoint without auth
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/deployments/list",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Request failed")
// STRICT: Deployments endpoint must require authentication
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
"FAIL: Deployments endpoint accessible without auth - expected 401/403, got %d", status)
t.Logf(" ✓ Deployments endpoint correctly requires auth (status %d)", status)
}
func TestAuth_SQLiteWithoutAuth(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Request SQLite endpoint without auth
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: e2e.GetGatewayURL() + "/v1/db/sqlite/list",
SkipAuth: true,
}
_, status, err := req.Do(ctx)
require.NoError(t, err, "FAIL: Request failed")
// STRICT: SQLite endpoint must require authentication
require.True(t, status == http.StatusUnauthorized || status == http.StatusForbidden,
"FAIL: SQLite endpoint accessible without auth - expected 401/403, got %d", status)
t.Logf(" ✓ SQLite endpoint correctly requires auth (status %d)", status)
}

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package shared_test
import (
"context"
@ -8,17 +8,19 @@ import (
"net/http"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
func TestCache_Health(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/cache/health",
URL: e2e.GetGatewayURL() + "/v1/cache/health",
}
body, status, err := req.Do(ctx)
@ -31,7 +33,7 @@ func TestCache_Health(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -45,19 +47,19 @@ func TestCache_Health(t *testing.T) {
}
func TestCache_PutGet(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "test-key"
value := "test-value"
// Put value
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -75,9 +77,9 @@ func TestCache_PutGet(t *testing.T) {
}
// Get value
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -94,7 +96,7 @@ func TestCache_PutGet(t *testing.T) {
}
var getResp map[string]interface{}
if err := DecodeJSON(body, &getResp); err != nil {
if err := e2e.DecodeJSON(body, &getResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -104,12 +106,12 @@ func TestCache_PutGet(t *testing.T) {
}
func TestCache_PutGetJSON(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "json-key"
jsonValue := map[string]interface{}{
"name": "John",
@ -118,9 +120,9 @@ func TestCache_PutGetJSON(t *testing.T) {
}
// Put JSON value
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -138,9 +140,9 @@ func TestCache_PutGetJSON(t *testing.T) {
}
// Get JSON value
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -157,7 +159,7 @@ func TestCache_PutGetJSON(t *testing.T) {
}
var getResp map[string]interface{}
if err := DecodeJSON(body, &getResp); err != nil {
if err := e2e.DecodeJSON(body, &getResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -171,19 +173,19 @@ func TestCache_PutGetJSON(t *testing.T) {
}
func TestCache_Delete(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "delete-key"
value := "delete-value"
// Put value
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -197,9 +199,9 @@ func TestCache_Delete(t *testing.T) {
}
// Delete value
deleteReq := &HTTPRequest{
deleteReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/delete",
URL: e2e.GetGatewayURL() + "/v1/cache/delete",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -216,9 +218,9 @@ func TestCache_Delete(t *testing.T) {
}
// Verify deletion
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -233,19 +235,19 @@ func TestCache_Delete(t *testing.T) {
}
func TestCache_TTL(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
key := "ttl-key"
value := "ttl-value"
// Put value with TTL
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -264,9 +266,9 @@ func TestCache_TTL(t *testing.T) {
}
// Verify value exists
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -279,7 +281,7 @@ func TestCache_TTL(t *testing.T) {
}
// Wait for TTL expiry (2 seconds + buffer)
Delay(2500)
e2e.Delay(2500)
// Verify value is expired
_, status, err = getReq.Do(ctx)
@ -289,19 +291,19 @@ func TestCache_TTL(t *testing.T) {
}
func TestCache_Scan(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
// Put multiple keys
keys := []string{"user-1", "user-2", "session-1", "session-2"}
for _, key := range keys {
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -316,9 +318,9 @@ func TestCache_Scan(t *testing.T) {
}
// Scan all keys
scanReq := &HTTPRequest{
scanReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/scan",
URL: e2e.GetGatewayURL() + "/v1/cache/scan",
Body: map[string]interface{}{
"dmap": dmap,
},
@ -334,7 +336,7 @@ func TestCache_Scan(t *testing.T) {
}
var scanResp map[string]interface{}
if err := DecodeJSON(body, &scanResp); err != nil {
if err := e2e.DecodeJSON(body, &scanResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -345,19 +347,19 @@ func TestCache_Scan(t *testing.T) {
}
func TestCache_ScanWithRegex(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
// Put keys with different patterns
keys := []string{"user-1", "user-2", "session-1", "session-2"}
for _, key := range keys {
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -372,9 +374,9 @@ func TestCache_ScanWithRegex(t *testing.T) {
}
// Scan with regex pattern
scanReq := &HTTPRequest{
scanReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/scan",
URL: e2e.GetGatewayURL() + "/v1/cache/scan",
Body: map[string]interface{}{
"dmap": dmap,
"pattern": "^user-",
@ -391,7 +393,7 @@ func TestCache_ScanWithRegex(t *testing.T) {
}
var scanResp map[string]interface{}
if err := DecodeJSON(body, &scanResp); err != nil {
if err := e2e.DecodeJSON(body, &scanResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -402,19 +404,19 @@ func TestCache_ScanWithRegex(t *testing.T) {
}
func TestCache_MultiGet(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
keys := []string{"key-1", "key-2", "key-3"}
// Put values
for i, key := range keys {
putReq := &HTTPRequest{
putReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/put",
URL: e2e.GetGatewayURL() + "/v1/cache/put",
Body: map[string]interface{}{
"dmap": dmap,
"key": key,
@ -429,9 +431,9 @@ func TestCache_MultiGet(t *testing.T) {
}
// Multi-get
multiGetReq := &HTTPRequest{
multiGetReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/mget",
URL: e2e.GetGatewayURL() + "/v1/cache/mget",
Body: map[string]interface{}{
"dmap": dmap,
"keys": keys,
@ -448,7 +450,7 @@ func TestCache_MultiGet(t *testing.T) {
}
var mgetResp map[string]interface{}
if err := DecodeJSON(body, &mgetResp); err != nil {
if err := e2e.DecodeJSON(body, &mgetResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -459,14 +461,14 @@ func TestCache_MultiGet(t *testing.T) {
}
func TestCache_MissingDMap(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": "",
"key": "any-key",
@ -484,16 +486,16 @@ func TestCache_MissingDMap(t *testing.T) {
}
func TestCache_MissingKey(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dmap := GenerateDMapName()
dmap := e2e.GenerateDMapName()
getReq := &HTTPRequest{
getReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/cache/get",
URL: e2e.GetGatewayURL() + "/v1/cache/get",
Body: map[string]interface{}{
"dmap": dmap,
"key": "non-existent-key",

View File

@ -1,23 +1,25 @@
//go:build e2e
package e2e
package shared_test
import (
"context"
"net/http"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
func TestNetwork_Health(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/health",
URL: e2e.GetGatewayURL() + "/v1/health",
SkipAuth: true,
}
@ -31,7 +33,7 @@ func TestNetwork_Health(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -41,14 +43,14 @@ func TestNetwork_Health(t *testing.T) {
}
func TestNetwork_Status(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/status",
URL: e2e.GetGatewayURL() + "/v1/network/status",
}
body, status, err := req.Do(ctx)
@ -61,7 +63,7 @@ func TestNetwork_Status(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -75,14 +77,14 @@ func TestNetwork_Status(t *testing.T) {
}
func TestNetwork_Peers(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/peers",
URL: e2e.GetGatewayURL() + "/v1/network/peers",
}
body, status, err := req.Do(ctx)
@ -95,7 +97,7 @@ func TestNetwork_Peers(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -105,14 +107,14 @@ func TestNetwork_Peers(t *testing.T) {
}
func TestNetwork_ProxyAnonSuccess(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/proxy/anon",
URL: e2e.GetGatewayURL() + "/v1/proxy/anon",
Body: map[string]interface{}{
"url": "https://httpbin.org/get",
"method": "GET",
@ -130,7 +132,7 @@ func TestNetwork_ProxyAnonSuccess(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -144,14 +146,14 @@ func TestNetwork_ProxyAnonSuccess(t *testing.T) {
}
func TestNetwork_ProxyAnonBadURL(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/proxy/anon",
URL: e2e.GetGatewayURL() + "/v1/proxy/anon",
Body: map[string]interface{}{
"url": "http://localhost:1/nonexistent",
"method": "GET",
@ -165,14 +167,14 @@ func TestNetwork_ProxyAnonBadURL(t *testing.T) {
}
func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/proxy/anon",
URL: e2e.GetGatewayURL() + "/v1/proxy/anon",
Body: map[string]interface{}{
"url": "https://httpbin.org/post",
"method": "POST",
@ -191,7 +193,7 @@ func TestNetwork_ProxyAnonPostRequest(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -206,9 +208,9 @@ func TestNetwork_Unauthorized(t *testing.T) {
defer cancel()
// Create request without auth
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/network/status",
URL: e2e.GetGatewayURL() + "/v1/network/status",
SkipAuth: true,
}

View File

@ -1,40 +1,42 @@
//go:build e2e
package e2e
package shared_test
import (
"fmt"
"sync"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
// TestPubSub_SubscribePublish tests basic pub/sub functionality via WebSocket
func TestPubSub_SubscribePublish(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
message := "test-message-from-publisher"
// Create subscriber first
subscriber, err := NewWSPubSubClient(t, topic)
subscriber, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber: %v", err)
}
defer subscriber.Close()
// Give subscriber time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish message
if err := publisher.Publish([]byte(message)); err != nil {
@ -54,37 +56,37 @@ func TestPubSub_SubscribePublish(t *testing.T) {
// TestPubSub_MultipleSubscribers tests that multiple subscribers receive the same message
func TestPubSub_MultipleSubscribers(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
message1 := "message-1"
message2 := "message-2"
// Create two subscribers
sub1, err := NewWSPubSubClient(t, topic)
sub1, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber1: %v", err)
}
defer sub1.Close()
sub2, err := NewWSPubSubClient(t, topic)
sub2, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber2: %v", err)
}
defer sub2.Close()
// Give subscribers time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish first message
if err := publisher.Publish([]byte(message1)); err != nil {
@ -133,30 +135,30 @@ func TestPubSub_MultipleSubscribers(t *testing.T) {
// TestPubSub_Deduplication tests that multiple identical messages are all received
func TestPubSub_Deduplication(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
message := "duplicate-test-message"
// Create subscriber
subscriber, err := NewWSPubSubClient(t, topic)
subscriber, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber: %v", err)
}
defer subscriber.Close()
// Give subscriber time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish the same message multiple times
for i := 0; i < 3; i++ {
@ -164,7 +166,7 @@ func TestPubSub_Deduplication(t *testing.T) {
t.Fatalf("publish %d failed: %v", i, err)
}
// Small delay between publishes
Delay(50)
e2e.Delay(50)
}
// Receive messages - should get all (no dedup filter)
@ -185,30 +187,30 @@ func TestPubSub_Deduplication(t *testing.T) {
// TestPubSub_ConcurrentPublish tests concurrent message publishing
func TestPubSub_ConcurrentPublish(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
numMessages := 10
// Create subscriber
subscriber, err := NewWSPubSubClient(t, topic)
subscriber, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber: %v", err)
}
defer subscriber.Close()
// Give subscriber time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish multiple messages concurrently
var wg sync.WaitGroup
@ -241,45 +243,45 @@ func TestPubSub_ConcurrentPublish(t *testing.T) {
// TestPubSub_TopicIsolation tests that messages are isolated to their topics
func TestPubSub_TopicIsolation(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic1 := GenerateTopic()
topic2 := GenerateTopic()
topic1 := e2e.GenerateTopic()
topic2 := e2e.GenerateTopic()
msg1 := "message-on-topic1"
msg2 := "message-on-topic2"
// Create subscriber for topic1
sub1, err := NewWSPubSubClient(t, topic1)
sub1, err := e2e.NewWSPubSubClient(t, topic1)
if err != nil {
t.Fatalf("failed to create subscriber1: %v", err)
}
defer sub1.Close()
// Create subscriber for topic2
sub2, err := NewWSPubSubClient(t, topic2)
sub2, err := e2e.NewWSPubSubClient(t, topic2)
if err != nil {
t.Fatalf("failed to create subscriber2: %v", err)
}
defer sub2.Close()
// Give subscribers time to register
Delay(200)
e2e.Delay(200)
// Create publishers
pub1, err := NewWSPubSubClient(t, topic1)
pub1, err := e2e.NewWSPubSubClient(t, topic1)
if err != nil {
t.Fatalf("failed to create publisher1: %v", err)
}
defer pub1.Close()
pub2, err := NewWSPubSubClient(t, topic2)
pub2, err := e2e.NewWSPubSubClient(t, topic2)
if err != nil {
t.Fatalf("failed to create publisher2: %v", err)
}
defer pub2.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish to topic2 first
if err := pub2.Publish([]byte(msg2)); err != nil {
@ -312,29 +314,29 @@ func TestPubSub_TopicIsolation(t *testing.T) {
// TestPubSub_EmptyMessage tests sending and receiving empty messages
func TestPubSub_EmptyMessage(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
// Create subscriber
subscriber, err := NewWSPubSubClient(t, topic)
subscriber, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber: %v", err)
}
defer subscriber.Close()
// Give subscriber time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish empty message
if err := publisher.Publish([]byte("")); err != nil {
@ -354,9 +356,9 @@ func TestPubSub_EmptyMessage(t *testing.T) {
// TestPubSub_LargeMessage tests sending and receiving large messages
func TestPubSub_LargeMessage(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
// Create a large message (100KB)
largeMessage := make([]byte, 100*1024)
@ -365,24 +367,24 @@ func TestPubSub_LargeMessage(t *testing.T) {
}
// Create subscriber
subscriber, err := NewWSPubSubClient(t, topic)
subscriber, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber: %v", err)
}
defer subscriber.Close()
// Give subscriber time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish large message
if err := publisher.Publish(largeMessage); err != nil {
@ -409,30 +411,30 @@ func TestPubSub_LargeMessage(t *testing.T) {
// TestPubSub_RapidPublish tests rapid message publishing
func TestPubSub_RapidPublish(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
numMessages := 50
// Create subscriber
subscriber, err := NewWSPubSubClient(t, topic)
subscriber, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create subscriber: %v", err)
}
defer subscriber.Close()
// Give subscriber time to register
Delay(200)
e2e.Delay(200)
// Create publisher
publisher, err := NewWSPubSubClient(t, topic)
publisher, err := e2e.NewWSPubSubClient(t, topic)
if err != nil {
t.Fatalf("failed to create publisher: %v", err)
}
defer publisher.Close()
// Give connections time to stabilize
Delay(200)
e2e.Delay(200)
// Publish messages rapidly
for i := 0; i < numMessages; i++ {

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package shared_test
import (
"context"
@ -9,17 +9,19 @@ import (
"net/http"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
func TestPubSub_Presence(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
topic := GenerateTopic()
topic := e2e.GenerateTopic()
memberID := "user123"
memberMeta := map[string]interface{}{"name": "Alice"}
// 1. Subscribe with presence
client1, err := NewWSPubSubPresenceClient(t, topic, memberID, memberMeta)
client1, err := e2e.NewWSPubSubPresenceClient(t, topic, memberID, memberMeta)
if err != nil {
t.Fatalf("failed to create presence client: %v", err)
}
@ -48,9 +50,9 @@ func TestPubSub_Presence(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: fmt.Sprintf("%s/v1/pubsub/presence?topic=%s", GetGatewayURL(), topic),
URL: fmt.Sprintf("%s/v1/pubsub/presence?topic=%s", e2e.GetGatewayURL(), topic),
}
body, status, err := req.Do(ctx)
@ -63,7 +65,7 @@ func TestPubSub_Presence(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -83,7 +85,7 @@ func TestPubSub_Presence(t *testing.T) {
// 3. Subscribe second member
memberID2 := "user456"
client2, err := NewWSPubSubPresenceClient(t, topic, memberID2, nil)
client2, err := e2e.NewWSPubSubPresenceClient(t, topic, memberID2, nil)
if err != nil {
t.Fatalf("failed to create second presence client: %v", err)
}
@ -119,4 +121,3 @@ func TestPubSub_Presence(t *testing.T) {
t.Fatalf("expected presence.leave for %s, got %v for %v", memberID2, event["type"], event["member_id"])
}
}

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package shared_test
import (
"context"
@ -8,23 +8,36 @@ import (
"net/http"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
func TestRQLite_CreateTable(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
table := GenerateTableName()
table := e2e.GenerateTableName()
// Cleanup table after test
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
schema := fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)",
table,
)
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": schema,
},
@ -41,21 +54,32 @@ func TestRQLite_CreateTable(t *testing.T) {
}
func TestRQLite_InsertQuery(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
table := GenerateTableName()
table := e2e.GenerateTableName()
// Cleanup table after test
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
schema := fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)",
table,
)
// Create table
createReq := &HTTPRequest{
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": schema,
},
@ -67,9 +91,9 @@ func TestRQLite_InsertQuery(t *testing.T) {
}
// Insert rows
insertReq := &HTTPRequest{
insertReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/transaction",
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s(name) VALUES ('alice')", table),
@ -84,9 +108,9 @@ func TestRQLite_InsertQuery(t *testing.T) {
}
// Query rows
queryReq := &HTTPRequest{
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/query",
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT name FROM %s ORDER BY id", table),
},
@ -102,7 +126,7 @@ func TestRQLite_InsertQuery(t *testing.T) {
}
var queryResp map[string]interface{}
if err := DecodeJSON(body, &queryResp); err != nil {
if err := e2e.DecodeJSON(body, &queryResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -112,21 +136,21 @@ func TestRQLite_InsertQuery(t *testing.T) {
}
func TestRQLite_DropTable(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
table := GenerateTableName()
table := e2e.GenerateTableName()
schema := fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)",
table,
)
// Create table
createReq := &HTTPRequest{
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": schema,
},
@ -138,9 +162,9 @@ func TestRQLite_DropTable(t *testing.T) {
}
// Drop table
dropReq := &HTTPRequest{
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/drop-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{
"table": table,
},
@ -156,9 +180,9 @@ func TestRQLite_DropTable(t *testing.T) {
}
// Verify table doesn't exist via schema
schemaReq := &HTTPRequest{
schemaReq := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/rqlite/schema",
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
}
body, status, err := schemaReq.Do(ctx)
@ -168,7 +192,7 @@ func TestRQLite_DropTable(t *testing.T) {
}
var schemaResp map[string]interface{}
if err := DecodeJSON(body, &schemaResp); err != nil {
if err := e2e.DecodeJSON(body, &schemaResp); err != nil {
t.Logf("warning: failed to decode schema response: %v", err)
return
}
@ -184,14 +208,14 @@ func TestRQLite_DropTable(t *testing.T) {
}
func TestRQLite_Schema(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/rqlite/schema",
URL: e2e.GetGatewayURL() + "/v1/rqlite/schema",
}
body, status, err := req.Do(ctx)
@ -204,7 +228,7 @@ func TestRQLite_Schema(t *testing.T) {
}
var resp map[string]interface{}
if err := DecodeJSON(body, &resp); err != nil {
if err := e2e.DecodeJSON(body, &resp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -214,14 +238,14 @@ func TestRQLite_Schema(t *testing.T) {
}
func TestRQLite_MalformedSQL(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := &HTTPRequest{
req := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/query",
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": "SELECT * FROM nonexistent_table WHERE invalid syntax",
},
@ -239,21 +263,32 @@ func TestRQLite_MalformedSQL(t *testing.T) {
}
func TestRQLite_LargeTransaction(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
table := GenerateTableName()
table := e2e.GenerateTableName()
// Cleanup table after test
defer func() {
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": table},
}
dropReq.Do(context.Background())
}()
schema := fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)",
table,
)
// Create table
createReq := &HTTPRequest{
createReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": schema,
},
@ -270,9 +305,9 @@ func TestRQLite_LargeTransaction(t *testing.T) {
statements = append(statements, fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, i))
}
txReq := &HTTPRequest{
txReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/transaction",
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": statements,
},
@ -284,9 +319,9 @@ func TestRQLite_LargeTransaction(t *testing.T) {
}
// Verify all rows were inserted
queryReq := &HTTPRequest{
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/query",
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table),
},
@ -298,7 +333,7 @@ func TestRQLite_LargeTransaction(t *testing.T) {
}
var countResp map[string]interface{}
if err := DecodeJSON(body, &countResp); err != nil {
if err := e2e.DecodeJSON(body, &countResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -312,18 +347,35 @@ func TestRQLite_LargeTransaction(t *testing.T) {
}
func TestRQLite_ForeignKeyMigration(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
orgsTable := GenerateTableName()
usersTable := GenerateTableName()
orgsTable := e2e.GenerateTableName()
usersTable := e2e.GenerateTableName()
// Cleanup tables after test
defer func() {
dropUsersReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": usersTable},
}
dropUsersReq.Do(context.Background())
dropOrgsReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{"table": orgsTable},
}
dropOrgsReq.Do(context.Background())
}()
// Create base tables
createOrgsReq := &HTTPRequest{
createOrgsReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)",
@ -337,9 +389,9 @@ func TestRQLite_ForeignKeyMigration(t *testing.T) {
t.Fatalf("create orgs table failed: status %d, err %v", status, err)
}
createUsersReq := &HTTPRequest{
createUsersReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/create-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/create-table",
Body: map[string]interface{}{
"schema": fmt.Sprintf(
"CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)",
@ -354,9 +406,9 @@ func TestRQLite_ForeignKeyMigration(t *testing.T) {
}
// Seed data
seedReq := &HTTPRequest{
seedReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/transaction",
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf("INSERT INTO %s(id,name) VALUES (1,'org')", orgsTable),
@ -371,9 +423,9 @@ func TestRQLite_ForeignKeyMigration(t *testing.T) {
}
// Migrate: change age type and add FK
migrationReq := &HTTPRequest{
migrationReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/transaction",
URL: e2e.GetGatewayURL() + "/v1/rqlite/transaction",
Body: map[string]interface{}{
"statements": []string{
fmt.Sprintf(
@ -396,9 +448,9 @@ func TestRQLite_ForeignKeyMigration(t *testing.T) {
}
// Verify data is intact
queryReq := &HTTPRequest{
queryReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/query",
URL: e2e.GetGatewayURL() + "/v1/rqlite/query",
Body: map[string]interface{}{
"sql": fmt.Sprintf("SELECT name, org_id, age FROM %s", usersTable),
},
@ -410,7 +462,7 @@ func TestRQLite_ForeignKeyMigration(t *testing.T) {
}
var queryResp map[string]interface{}
if err := DecodeJSON(body, &queryResp); err != nil {
if err := e2e.DecodeJSON(body, &queryResp); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -420,14 +472,14 @@ func TestRQLite_ForeignKeyMigration(t *testing.T) {
}
func TestRQLite_DropNonexistentTable(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dropReq := &HTTPRequest{
dropReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/rqlite/drop-table",
URL: e2e.GetGatewayURL() + "/v1/rqlite/drop-table",
Body: map[string]interface{}{
"table": "nonexistent_table_xyz_" + fmt.Sprintf("%d", time.Now().UnixNano()),
},

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package shared_test
import (
"bytes"
@ -11,10 +11,12 @@ import (
"os"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
func TestServerless_DeployAndInvoke(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -30,7 +32,11 @@ func TestServerless_DeployAndInvoke(t *testing.T) {
}
funcName := "e2e-hello"
namespace := "default"
// Use namespace from environment or default to test namespace
namespace := os.Getenv("ORAMA_NAMESPACE")
if namespace == "" {
namespace = "default-test-ns" // Match the namespace from LoadTestEnv()
}
// 1. Deploy function
var buf bytes.Buffer
@ -39,6 +45,7 @@ func TestServerless_DeployAndInvoke(t *testing.T) {
// Add metadata
_ = writer.WriteField("name", funcName)
_ = writer.WriteField("namespace", namespace)
_ = writer.WriteField("is_public", "true") // Make function public for E2E test
// Add WASM file
part, err := writer.CreateFormFile("wasm", funcName+".wasm")
@ -48,14 +55,14 @@ func TestServerless_DeployAndInvoke(t *testing.T) {
part.Write(wasmBytes)
writer.Close()
deployReq, _ := http.NewRequestWithContext(ctx, "POST", GetGatewayURL()+"/v1/functions", &buf)
deployReq, _ := http.NewRequestWithContext(ctx, "POST", e2e.GetGatewayURL()+"/v1/functions", &buf)
deployReq.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
deployReq.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(1 * time.Minute)
client := e2e.NewHTTPClient(1 * time.Minute)
resp, err := client.Do(deployReq)
if err != nil {
t.Fatalf("deploy request failed: %v", err)
@ -69,10 +76,10 @@ func TestServerless_DeployAndInvoke(t *testing.T) {
// 2. Invoke function
invokePayload := []byte(`{"name": "E2E Tester"}`)
invokeReq, _ := http.NewRequestWithContext(ctx, "POST", GetGatewayURL()+"/v1/functions/"+funcName+"/invoke", bytes.NewReader(invokePayload))
invokeReq, _ := http.NewRequestWithContext(ctx, "POST", e2e.GetGatewayURL()+"/v1/functions/"+funcName+"/invoke?namespace="+namespace, bytes.NewReader(invokePayload))
invokeReq.Header.Set("Content-Type", "application/json")
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
invokeReq.Header.Set("Authorization", "Bearer "+apiKey)
}
@ -94,8 +101,8 @@ func TestServerless_DeployAndInvoke(t *testing.T) {
}
// 3. List functions
listReq, _ := http.NewRequestWithContext(ctx, "GET", GetGatewayURL()+"/v1/functions?namespace="+namespace, nil)
if apiKey := GetAPIKey(); apiKey != "" {
listReq, _ := http.NewRequestWithContext(ctx, "GET", e2e.GetGatewayURL()+"/v1/functions?namespace="+namespace, nil)
if apiKey := e2e.GetAPIKey(); apiKey != "" {
listReq.Header.Set("Authorization", "Bearer "+apiKey)
}
resp, err = client.Do(listReq)
@ -108,8 +115,8 @@ func TestServerless_DeployAndInvoke(t *testing.T) {
}
// 4. Delete function
deleteReq, _ := http.NewRequestWithContext(ctx, "DELETE", GetGatewayURL()+"/v1/functions/"+funcName+"?namespace="+namespace, nil)
if apiKey := GetAPIKey(); apiKey != "" {
deleteReq, _ := http.NewRequestWithContext(ctx, "DELETE", e2e.GetGatewayURL()+"/v1/functions/"+funcName+"?namespace="+namespace, nil)
if apiKey := e2e.GetAPIKey(); apiKey != "" {
deleteReq.Header.Set("Authorization", "Bearer "+apiKey)
}
resp, err = client.Do(deleteReq)

View File

@ -1,6 +1,6 @@
//go:build e2e
package e2e
package shared_test
import (
"bytes"
@ -10,6 +10,8 @@ import (
"net/http"
"testing"
"time"
e2e "github.com/DeBrosOfficial/network/e2e"
)
// uploadFile is a helper to upload a file to storage
@ -34,7 +36,7 @@ func uploadFile(t *testing.T, ctx context.Context, content []byte, filename stri
}
// Create request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
@ -42,13 +44,13 @@ func uploadFile(t *testing.T, ctx context.Context, content []byte, filename stri
req.Header.Set("Content-Type", writer.FormDataContentType())
// Add auth headers
if jwt := GetJWT(); jwt != "" {
if jwt := e2e.GetJWT(); jwt != "" {
req.Header.Set("Authorization", "Bearer "+jwt)
} else if apiKey := GetAPIKey(); apiKey != "" {
} else if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload request failed: %v", err)
@ -60,28 +62,20 @@ func uploadFile(t *testing.T, ctx context.Context, content []byte, filename stri
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
}
result, err := DecodeJSONFromReader(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatalf("failed to read upload response: %v", err)
}
var result map[string]interface{}
if err := e2e.DecodeJSON(body, &result); err != nil {
t.Fatalf("failed to decode upload response: %v", err)
}
return result["cid"].(string)
}
// DecodeJSON is a helper to decode JSON from io.ReadCloser
func DecodeJSONFromReader(rc io.ReadCloser) (map[string]interface{}, error) {
defer rc.Close()
body, err := io.ReadAll(rc)
if err != nil {
return nil, err
}
var result map[string]interface{}
err = DecodeJSON(body, &result)
return result, err
}
func TestStorage_UploadText(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -107,18 +101,18 @@ func TestStorage_UploadText(t *testing.T) {
}
// Create request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload request failed: %v", err)
@ -132,7 +126,7 @@ func TestStorage_UploadText(t *testing.T) {
var result map[string]interface{}
body, _ := io.ReadAll(resp.Body)
if err := DecodeJSON(body, &result); err != nil {
if err := e2e.DecodeJSON(body, &result); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -150,7 +144,7 @@ func TestStorage_UploadText(t *testing.T) {
}
func TestStorage_UploadBinary(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -177,18 +171,18 @@ func TestStorage_UploadBinary(t *testing.T) {
}
// Create request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload request failed: %v", err)
@ -202,7 +196,7 @@ func TestStorage_UploadBinary(t *testing.T) {
var result map[string]interface{}
body, _ := io.ReadAll(resp.Body)
if err := DecodeJSON(body, &result); err != nil {
if err := e2e.DecodeJSON(body, &result); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -212,7 +206,7 @@ func TestStorage_UploadBinary(t *testing.T) {
}
func TestStorage_UploadLarge(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -239,18 +233,18 @@ func TestStorage_UploadLarge(t *testing.T) {
}
// Create request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload request failed: %v", err)
@ -264,7 +258,7 @@ func TestStorage_UploadLarge(t *testing.T) {
var result map[string]interface{}
body, _ := io.ReadAll(resp.Body)
if err := DecodeJSON(body, &result); err != nil {
if err := e2e.DecodeJSON(body, &result); err != nil {
t.Fatalf("failed to decode response: %v", err)
}
@ -274,7 +268,7 @@ func TestStorage_UploadLarge(t *testing.T) {
}
func TestStorage_PinUnpin(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -299,18 +293,18 @@ func TestStorage_PinUnpin(t *testing.T) {
}
// Create upload request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload failed: %v", err)
@ -319,16 +313,23 @@ func TestStorage_PinUnpin(t *testing.T) {
var uploadResult map[string]interface{}
body, _ := io.ReadAll(resp.Body)
if err := DecodeJSON(body, &uploadResult); err != nil {
if err := e2e.DecodeJSON(body, &uploadResult); err != nil {
t.Fatalf("failed to decode upload response: %v", err)
}
cid := uploadResult["cid"].(string)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body))
}
cid, ok := uploadResult["cid"].(string)
if !ok || cid == "" {
t.Fatalf("no CID in upload response: %v", uploadResult)
}
// Pin the file
pinReq := &HTTPRequest{
pinReq := &e2e.HTTPRequest{
Method: http.MethodPost,
URL: GetGatewayURL() + "/v1/storage/pin",
URL: e2e.GetGatewayURL() + "/v1/storage/pin",
Body: map[string]interface{}{
"cid": cid,
"name": "pinned-file",
@ -345,7 +346,7 @@ func TestStorage_PinUnpin(t *testing.T) {
}
var pinResult map[string]interface{}
if err := DecodeJSON(body2, &pinResult); err != nil {
if err := e2e.DecodeJSON(body2, &pinResult); err != nil {
t.Fatalf("failed to decode pin response: %v", err)
}
@ -354,9 +355,9 @@ func TestStorage_PinUnpin(t *testing.T) {
}
// Unpin the file
unpinReq := &HTTPRequest{
unpinReq := &e2e.HTTPRequest{
Method: http.MethodDelete,
URL: GetGatewayURL() + "/v1/storage/unpin/" + cid,
URL: e2e.GetGatewayURL() + "/v1/storage/unpin/" + cid,
}
body3, status, err := unpinReq.Do(ctx)
@ -370,7 +371,7 @@ func TestStorage_PinUnpin(t *testing.T) {
}
func TestStorage_Status(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -395,18 +396,18 @@ func TestStorage_Status(t *testing.T) {
}
// Create upload request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload failed: %v", err)
@ -415,16 +416,16 @@ func TestStorage_Status(t *testing.T) {
var uploadResult map[string]interface{}
body, _ := io.ReadAll(resp.Body)
if err := DecodeJSON(body, &uploadResult); err != nil {
if err := e2e.DecodeJSON(body, &uploadResult); err != nil {
t.Fatalf("failed to decode upload response: %v", err)
}
cid := uploadResult["cid"].(string)
// Get status
statusReq := &HTTPRequest{
statusReq := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/storage/status/" + cid,
URL: e2e.GetGatewayURL() + "/v1/storage/status/" + cid,
}
statusBody, status, err := statusReq.Do(ctx)
@ -437,7 +438,7 @@ func TestStorage_Status(t *testing.T) {
}
var statusResult map[string]interface{}
if err := DecodeJSON(statusBody, &statusResult); err != nil {
if err := e2e.DecodeJSON(statusBody, &statusResult); err != nil {
t.Fatalf("failed to decode status response: %v", err)
}
@ -447,14 +448,14 @@ func TestStorage_Status(t *testing.T) {
}
func TestStorage_InvalidCID(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
statusReq := &HTTPRequest{
statusReq := &e2e.HTTPRequest{
Method: http.MethodGet,
URL: GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789",
URL: e2e.GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789",
}
_, status, err := statusReq.Do(ctx)
@ -468,7 +469,7 @@ func TestStorage_InvalidCID(t *testing.T) {
}
func TestStorage_GetByteRange(t *testing.T) {
SkipIfMissingGateway(t)
e2e.SkipIfMissingGateway(t)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
@ -493,18 +494,18 @@ func TestStorage_GetByteRange(t *testing.T) {
}
// Create upload request
req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, e2e.GetGatewayURL()+"/v1/storage/upload", &buf)
if err != nil {
t.Fatalf("failed to create request: %v", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
client := NewHTTPClient(5 * time.Minute)
client := e2e.NewHTTPClient(5 * time.Minute)
resp, err := client.Do(req)
if err != nil {
t.Fatalf("upload failed: %v", err)
@ -513,19 +514,19 @@ func TestStorage_GetByteRange(t *testing.T) {
var uploadResult map[string]interface{}
body, _ := io.ReadAll(resp.Body)
if err := DecodeJSON(body, &uploadResult); err != nil {
if err := e2e.DecodeJSON(body, &uploadResult); err != nil {
t.Fatalf("failed to decode upload response: %v", err)
}
cid := uploadResult["cid"].(string)
// Get full content
getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/storage/get/"+cid, nil)
getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, e2e.GetGatewayURL()+"/v1/storage/get/"+cid, nil)
if err != nil {
t.Fatalf("failed to create get request: %v", err)
}
if apiKey := GetAPIKey(); apiKey != "" {
if apiKey := e2e.GetAPIKey(); apiKey != "" {
getReq.Header.Set("Authorization", "Bearer "+apiKey)
}

BIN
gateway

Binary file not shown.

228
go.mod
View File

@ -1,8 +1,6 @@
module github.com/DeBrosOfficial/network
go 1.24.0
toolchain go1.24.1
go 1.24.6
require (
github.com/charmbracelet/bubbles v0.20.0
@ -11,86 +9,182 @@ require (
github.com/ethereum/go-ethereum v1.13.14
github.com/go-chi/chi/v5 v5.2.3
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
github.com/libp2p/go-libp2p v0.41.1
github.com/libp2p/go-libp2p-pubsub v0.14.2
github.com/mackerelio/go-osstat v0.2.6
github.com/mattn/go-sqlite3 v1.14.32
github.com/multiformats/go-multiaddr v0.15.0
github.com/multiformats/go-multiaddr v0.16.0
github.com/olric-data/olric v0.7.0
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8
github.com/tetratelabs/wazero v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.40.0
golang.org/x/net v0.42.0
golang.org/x/crypto v0.47.0
golang.org/x/net v0.49.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
require (
cloud.google.com/go/auth v0.18.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.30 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/opentelemetry-mapping-go/otlp/attributes v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/proto v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.73.0-rc.1 // indirect
github.com/DataDog/datadog-agent/pkg/trace v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/log v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.71.0 // indirect
github.com/DataDog/datadog-agent/pkg/version v0.71.0 // indirect
github.com/DataDog/datadog-go/v5 v5.6.0 // indirect
github.com/DataDog/dd-trace-go/v2 v2.5.0 // indirect
github.com/DataDog/go-libddwaf/v4 v4.8.0 // indirect
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633 // indirect
github.com/DataDog/go-sqllexer v0.1.8 // indirect
github.com/DataDog/go-tuf v1.1.1-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.7 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/RoaringBitmap/roaring v1.9.4 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1 // indirect
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.22.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/buraksezer/consistent v0.10.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.4.5 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coredns/caddy v1.1.4 // indirect
github.com/coredns/coredns v1.12.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/dnstap/golang-dnstap v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/elastic/gosigar v0.14.3 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/expr-lang/expr v1.17.7 // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
github.com/googleapis/gax-go/v2 v2.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/cronexpr v1.1.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/memberlist v0.5.3 // indirect
github.com/hashicorp/nomad/api v0.0.0-20250909143645-a3b86c697f38 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-log/v2 v2.6.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koron/go-ssdp v0.0.5 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.2.2 // indirect
github.com/libp2p/go-netroute v0.3.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/miekg/dns v1.1.66 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.70 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/minio/simdjson-go v0.4.5 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
@ -101,63 +195,129 @@ require (
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multicodec v0.9.1 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.0 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/oschwald/geoip2-golang/v2 v2.1.0 // indirect
github.com/oschwald/maxminddb-golang/v2 v2.1.1 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/philhofer/fwd v1.2.0 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.4 // indirect
github.com/pion/ice/v4 v4.0.8 // indirect
github.com/pion/interceptor v0.1.37 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/interceptor v0.1.40 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.11 // indirect
github.com/pion/sctp v1.8.37 // indirect
github.com/pion/sdp/v3 v3.0.10 // indirect
github.com/pion/srtp/v3 v3.0.4 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.0 // indirect
github.com/pion/webrtc/v4 v4.0.10 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_golang v1.23.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/common v0.67.5 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/redis/go-redis/v9 v9.8.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.8-0.20250809033336-ffcdc2b7662f // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/cobra v1.10.2 // indirect
github.com/spf13/pflag v1.0.9 // indirect
github.com/stretchr/testify v1.11.1 // indirect
github.com/theckman/httpforwarded v0.4.0 // indirect
github.com/tidwall/btree v1.7.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/redcon v1.6.2 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.15 // indirect
github.com/tklauser/numcpus v0.10.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.23.0 // indirect
go.uber.org/mock v0.5.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/api/v3 v3.6.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.7 // indirect
go.etcd.io/etcd/client/v3 v3.6.7 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/collector/component v1.39.0 // indirect
go.opentelemetry.io/collector/featuregate v1.46.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.133.0 // indirect
go.opentelemetry.io/collector/pdata v1.46.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.140.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/log v0.13.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 // indirect
golang.org/x/mod v0.26.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.27.0 // indirect
golang.org/x/tools v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.40.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/api v0.259.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
k8s.io/api v0.34.3 // indirect
k8s.io/apimachinery v0.34.3 // indirect
k8s.io/client-go v0.34.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/mcs-api v0.3.0 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

440
go.sum
View File

@ -2,13 +2,80 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw=
cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0=
cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE=
github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8=
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.71.0 h1:xjmjXOsiLfUF1wWXYXc8Gg6M7Jbz6a7FtqbnvGKfTvA=
github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.71.0/go.mod h1:y05SPqKEtrigKul+JBVM69ehv3lOgyKwrUIwLugoaSI=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.71.0 h1:jX8qS7CkNzL1fdcDptrOkbWpsRFTQ58ICjp/mj02u1k=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.71.0/go.mod h1:B3T0If+WdWAwPMpawjm1lieJyqSI0v04dQZHq15WGxY=
github.com/DataDog/datadog-agent/pkg/opentelemetry-mapping-go/otlp/attributes v0.71.0 h1:bowQteds9+7I4Dd+CsBRVXdlMOOGuBm5zdUQdB/6j1M=
github.com/DataDog/datadog-agent/pkg/opentelemetry-mapping-go/otlp/attributes v0.71.0/go.mod h1:XeZj0IgsiL3vgeEGTucf61JvJRh1LxWMUbZA/XJsPD0=
github.com/DataDog/datadog-agent/pkg/proto v0.71.0 h1:YTwecwy8kF1zsL2HK6KVa7XLRZYZ0Ypb2anlG0zDLeE=
github.com/DataDog/datadog-agent/pkg/proto v0.71.0/go.mod h1:KSn4jt3CykV6CT1C8Rknn/Nj3E+VYHK/UDWolg/+kzw=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.73.0-rc.1 h1:fVqr9ApWmUMEExmgn8iFPfwm9ZrlEfFWgTKp1IcNH18=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.73.0-rc.1/go.mod h1:lwkSvCXABHXyqy6mG9WBU6MTK9/E0i0R8JVApUtT+XA=
github.com/DataDog/datadog-agent/pkg/trace v0.71.0 h1:9UrKHDacMlAWfP2wpSxrZOQbtkwLY2AOAjYgGkgM96Y=
github.com/DataDog/datadog-agent/pkg/trace v0.71.0/go.mod h1:wfVwOlKORIB4IB1vdncTuCTx/OrVU69TLBIiBpewe1Q=
github.com/DataDog/datadog-agent/pkg/util/log v0.71.0 h1:VJ+nm5E0+UdLPkg2H7FKapx0syNcKzCFXA2vfcHz0Bc=
github.com/DataDog/datadog-agent/pkg/util/log v0.71.0/go.mod h1:oG6f6Qe23zPTLOVh0nXjlIXohrjUGXeFjh7S3Na/WyU=
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.71.0 h1:lA3CL+2yHU9gulyR/C0VssVzmvCs/jCHzt+CBs9uH4Q=
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.71.0/go.mod h1:/JHi9UFqdFYy/SFmFozY26dNOl/ODVLSQaF1LKDPiBI=
github.com/DataDog/datadog-agent/pkg/version v0.71.0 h1:jqkKmhFrhHSLpiC3twQFDCXU7nyFcC1EnwagDQxFWVs=
github.com/DataDog/datadog-agent/pkg/version v0.71.0/go.mod h1:FYj51C1ib86rpr5tlLEep9jitqvljIJ5Uz2rrimGTeY=
github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw=
github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/DataDog/dd-trace-go/v2 v2.5.0 h1:Tp4McT135WhbdT/6BYcAoRvl5gH7YKzehSo6Q3uuxBM=
github.com/DataDog/dd-trace-go/v2 v2.5.0/go.mod h1:A9rVmQfyzYUFCctFdKkli9us7G/YhXlMICpQ958wJUA=
github.com/DataDog/go-libddwaf/v4 v4.8.0 h1:m6Bl1lS2RtVN4MtdTYhR5vJ2fWQ3WmNy4FiNBpzrp6w=
github.com/DataDog/go-libddwaf/v4 v4.8.0/go.mod h1:/AZqP6zw3qGJK5mLrA0PkfK3UQDk1zCI2fUNCt4xftE=
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633 h1:ZRLR9Lbym748e8RznWzmSoK+OfV+8qW6SdNYA4/IqdA=
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20250721125240-fdf1ef85b633/go.mod h1:YFoTl1xsMzdSRFIu33oCSPS/3+HZAPGpO3oOM96wXCM=
github.com/DataDog/go-sqllexer v0.1.8 h1:ku9DpghFHeyyviR28W/3R4cCJwzpsuC08YIoltnx5ds=
github.com/DataDog/go-sqllexer v0.1.8/go.mod h1:GGpo1h9/BVSN+6NJKaEcJ9Jn44Hqc63Rakeb+24Mjgo=
github.com/DataDog/go-tuf v1.1.1-0.5.2 h1:YWvghV4ZvrQsPcUw8IOUMSDpqc3W5ruOIC+KJxPknv0=
github.com/DataDog/go-tuf v1.1.1-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/sketches-go v1.4.7 h1:eHs5/0i2Sdf20Zkj0udVFWuCrXGRFig2Dcfm5rtcTxc=
github.com/DataDog/sketches-go v1.4.7/go.mod h1:eAmQ/EBmtSO+nQp7IZMZVRPT4BQTmIc5RZQ+deGlTPM=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/RoaringBitmap/roaring v1.9.4 h1:yhEIoH4YezLYT04s1nHehNO64EKFTop/wBhxv2QzDdQ=
github.com/RoaringBitmap/roaring v1.9.4/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -17,10 +84,44 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1 h1:1jIdwWOulae7bBLIgB36OZ0DINACb1wxM6wdGlx4eHE=
github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1/go.mod h1:tE2zGlMIlxWv+7Otap7ctRp3qeKqtnja7DZguj3Vu/Y=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1 h1:72DBkm/CCuWx2LMHAXvLDkZfzopT3psfAeyZDIt1/yE=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1/go.mod h1:A+oSJxFvzgjZWkpM0mXs3RxB5O1SD6473w3qafOC9eU=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@ -45,6 +146,8 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtyd
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buraksezer/consistent v0.10.0 h1:hqBgz1PvNLC5rkWcEBVAL9dFMBWz6I0VgUCW25rrZlU=
github.com/buraksezer/consistent v0.10.0/go.mod h1:6BrVajWq7wbKZlTOUPs/XVfR8c0maujuPowduSpZqmw=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -58,6 +161,8 @@ github.com/charmbracelet/x/ansi v0.4.5 h1:LqK4vwBNaXw2AyGIICa5/29Sbdq58GbGdFngSe
github.com/charmbracelet/x/ansi v0.4.5/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
@ -65,40 +170,74 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/coredns/caddy v1.1.4 h1:+Lls5xASB0QsA2jpCroCOwpPlb5GjIGlxdjXxdX0XVo=
github.com/coredns/caddy v1.1.4/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/coredns v1.12.1 h1:haptbGscSbdWU46xrjdPj1vp3wvH1Z2FgCSQKEdgN5s=
github.com/coredns/coredns v1.12.1/go.mod h1:V26ngiKdNvAiEre5PTAvklrvTjnNjl6lakq1nbE/NbU=
github.com/coredns/coredns v1.14.1 h1:U7ZvMsMn3IfXhaiEHKkW0wsCKG4H5dPvWyMeSLhAodM=
github.com/coredns/coredns v1.14.1/go.mod h1:oYbISnKw+U930dyDU+VVJ+VCWpRD/frU7NfHlqeqH7U=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnstap/golang-dnstap v0.4.0 h1:KRHBoURygdGtBjDI2w4HifJfMAhhOqDuktAokaSa234=
github.com/dnstap/golang-dnstap v0.4.0/go.mod h1:FqsSdH58NAmkAvKcpyxht7i4FoBjKu8E4JUPt8ipSUs=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54=
github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/ethereum/go-ethereum v1.13.14 h1:EwiY3FZP94derMCIam1iW4HFVrSgIcpsu0HwTQtm6CQ=
github.com/ethereum/go-ethereum v1.13.14/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU=
github.com/expr-lang/expr v1.17.7 h1:Q0xY/e/2aCIp8g9s/LGvMDCC5PxYlvHgDZRQ4y16JX8=
github.com/expr-lang/expr v1.17.7/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA=
github.com/farsightsec/golang-framestream v0.3.0/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
@ -110,8 +249,24 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@ -123,10 +278,16 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -137,9 +298,13 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -158,19 +323,38 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=
github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4=
github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@ -181,10 +365,14 @@ github.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@ -194,10 +382,16 @@ github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=
github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=
github.com/hashicorp/nomad/api v0.0.0-20250909143645-a3b86c697f38 h1:1LTbcTpGdSdbj0ee7YZHNe4R2XqxfyWwIkSGWRhgkfM=
github.com/hashicorp/nomad/api v0.0.0-20250909143645-a3b86c697f38/go.mod h1:0Tdp+9HbvwrxprXv/LfYZ8P21bOl4oA8Afyet1kUvhI=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9 h1:w66aaP3c6SIQ0pi3QH1Tb4AMO3aWoEPxd1CNvLphbkA=
github.com/infobloxopen/go-trees v0.0.0-20200715205103-96a057b8dfb9/go.mod h1:BaIJzjD2ZnHmx2acPF6XfGLPzNCMiBbMRqJr+8/8uRI=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
@ -207,11 +401,15 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
@ -226,6 +424,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -242,6 +442,8 @@ github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
github.com/libp2p/go-libp2p v0.41.1 h1:8ecNQVT5ev/jqALTvisSJeVNvXYJyK4NhQx1nNRXQZE=
github.com/libp2p/go-libp2p v0.41.1/go.mod h1:DcGTovJzQl/I7HMrby5ZRjeD0kQkGiy+9w6aEkSZpRI=
github.com/libp2p/go-libp2p v0.46.0 h1:0T2yvIKpZ3DVYCuPOFxPD1layhRU486pj9rSlGWYnDM=
github.com/libp2p/go-libp2p v0.46.0/go.mod h1:TbIDnpDjBLa7isdgYpbxozIVPBTmM/7qKOJP4SFySrQ=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
@ -252,16 +454,24 @@ github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc=
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=
github.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@ -273,9 +483,14 @@ github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@ -286,10 +501,20 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/minio/simdjson-go v0.4.5 h1:r4IQwjRGmWCQ2VeMc7fGiilu1z5du0gJ/I/FsKwgo5A=
github.com/minio/simdjson-go v0.4.5/go.mod h1:eoNz0DcLQRyEDeaPr4Ru6JpjlZPzbA0IodxVJk8lO8E=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
@ -308,6 +533,8 @@ github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
@ -316,11 +543,15 @@ github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivnc
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@ -338,11 +569,27 @@ github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlR
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 h1:uhcF5Jd7rP9DVEL10Siffyepr6SvlKbUsjH5JpNCRi8=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0/go.mod h1:+oCZ5GXXr7KPI/DNOQORPTq5AWHfALJj9c72b0+YsEY=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/oschwald/geoip2-golang/v2 v2.1.0 h1:DjnLhNJu9WHwTrmoiQFvgmyJoczhdnm7LB23UBI2Amo=
github.com/oschwald/geoip2-golang/v2 v2.1.0/go.mod h1:qdVmcPgrTJ4q2eP9tHq/yldMTdp2VMr33uVdFbHBiBc=
github.com/oschwald/maxminddb-golang/v2 v2.1.1 h1:lA8FH0oOrM4u7mLvowq8IT6a3Q/qEnqRzLQn9eH5ojc=
github.com/oschwald/maxminddb-golang/v2 v2.1.1/go.mod h1:PLdx6PR+siSIoXqqy7C7r3SB3KZnhxWr1Dp6g0Hacl8=
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
@ -350,10 +597,16 @@ github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.8 h1:ajNx0idNG+S+v9Phu4LSn2cs8JEfTsA1/tEjkkAVpFY=
github.com/pion/ice/v4 v4.0.8/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
@ -365,12 +618,20 @@ github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=
github.com/pion/rtp v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.37 h1:ZDmGPtRPX9mKCiVXtMbTWybFw3z/hVKAZgU81wcOrqs=
github.com/pion/sctp v1.8.37/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=
github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
@ -383,14 +644,24 @@ github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q=
github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@ -399,6 +670,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -411,6 +684,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -419,12 +694,22 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI=
github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
@ -434,13 +719,20 @@ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8 h1:BoxiqWvhprOB2isgM59s8wkgKwAoyQH66Twfmof41oE=
github.com/rqlite/gorqlite v0.0.0-20250609141355-ac86a4a1c9a8/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
github.com/shirou/gopsutil/v4 v4.25.8-0.20250809033336-ffcdc2b7662f h1:S+PHRM3lk96X0/cGEGUukqltzkX/ekUx0F9DoCGK1G0=
github.com/shirou/gopsutil/v4 v4.25.8-0.20250809033336-ffcdc2b7662f/go.mod h1:4f4j4w8HLMPWEFs3BO2UBBLigKAaWYwkSkbIt/6Q4Ss=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
@ -472,6 +764,10 @@ github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:Udh
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@ -482,13 +778,18 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA=
github.com/tetratelabs/wazero v1.11.0/go.mod h1:eV28rsN8Q+xwjogd7f4/Pp4xFxO7uOGbLcD/LzB1wiU=
github.com/theckman/httpforwarded v0.4.0 h1:N55vGJT+6ojTnLY3LQCNliJC4TW0P0Pkeys1G1WpX2w=
github.com/theckman/httpforwarded v0.4.0/go.mod h1:GVkFynv6FJreNbgH/bpOU9ITDZ7a5WuzdNCtIMI1pVI=
github.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
@ -496,6 +797,12 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/redcon v1.6.2 h1:5qfvrrybgtO85jnhSravmkZyC0D+7WstbfCs3MmPhow=
github.com/tidwall/redcon v1.6.2/go.mod h1:p5Wbsgeyi2VSTBWOcA5vRXrOb9arFTcU2+ZzFjqV75Y=
github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
@ -507,22 +814,74 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/etcd/api/v3 v3.6.7 h1:7BNJ2gQmc3DNM+9cRkv7KkGQDayElg8x3X+tFDYS+E0=
go.etcd.io/etcd/api/v3 v3.6.7/go.mod h1:xJ81TLj9hxrYYEDmXTeKURMeY3qEDN24hqe+q7KhbnI=
go.etcd.io/etcd/client/pkg/v3 v3.6.7 h1:vvzgyozz46q+TyeGBuFzVuI53/yd133CHceNb/AhBVs=
go.etcd.io/etcd/client/pkg/v3 v3.6.7/go.mod h1:2IVulJ3FZ/czIGl9T4lMF1uxzrhRahLqe+hSgy+Kh7Q=
go.etcd.io/etcd/client/v3 v3.6.7 h1:9WqA5RpIBtdMxAy1ukXLAdtg2pAxNqW5NUoO2wQrE6U=
go.etcd.io/etcd/client/v3 v3.6.7/go.mod h1:2XfROY56AXnUqGsvl+6k29wrwsSbEh1lAouQB1vHpeE=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/collector/component v1.39.0 h1:GJw80zXURBG4h0sh97bPLEn2Ra+NAWUpskaooA0wru4=
go.opentelemetry.io/collector/component v1.39.0/go.mod h1:NPaMPTLQuxm5QaaWdqkxYKztC0bRdV+86Q9ir7xS/2k=
go.opentelemetry.io/collector/featuregate v1.46.0 h1:z3JlymFdWW6aDo9cYAJ6bCqT+OI2DlurJ9P8HqfuKWQ=
go.opentelemetry.io/collector/featuregate v1.46.0/go.mod h1:d0tiRzVYrytB6LkcYgz2ESFTv7OktRPQe0QEQcPt1L4=
go.opentelemetry.io/collector/internal/telemetry v0.133.0 h1:YxbckZC9HniNOZgnSofTOe0AB/bEsmISNdQeS+3CU3o=
go.opentelemetry.io/collector/internal/telemetry v0.133.0/go.mod h1:akUK7X6ZQ+CbbCjyXLv9y/EHt5jIy+J+nGoLvndZN14=
go.opentelemetry.io/collector/pdata v1.46.0 h1:XzhnIWNtc/gbOyFiewRvybR4s3phKHrWxL3yc/wVLDo=
go.opentelemetry.io/collector/pdata v1.46.0/go.mod h1:D2e3BWCUC/bUg29WNzCDVN7Ab0Gzk7hGXZL2pnrDOn0=
go.opentelemetry.io/collector/pdata/pprofile v0.140.0 h1:b9TZ6UnyzsT/ERQw2VKGi/NYLtKSmjG7cgQuc9wZt5s=
go.opentelemetry.io/collector/pdata/pprofile v0.140.0/go.mod h1:/2s/YBWGbu+r8MuKu5zas08iSqe+3P6xnbRpfE2DWAA=
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0 h1:FGre0nZh5BSw7G73VpT3xs38HchsfPsa2aZtMp0NPOs=
go.opentelemetry.io/contrib/bridges/otelzap v0.12.0/go.mod h1:X2PYPViI2wTPIMIOBjG17KNybTzsrATnvPJ02kkz7LM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls=
go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -535,11 +894,15 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4=
golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
@ -550,10 +913,13 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPI
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -566,11 +932,14 @@ golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
@ -579,10 +948,14 @@ golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -597,6 +970,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -607,6 +982,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -617,31 +994,46 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA=
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
@ -649,10 +1041,14 @@ golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -660,20 +1056,28 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/api v0.259.0 h1:90TaGVIxScrh1Vn/XI2426kRpBqHwWIzVBzJsVZ5XrQ=
google.golang.org/api v0.259.0/go.mod h1:LC2ISWGWbRoyQVpxGntWwLWN/vLNxxKBK9KuJRI8Te4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -683,10 +1087,17 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoA
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -696,13 +1107,20 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -717,7 +1135,29 @@ grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJd
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/mcs-api v0.3.0 h1:LjRvgzjMrvO1904GP6XBJSnIX221DJMyQlZOYt9LAnM=
sigs.k8s.io/mcs-api v0.3.0/go.mod h1:zZ5CK8uS6HaLkxY4HqsmcBHfzHuNMrY2uJy8T7jffK4=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

View File

@ -0,0 +1,77 @@
-- Migration 005: DNS Records for CoreDNS Integration
-- This migration creates tables for managing DNS records with RQLite backend for CoreDNS
BEGIN;
-- DNS records table for dynamic DNS management
CREATE TABLE IF NOT EXISTS dns_records (
id INTEGER PRIMARY KEY AUTOINCREMENT,
fqdn TEXT NOT NULL UNIQUE, -- Fully qualified domain name (e.g., myapp.node-7prvNa.orama.network)
record_type TEXT NOT NULL DEFAULT 'A', -- DNS record type: A, AAAA, CNAME, TXT
value TEXT NOT NULL, -- IP address or target value
ttl INTEGER NOT NULL DEFAULT 300, -- Time to live in seconds
namespace TEXT NOT NULL, -- Namespace that owns this record
deployment_id TEXT, -- Optional: deployment that created this record
node_id TEXT, -- Optional: specific node ID for node-specific routing
is_active BOOLEAN NOT NULL DEFAULT TRUE,-- Enable/disable without deleting
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL -- Wallet address or 'system' for auto-created records
);
-- Indexes for fast DNS lookups
CREATE INDEX IF NOT EXISTS idx_dns_records_fqdn ON dns_records(fqdn);
CREATE INDEX IF NOT EXISTS idx_dns_records_namespace ON dns_records(namespace);
CREATE INDEX IF NOT EXISTS idx_dns_records_deployment ON dns_records(deployment_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_node_id ON dns_records(node_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_active ON dns_records(is_active);
-- DNS nodes registry for tracking active nodes
CREATE TABLE IF NOT EXISTS dns_nodes (
id TEXT PRIMARY KEY, -- Node ID (e.g., node-7prvNa)
ip_address TEXT NOT NULL, -- Public IP address
internal_ip TEXT, -- Private IP for cluster communication
region TEXT, -- Geographic region
status TEXT NOT NULL DEFAULT 'active', -- active, draining, offline
last_seen TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
capabilities TEXT, -- JSON: ["wasm", "ipfs", "cache"]
metadata TEXT, -- JSON: additional node info
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Indexes for node health monitoring
CREATE INDEX IF NOT EXISTS idx_dns_nodes_status ON dns_nodes(status);
CREATE INDEX IF NOT EXISTS idx_dns_nodes_last_seen ON dns_nodes(last_seen);
-- Reserved domains table to prevent subdomain collisions
CREATE TABLE IF NOT EXISTS reserved_domains (
domain TEXT PRIMARY KEY,
reason TEXT NOT NULL,
reserved_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Seed reserved domains
INSERT INTO reserved_domains (domain, reason) VALUES
('api.orama.network', 'API gateway endpoint'),
('www.orama.network', 'Marketing website'),
('admin.orama.network', 'Admin panel'),
('ns1.orama.network', 'Nameserver 1'),
('ns2.orama.network', 'Nameserver 2'),
('ns3.orama.network', 'Nameserver 3'),
('ns4.orama.network', 'Nameserver 4'),
('mail.orama.network', 'Email service'),
('cdn.orama.network', 'Content delivery'),
('docs.orama.network', 'Documentation'),
('status.orama.network', 'Status page')
ON CONFLICT(domain) DO NOTHING;
-- Mark migration as applied
CREATE TABLE IF NOT EXISTS schema_migrations (
version INTEGER PRIMARY KEY,
applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (5);
COMMIT;

View File

@ -0,0 +1,74 @@
-- Migration 006: Per-Namespace SQLite Databases
-- This migration creates infrastructure for isolated SQLite databases per namespace
BEGIN;
-- Namespace SQLite databases registry
CREATE TABLE IF NOT EXISTS namespace_sqlite_databases (
id TEXT PRIMARY KEY, -- UUID
namespace TEXT NOT NULL, -- Namespace that owns this database
database_name TEXT NOT NULL, -- Database name (unique per namespace)
home_node_id TEXT NOT NULL, -- Node ID where database file resides
file_path TEXT NOT NULL, -- Absolute path on home node
size_bytes BIGINT DEFAULT 0, -- Current database size
backup_cid TEXT, -- Latest backup CID in IPFS
last_backup_at TIMESTAMP, -- Last backup timestamp
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL, -- Wallet address that created the database
UNIQUE(namespace, database_name)
);
-- Indexes for database lookups
CREATE INDEX IF NOT EXISTS idx_sqlite_databases_namespace ON namespace_sqlite_databases(namespace);
CREATE INDEX IF NOT EXISTS idx_sqlite_databases_home_node ON namespace_sqlite_databases(home_node_id);
CREATE INDEX IF NOT EXISTS idx_sqlite_databases_name ON namespace_sqlite_databases(namespace, database_name);
-- SQLite database backups history
CREATE TABLE IF NOT EXISTS namespace_sqlite_backups (
id TEXT PRIMARY KEY, -- UUID
database_id TEXT NOT NULL, -- References namespace_sqlite_databases.id
backup_cid TEXT NOT NULL, -- IPFS CID of backup file
size_bytes BIGINT NOT NULL, -- Backup file size
backup_type TEXT NOT NULL, -- 'manual', 'scheduled', 'migration'
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL,
FOREIGN KEY (database_id) REFERENCES namespace_sqlite_databases(id) ON DELETE CASCADE
);
-- Index for backup history queries
CREATE INDEX IF NOT EXISTS idx_sqlite_backups_database ON namespace_sqlite_backups(database_id, created_at DESC);
-- Namespace quotas for resource management (future use)
CREATE TABLE IF NOT EXISTS namespace_quotas (
namespace TEXT PRIMARY KEY,
-- Storage quotas
max_sqlite_databases INTEGER DEFAULT 10, -- Max SQLite databases per namespace
max_storage_bytes BIGINT DEFAULT 5368709120, -- 5GB default
max_ipfs_pins INTEGER DEFAULT 1000, -- Max pinned IPFS objects
-- Compute quotas
max_deployments INTEGER DEFAULT 20, -- Max concurrent deployments
max_cpu_percent INTEGER DEFAULT 200, -- Total CPU quota (2 cores)
max_memory_mb INTEGER DEFAULT 2048, -- Total memory quota
-- Rate limits
max_rqlite_queries_per_minute INTEGER DEFAULT 1000,
max_olric_ops_per_minute INTEGER DEFAULT 10000,
-- Current usage (updated periodically)
current_storage_bytes BIGINT DEFAULT 0,
current_deployments INTEGER DEFAULT 0,
current_sqlite_databases INTEGER DEFAULT 0,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Mark migration as applied
INSERT OR IGNORE INTO schema_migrations(version) VALUES (6);
COMMIT;

View File

@ -0,0 +1,178 @@
-- Migration 007: Deployments System
-- This migration creates the complete schema for managing custom deployments
-- (Static sites, Next.js, Go backends, Node.js backends)
BEGIN;
-- Main deployments table
CREATE TABLE IF NOT EXISTS deployments (
id TEXT PRIMARY KEY, -- UUID
namespace TEXT NOT NULL, -- Owner namespace
name TEXT NOT NULL, -- Deployment name (unique per namespace)
type TEXT NOT NULL, -- 'static', 'nextjs', 'nextjs-static', 'go-backend', 'go-wasm', 'nodejs-backend'
version INTEGER NOT NULL DEFAULT 1, -- Monotonic version counter
status TEXT NOT NULL DEFAULT 'deploying', -- 'deploying', 'active', 'failed', 'stopped', 'updating'
-- Content storage
content_cid TEXT, -- IPFS CID for static content or built assets
build_cid TEXT, -- IPFS CID for build artifacts (Next.js SSR, binaries)
-- Runtime configuration
home_node_id TEXT, -- Node ID hosting stateful data/processes
port INTEGER, -- Allocated port (NULL for static/WASM)
subdomain TEXT, -- Custom subdomain (e.g., myapp)
environment TEXT, -- JSON: {"KEY": "value", ...}
-- Resource limits
memory_limit_mb INTEGER DEFAULT 256,
cpu_limit_percent INTEGER DEFAULT 50,
disk_limit_mb INTEGER DEFAULT 1024,
-- Health & monitoring
health_check_path TEXT DEFAULT '/health', -- HTTP path for health checks
health_check_interval INTEGER DEFAULT 30, -- Seconds between health checks
restart_policy TEXT DEFAULT 'always', -- 'always', 'on-failure', 'never'
max_restart_count INTEGER DEFAULT 10, -- Max restarts before marking as failed
-- Metadata
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deployed_by TEXT NOT NULL, -- Wallet address or API key
UNIQUE(namespace, name)
);
-- Indexes for deployment lookups
CREATE INDEX IF NOT EXISTS idx_deployments_namespace ON deployments(namespace);
CREATE INDEX IF NOT EXISTS idx_deployments_status ON deployments(status);
CREATE INDEX IF NOT EXISTS idx_deployments_home_node ON deployments(home_node_id);
CREATE INDEX IF NOT EXISTS idx_deployments_type ON deployments(type);
CREATE INDEX IF NOT EXISTS idx_deployments_subdomain ON deployments(subdomain);
-- Port allocations table (prevents port conflicts)
CREATE TABLE IF NOT EXISTS port_allocations (
node_id TEXT NOT NULL,
port INTEGER NOT NULL,
deployment_id TEXT NOT NULL,
allocated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (node_id, port),
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for finding allocated ports by node
CREATE INDEX IF NOT EXISTS idx_port_allocations_node ON port_allocations(node_id, port);
CREATE INDEX IF NOT EXISTS idx_port_allocations_deployment ON port_allocations(deployment_id);
-- Home node assignments (namespace → node mapping)
CREATE TABLE IF NOT EXISTS home_node_assignments (
namespace TEXT PRIMARY KEY,
home_node_id TEXT NOT NULL,
assigned_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
last_heartbeat TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deployment_count INTEGER DEFAULT 0, -- Cached count for capacity planning
total_memory_mb INTEGER DEFAULT 0, -- Cached total memory usage
total_cpu_percent INTEGER DEFAULT 0 -- Cached total CPU usage
);
-- Index for querying by node
CREATE INDEX IF NOT EXISTS idx_home_node_by_node ON home_node_assignments(home_node_id);
-- Deployment domains (custom domain mapping)
CREATE TABLE IF NOT EXISTS deployment_domains (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
namespace TEXT NOT NULL,
domain TEXT NOT NULL UNIQUE, -- Full domain (e.g., myapp.orama.network or custom)
routing_type TEXT NOT NULL DEFAULT 'balanced', -- 'balanced' or 'node_specific'
node_id TEXT, -- For node_specific routing
is_custom BOOLEAN DEFAULT FALSE, -- True for user's own domain
tls_cert_cid TEXT, -- IPFS CID for custom TLS certificate
verified_at TIMESTAMP, -- When custom domain was verified
verification_token TEXT, -- TXT record token for domain verification
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Indexes for domain lookups
CREATE INDEX IF NOT EXISTS idx_deployment_domains_deployment ON deployment_domains(deployment_id);
CREATE INDEX IF NOT EXISTS idx_deployment_domains_domain ON deployment_domains(domain);
CREATE INDEX IF NOT EXISTS idx_deployment_domains_namespace ON deployment_domains(namespace);
-- Deployment history (version tracking and rollback)
CREATE TABLE IF NOT EXISTS deployment_history (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
version INTEGER NOT NULL,
content_cid TEXT,
build_cid TEXT,
deployed_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deployed_by TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'success', -- 'success', 'failed', 'rolled_back'
error_message TEXT,
rollback_from_version INTEGER, -- If this is a rollback, original version
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Indexes for history queries
CREATE INDEX IF NOT EXISTS idx_deployment_history_deployment ON deployment_history(deployment_id, version DESC);
CREATE INDEX IF NOT EXISTS idx_deployment_history_status ON deployment_history(status);
-- Deployment environment variables (separate for security)
CREATE TABLE IF NOT EXISTS deployment_env_vars (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
key TEXT NOT NULL,
value TEXT NOT NULL, -- Encrypted in production
is_secret BOOLEAN DEFAULT FALSE, -- True for sensitive values
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE(deployment_id, key),
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for env var lookups
CREATE INDEX IF NOT EXISTS idx_deployment_env_vars_deployment ON deployment_env_vars(deployment_id);
-- Deployment events log (audit trail)
CREATE TABLE IF NOT EXISTS deployment_events (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
event_type TEXT NOT NULL, -- 'created', 'started', 'stopped', 'restarted', 'updated', 'deleted', 'health_check_failed'
message TEXT,
metadata TEXT, -- JSON: additional context
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT, -- Wallet address or 'system'
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for event queries
CREATE INDEX IF NOT EXISTS idx_deployment_events_deployment ON deployment_events(deployment_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_deployment_events_type ON deployment_events(event_type);
-- Process health checks (for dynamic deployments)
CREATE TABLE IF NOT EXISTS deployment_health_checks (
id TEXT PRIMARY KEY, -- UUID
deployment_id TEXT NOT NULL,
node_id TEXT NOT NULL,
status TEXT NOT NULL, -- 'healthy', 'unhealthy', 'unknown'
response_time_ms INTEGER,
status_code INTEGER,
error_message TEXT,
checked_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
-- Index for health check queries (keep only recent checks)
CREATE INDEX IF NOT EXISTS idx_health_checks_deployment ON deployment_health_checks(deployment_id, checked_at DESC);
-- Mark migration as applied
INSERT OR IGNORE INTO schema_migrations(version) VALUES (7);
COMMIT;

View File

@ -0,0 +1,31 @@
-- Migration 008: IPFS Namespace Tracking
-- This migration adds namespace isolation for IPFS content by tracking CID ownership.
-- Table: ipfs_content_ownership
-- Tracks which namespace owns each CID uploaded to IPFS.
-- This enables namespace isolation so that:
-- - Namespace-A cannot GET/PIN/UNPIN Namespace-B's content
-- - Same CID can be uploaded by different namespaces (shared content)
CREATE TABLE IF NOT EXISTS ipfs_content_ownership (
id TEXT PRIMARY KEY,
cid TEXT NOT NULL,
namespace TEXT NOT NULL,
name TEXT,
size_bytes BIGINT DEFAULT 0,
is_pinned BOOLEAN DEFAULT FALSE,
uploaded_at TIMESTAMP NOT NULL,
uploaded_by TEXT NOT NULL,
UNIQUE(cid, namespace)
);
-- Index for fast namespace + CID lookup
CREATE INDEX IF NOT EXISTS idx_ipfs_ownership_namespace_cid
ON ipfs_content_ownership(namespace, cid);
-- Index for fast CID lookup across all namespaces
CREATE INDEX IF NOT EXISTS idx_ipfs_ownership_cid
ON ipfs_content_ownership(cid);
-- Index for namespace-only queries (list all content for a namespace)
CREATE INDEX IF NOT EXISTS idx_ipfs_ownership_namespace
ON ipfs_content_ownership(namespace);

View File

@ -0,0 +1,45 @@
-- Migration 009: Update DNS Records to Support Multiple Records per FQDN
-- This allows round-robin A records and multiple NS records for the same domain
BEGIN;
-- SQLite doesn't support DROP CONSTRAINT, so we recreate the table
-- First, create the new table structure
CREATE TABLE IF NOT EXISTS dns_records_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
fqdn TEXT NOT NULL, -- Fully qualified domain name (e.g., myapp.node-7prvNa.orama.network)
record_type TEXT NOT NULL DEFAULT 'A',-- DNS record type: A, AAAA, CNAME, TXT, NS, SOA
value TEXT NOT NULL, -- IP address or target value
ttl INTEGER NOT NULL DEFAULT 300, -- Time to live in seconds
priority INTEGER DEFAULT 0, -- Priority for MX/SRV records, or weight for round-robin
namespace TEXT NOT NULL DEFAULT 'system', -- Namespace that owns this record
deployment_id TEXT, -- Optional: deployment that created this record
node_id TEXT, -- Optional: specific node ID for node-specific routing
is_active BOOLEAN NOT NULL DEFAULT TRUE,-- Enable/disable without deleting
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL DEFAULT 'system', -- Wallet address or 'system' for auto-created records
UNIQUE(fqdn, record_type, value) -- Allow multiple records of same type for same FQDN, but not duplicates
);
-- Copy existing data if the old table exists
INSERT OR IGNORE INTO dns_records_new (id, fqdn, record_type, value, ttl, namespace, deployment_id, node_id, is_active, created_at, updated_at, created_by)
SELECT id, fqdn, record_type, value, ttl, namespace, deployment_id, node_id, is_active, created_at, updated_at, created_by
FROM dns_records WHERE 1=1;
-- Drop old table and rename new one
DROP TABLE IF EXISTS dns_records;
ALTER TABLE dns_records_new RENAME TO dns_records;
-- Recreate indexes
CREATE INDEX IF NOT EXISTS idx_dns_records_fqdn ON dns_records(fqdn);
CREATE INDEX IF NOT EXISTS idx_dns_records_fqdn_type ON dns_records(fqdn, record_type);
CREATE INDEX IF NOT EXISTS idx_dns_records_namespace ON dns_records(namespace);
CREATE INDEX IF NOT EXISTS idx_dns_records_deployment ON dns_records(deployment_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_node_id ON dns_records(node_id);
CREATE INDEX IF NOT EXISTS idx_dns_records_active ON dns_records(is_active);
-- Mark migration as applied
INSERT OR IGNORE INTO schema_migrations(version) VALUES (9);
COMMIT;

View File

@ -0,0 +1,190 @@
-- Migration 010: Namespace Clusters for Physical Isolation
-- Creates tables to manage per-namespace RQLite and Olric clusters
-- Each namespace gets its own 3-node cluster for complete isolation
BEGIN;
-- Extend namespaces table with cluster status tracking
-- Note: SQLite doesn't support ADD COLUMN IF NOT EXISTS, so we handle this carefully
-- These columns track the provisioning state of the namespace's dedicated cluster
-- First check if columns exist, if not add them
-- cluster_status: 'none', 'provisioning', 'ready', 'degraded', 'failed', 'deprovisioning'
-- Create a new namespaces table with additional columns if needed
CREATE TABLE IF NOT EXISTS namespaces_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL UNIQUE,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
cluster_status TEXT DEFAULT 'none',
cluster_created_at TIMESTAMP,
cluster_ready_at TIMESTAMP
);
-- Copy data from old table if it exists and new columns don't
INSERT OR IGNORE INTO namespaces_new (id, name, created_at, cluster_status)
SELECT id, name, created_at, 'none' FROM namespaces WHERE NOT EXISTS (
SELECT 1 FROM pragma_table_info('namespaces') WHERE name = 'cluster_status'
);
-- If the column already exists, this migration was partially applied - skip the table swap
-- We'll use a different approach: just ensure the new tables exist
-- Namespace clusters registry
-- One record per namespace that has a dedicated cluster
CREATE TABLE IF NOT EXISTS namespace_clusters (
id TEXT PRIMARY KEY, -- UUID
namespace_id INTEGER NOT NULL UNIQUE, -- FK to namespaces
namespace_name TEXT NOT NULL, -- Cached for easier lookups
status TEXT NOT NULL DEFAULT 'provisioning', -- provisioning, ready, degraded, failed, deprovisioning
-- Cluster configuration
rqlite_node_count INTEGER NOT NULL DEFAULT 3,
olric_node_count INTEGER NOT NULL DEFAULT 3,
gateway_node_count INTEGER NOT NULL DEFAULT 3,
-- Provisioning metadata
provisioned_by TEXT NOT NULL, -- Wallet address that triggered provisioning
provisioned_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
ready_at TIMESTAMP,
last_health_check TIMESTAMP,
-- Error tracking
error_message TEXT,
retry_count INTEGER DEFAULT 0,
FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_namespace_clusters_status ON namespace_clusters(status);
CREATE INDEX IF NOT EXISTS idx_namespace_clusters_namespace ON namespace_clusters(namespace_id);
CREATE INDEX IF NOT EXISTS idx_namespace_clusters_name ON namespace_clusters(namespace_name);
-- Namespace cluster nodes
-- Tracks which physical nodes host services for each namespace cluster
CREATE TABLE IF NOT EXISTS namespace_cluster_nodes (
id TEXT PRIMARY KEY, -- UUID
namespace_cluster_id TEXT NOT NULL, -- FK to namespace_clusters
node_id TEXT NOT NULL, -- FK to dns_nodes (physical node)
-- Role in the cluster
-- Each node can have multiple roles (rqlite + olric + gateway)
role TEXT NOT NULL, -- 'rqlite_leader', 'rqlite_follower', 'olric', 'gateway'
-- Service ports (allocated from reserved range 10000-10099)
rqlite_http_port INTEGER, -- Port for RQLite HTTP API
rqlite_raft_port INTEGER, -- Port for RQLite Raft consensus
olric_http_port INTEGER, -- Port for Olric HTTP API
olric_memberlist_port INTEGER, -- Port for Olric memberlist gossip
gateway_http_port INTEGER, -- Port for Gateway HTTP
-- Service status
status TEXT NOT NULL DEFAULT 'pending', -- pending, starting, running, stopped, failed
process_pid INTEGER, -- PID of running process (for local management)
last_heartbeat TIMESTAMP,
error_message TEXT,
-- Join addresses for cluster formation
rqlite_join_address TEXT, -- Address to join RQLite cluster
olric_peers TEXT, -- JSON array of Olric peer addresses
-- Metadata
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE(namespace_cluster_id, node_id, role),
FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_cluster ON namespace_cluster_nodes(namespace_cluster_id);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_node ON namespace_cluster_nodes(node_id);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_status ON namespace_cluster_nodes(status);
CREATE INDEX IF NOT EXISTS idx_cluster_nodes_role ON namespace_cluster_nodes(role);
-- Namespace port allocations
-- Manages the reserved port range (10000-10099) for namespace services
-- Each namespace instance on a node gets a block of 5 consecutive ports
CREATE TABLE IF NOT EXISTS namespace_port_allocations (
id TEXT PRIMARY KEY, -- UUID
node_id TEXT NOT NULL, -- Physical node ID
namespace_cluster_id TEXT NOT NULL, -- Namespace cluster this allocation belongs to
-- Port block (5 consecutive ports)
port_start INTEGER NOT NULL, -- Start of port block (e.g., 10000)
port_end INTEGER NOT NULL, -- End of port block (e.g., 10004)
-- Individual port assignments within the block
rqlite_http_port INTEGER NOT NULL, -- port_start + 0
rqlite_raft_port INTEGER NOT NULL, -- port_start + 1
olric_http_port INTEGER NOT NULL, -- port_start + 2
olric_memberlist_port INTEGER NOT NULL, -- port_start + 3
gateway_http_port INTEGER NOT NULL, -- port_start + 4
allocated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- Prevent overlapping allocations on same node
UNIQUE(node_id, port_start),
-- One allocation per namespace per node
UNIQUE(namespace_cluster_id, node_id),
FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_ns_port_alloc_node ON namespace_port_allocations(node_id);
CREATE INDEX IF NOT EXISTS idx_ns_port_alloc_cluster ON namespace_port_allocations(namespace_cluster_id);
-- Namespace cluster events
-- Audit log for cluster provisioning and lifecycle events
CREATE TABLE IF NOT EXISTS namespace_cluster_events (
id TEXT PRIMARY KEY, -- UUID
namespace_cluster_id TEXT NOT NULL,
event_type TEXT NOT NULL, -- Event types listed below
node_id TEXT, -- Optional: specific node this event relates to
message TEXT,
metadata TEXT, -- JSON for additional event data
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (namespace_cluster_id) REFERENCES namespace_clusters(id) ON DELETE CASCADE
);
-- Event types:
-- 'provisioning_started' - Cluster provisioning began
-- 'nodes_selected' - 3 nodes were selected for the cluster
-- 'ports_allocated' - Ports allocated on a node
-- 'rqlite_started' - RQLite instance started on a node
-- 'rqlite_joined' - RQLite instance joined the cluster
-- 'rqlite_leader_elected' - RQLite leader election completed
-- 'olric_started' - Olric instance started on a node
-- 'olric_joined' - Olric instance joined memberlist
-- 'gateway_started' - Gateway instance started on a node
-- 'dns_created' - DNS records created for namespace
-- 'cluster_ready' - All services ready, cluster is operational
-- 'cluster_degraded' - One or more nodes are unhealthy
-- 'cluster_failed' - Cluster failed to provision or operate
-- 'node_failed' - Specific node became unhealthy
-- 'node_recovered' - Node recovered from failure
-- 'deprovisioning_started' - Cluster deprovisioning began
-- 'deprovisioned' - Cluster fully deprovisioned
CREATE INDEX IF NOT EXISTS idx_cluster_events_cluster ON namespace_cluster_events(namespace_cluster_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_cluster_events_type ON namespace_cluster_events(event_type);
-- Global deployment registry
-- Prevents duplicate deployment subdomains across all namespaces
-- Since deployments now use {name}-{random}.{domain}, we track used subdomains globally
CREATE TABLE IF NOT EXISTS global_deployment_subdomains (
subdomain TEXT PRIMARY KEY, -- Full subdomain (e.g., 'myapp-f3o4if')
namespace TEXT NOT NULL, -- Owner namespace
deployment_id TEXT NOT NULL, -- FK to deployments (in namespace cluster)
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- No FK to deployments since deployments are in namespace-specific clusters
UNIQUE(subdomain)
);
CREATE INDEX IF NOT EXISTS idx_global_subdomains_namespace ON global_deployment_subdomains(namespace);
CREATE INDEX IF NOT EXISTS idx_global_subdomains_deployment ON global_deployment_subdomains(deployment_id);
-- Mark migration as applied
INSERT OR IGNORE INTO schema_migrations(version) VALUES (10);
COMMIT;

View File

@ -0,0 +1,19 @@
-- Migration 011: DNS Nameservers Table
-- Maps NS hostnames (ns1, ns2, ns3) to specific node IDs and IPs
-- Provides stable NS assignment that survives restarts and re-seeding
BEGIN;
CREATE TABLE IF NOT EXISTS dns_nameservers (
hostname TEXT PRIMARY KEY, -- e.g., "ns1", "ns2", "ns3"
node_id TEXT NOT NULL, -- Peer ID of the assigned node
ip_address TEXT NOT NULL, -- IP address of the assigned node
domain TEXT NOT NULL, -- Base domain (e.g., "dbrs.space")
assigned_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE(node_id, domain) -- A node can only hold one NS slot per domain
);
INSERT OR IGNORE INTO schema_migrations(version) VALUES (11);
COMMIT;

View File

@ -0,0 +1,15 @@
-- Deployment replicas: tracks which nodes host replicas of each deployment
CREATE TABLE IF NOT EXISTS deployment_replicas (
deployment_id TEXT NOT NULL,
node_id TEXT NOT NULL,
port INTEGER DEFAULT 0,
status TEXT NOT NULL DEFAULT 'pending',
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (deployment_id, node_id),
FOREIGN KEY (deployment_id) REFERENCES deployments(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_deployment_replicas_node ON deployment_replicas(node_id);
CREATE INDEX IF NOT EXISTS idx_deployment_replicas_status ON deployment_replicas(deployment_id, status);

View File

@ -0,0 +1,9 @@
-- WireGuard mesh peer tracking
CREATE TABLE IF NOT EXISTS wireguard_peers (
node_id TEXT PRIMARY KEY,
wg_ip TEXT NOT NULL UNIQUE,
public_key TEXT NOT NULL UNIQUE,
public_ip TEXT NOT NULL,
wg_port INTEGER DEFAULT 51820,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

View File

@ -0,0 +1,8 @@
CREATE TABLE IF NOT EXISTS invite_tokens (
token TEXT PRIMARY KEY,
created_by TEXT NOT NULL,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
expires_at DATETIME NOT NULL,
used_at DATETIME,
used_by_ip TEXT
);

View File

@ -0,0 +1,3 @@
-- Store IPFS peer IDs alongside WireGuard peers for automatic swarm discovery
-- Each node registers its IPFS peer ID so other nodes can connect via ipfs swarm connect
ALTER TABLE wireguard_peers ADD COLUMN ipfs_peer_id TEXT DEFAULT '';

6
migrations/embed.go Normal file
View File

@ -0,0 +1,6 @@
package migrations
import "embed"
//go:embed *.sql
var FS embed.FS

View File

@ -19,6 +19,7 @@ type Credentials struct {
IssuedAt time.Time `json:"issued_at"`
LastUsedAt time.Time `json:"last_used_at,omitempty"`
Plan string `json:"plan,omitempty"`
NamespaceURL string `json:"namespace_url,omitempty"`
}
// CredentialStore manages credentials for multiple gateways
@ -165,17 +166,59 @@ func (creds *Credentials) UpdateLastUsed() {
creds.LastUsedAt = time.Now()
}
// GetDefaultGatewayURL returns the default gateway URL from environment or fallback
// GetDefaultGatewayURL returns the default gateway URL from environment config, env vars, or fallback
func GetDefaultGatewayURL() string {
// Check environment variables first (for backwards compatibility)
if envURL := os.Getenv("DEBROS_GATEWAY_URL"); envURL != "" {
return envURL
}
if envURL := os.Getenv("DEBROS_GATEWAY"); envURL != "" {
return envURL
}
// Try to read from environment config file
if gwURL := getGatewayFromEnvConfig(); gwURL != "" {
return gwURL
}
return "http://localhost:6001"
}
// getGatewayFromEnvConfig reads the active environment's gateway URL from the config file
func getGatewayFromEnvConfig() string {
homeDir, err := os.UserHomeDir()
if err != nil {
return ""
}
envConfigPath := filepath.Join(homeDir, ".orama", "environments.json")
data, err := os.ReadFile(envConfigPath)
if err != nil {
return ""
}
var config struct {
Environments []struct {
Name string `json:"name"`
GatewayURL string `json:"gateway_url"`
} `json:"environments"`
ActiveEnvironment string `json:"active_environment"`
}
if err := json.Unmarshal(data, &config); err != nil {
return ""
}
// Find the active environment
for _, env := range config.Environments {
if env.Name == config.ActiveEnvironment {
return env.GatewayURL
}
}
return ""
}
// HasValidCredentials checks if there are valid credentials for the default gateway
func HasValidCredentials() (bool, error) {
store, err := LoadCredentials()

View File

@ -86,7 +86,8 @@ func LoadEnhancedCredentials() (*EnhancedCredentialStore, error) {
}
}
// Parse as legacy v2.0 format (single credential per gateway) and migrate
// Parse as legacy format (single credential per gateway) and migrate
// Supports both v1.0 and v2.0 legacy formats
var legacyStore struct {
Gateways map[string]*Credentials `json:"gateways"`
Version string `json:"version"`
@ -96,8 +97,8 @@ func LoadEnhancedCredentials() (*EnhancedCredentialStore, error) {
return nil, fmt.Errorf("invalid credentials file format: %w", err)
}
if legacyStore.Version != "2.0" {
return nil, fmt.Errorf("unsupported credentials version %q; expected \"2.0\"", legacyStore.Version)
if legacyStore.Version != "1.0" && legacyStore.Version != "2.0" {
return nil, fmt.Errorf("unsupported credentials version %q; expected \"1.0\" or \"2.0\"", legacyStore.Version)
}
// Convert legacy format to enhanced format

View File

@ -16,20 +16,22 @@ import (
// PerformSimpleAuthentication performs a simple authentication flow where the user
// provides a wallet address and receives an API key without signature verification
func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) {
func PerformSimpleAuthentication(gatewayURL, wallet, namespace string) (*Credentials, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Println("\n🔐 Simple Wallet Authentication")
fmt.Println("================================")
// Read wallet address
fmt.Print("Enter your wallet address (0x...): ")
walletInput, err := reader.ReadString('\n')
if err != nil {
return nil, fmt.Errorf("failed to read wallet address: %w", err)
// Read wallet address (skip prompt if provided via flag)
if wallet == "" {
fmt.Print("Enter your wallet address (0x...): ")
walletInput, err := reader.ReadString('\n')
if err != nil {
return nil, fmt.Errorf("failed to read wallet address: %w", err)
}
wallet = strings.TrimSpace(walletInput)
}
wallet := strings.TrimSpace(walletInput)
if wallet == "" {
return nil, fmt.Errorf("wallet address cannot be empty")
}
@ -43,16 +45,21 @@ func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) {
return nil, fmt.Errorf("invalid wallet address format")
}
// Read namespace (optional)
fmt.Print("Enter namespace (press Enter for 'default'): ")
nsInput, err := reader.ReadString('\n')
if err != nil {
return nil, fmt.Errorf("failed to read namespace: %w", err)
}
namespace := strings.TrimSpace(nsInput)
// Read namespace (skip prompt if provided via flag)
if namespace == "" {
namespace = "default"
for {
fmt.Print("Enter namespace (required): ")
nsInput, err := reader.ReadString('\n')
if err != nil {
return nil, fmt.Errorf("failed to read namespace: %w", err)
}
namespace = strings.TrimSpace(nsInput)
if namespace != "" {
break
}
fmt.Println("⚠️ Namespace cannot be empty. Please enter a namespace.")
}
}
fmt.Printf("\n✅ Wallet: %s\n", wallet)
@ -65,13 +72,20 @@ func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) {
return nil, fmt.Errorf("failed to request API key: %w", err)
}
// Build namespace gateway URL from the gateway URL
namespaceURL := ""
if domain := extractDomainFromURL(gatewayURL); domain != "" {
namespaceURL = fmt.Sprintf("https://ns-%s.%s", namespace, domain)
}
// Create credentials
creds := &Credentials{
APIKey: apiKey,
Namespace: namespace,
UserID: wallet,
Wallet: wallet,
IssuedAt: time.Now(),
APIKey: apiKey,
Namespace: namespace,
UserID: wallet,
Wallet: wallet,
IssuedAt: time.Now(),
NamespaceURL: namespaceURL,
}
fmt.Printf("\n🎉 Authentication successful!\n")
@ -81,6 +95,7 @@ func PerformSimpleAuthentication(gatewayURL string) (*Credentials, error) {
}
// requestAPIKeyFromGateway calls the gateway's simple-key endpoint to generate an API key
// For non-default namespaces, this may trigger cluster provisioning and require polling
func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, error) {
reqBody := map[string]string{
"wallet": wallet,
@ -95,7 +110,7 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
endpoint := gatewayURL + "/v1/auth/simple-key"
// Extract domain from URL for TLS configuration
// This uses tlsutil which handles Let's Encrypt staging certificates for *.debros.network
// This uses tlsutil which handles Let's Encrypt staging certificates for *.orama.network
domain := extractDomainFromURL(gatewayURL)
client := tlsutil.NewHTTPClientForDomain(30*time.Second, domain)
@ -105,6 +120,170 @@ func requestAPIKeyFromGateway(gatewayURL, wallet, namespace string) (string, err
}
defer resp.Body.Close()
// Handle 202 Accepted - namespace cluster is being provisioned
if resp.StatusCode == http.StatusAccepted {
return handleProvisioningResponse(gatewayURL, client, resp, wallet, namespace)
}
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))
}
var respBody map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&respBody); err != nil {
return "", fmt.Errorf("failed to decode response: %w", err)
}
apiKey, ok := respBody["api_key"].(string)
if !ok || apiKey == "" {
return "", fmt.Errorf("no api_key in response")
}
return apiKey, nil
}
// handleProvisioningResponse handles 202 Accepted responses when namespace cluster provisioning is needed
func handleProvisioningResponse(gatewayURL string, client *http.Client, resp *http.Response, wallet, namespace string) (string, error) {
var provResp map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&provResp); err != nil {
return "", fmt.Errorf("failed to decode provisioning response: %w", err)
}
status, _ := provResp["status"].(string)
pollURL, _ := provResp["poll_url"].(string)
clusterID, _ := provResp["cluster_id"].(string)
message, _ := provResp["message"].(string)
if status != "provisioning" {
return "", fmt.Errorf("unexpected status: %s", status)
}
fmt.Printf("\n🏗 Provisioning namespace cluster...\n")
if message != "" {
fmt.Printf(" %s\n", message)
}
if clusterID != "" {
fmt.Printf(" Cluster ID: %s\n", clusterID)
}
fmt.Println()
// Poll until cluster is ready
if err := pollProvisioningStatus(gatewayURL, client, pollURL); err != nil {
return "", err
}
// Cluster is ready, retry the API key request
fmt.Println("\n✅ Namespace cluster ready!")
fmt.Println("⏳ Retrieving API key...")
return retryAPIKeyRequest(gatewayURL, client, wallet, namespace)
}
// pollProvisioningStatus polls the status endpoint until the cluster is ready
func pollProvisioningStatus(gatewayURL string, client *http.Client, pollURL string) error {
// Build full poll URL if it's a relative path
if strings.HasPrefix(pollURL, "/") {
pollURL = gatewayURL + pollURL
}
maxAttempts := 120 // 10 minutes (5 seconds per poll)
pollInterval := 5 * time.Second
spinnerChars := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
spinnerIdx := 0
for i := 0; i < maxAttempts; i++ {
// Show progress spinner
fmt.Printf("\r%s Waiting for cluster... ", spinnerChars[spinnerIdx%len(spinnerChars)])
spinnerIdx++
resp, err := client.Get(pollURL)
if err != nil {
time.Sleep(pollInterval)
continue
}
var statusResp map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&statusResp); err != nil {
resp.Body.Close()
time.Sleep(pollInterval)
continue
}
resp.Body.Close()
status, _ := statusResp["status"].(string)
switch status {
case "ready":
fmt.Printf("\r✅ Cluster ready! \n")
return nil
case "failed":
errMsg, _ := statusResp["error"].(string)
fmt.Printf("\r❌ Provisioning failed \n")
return fmt.Errorf("cluster provisioning failed: %s", errMsg)
case "provisioning":
// Show progress details
rqliteReady, _ := statusResp["rqlite_ready"].(bool)
olricReady, _ := statusResp["olric_ready"].(bool)
gatewayReady, _ := statusResp["gateway_ready"].(bool)
dnsReady, _ := statusResp["dns_ready"].(bool)
progressStr := ""
if rqliteReady {
progressStr += "RQLite✓ "
}
if olricReady {
progressStr += "Olric✓ "
}
if gatewayReady {
progressStr += "Gateway✓ "
}
if dnsReady {
progressStr += "DNS✓"
}
if progressStr != "" {
fmt.Printf("\r%s Provisioning... [%s]", spinnerChars[spinnerIdx%len(spinnerChars)], progressStr)
}
default:
// Unknown status, continue polling
}
time.Sleep(pollInterval)
}
fmt.Printf("\r⚠ Timeout waiting for cluster \n")
return fmt.Errorf("timeout waiting for namespace cluster provisioning")
}
// retryAPIKeyRequest retries the API key request after cluster provisioning
func retryAPIKeyRequest(gatewayURL string, client *http.Client, wallet, namespace string) (string, error) {
reqBody := map[string]string{
"wallet": wallet,
"namespace": namespace,
}
payload, err := json.Marshal(reqBody)
if err != nil {
return "", fmt.Errorf("failed to marshal request: %w", err)
}
endpoint := gatewayURL + "/v1/auth/simple-key"
resp, err := client.Post(endpoint, "application/json", bytes.NewReader(payload))
if err != nil {
return "", fmt.Errorf("failed to call gateway: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusAccepted {
// Still provisioning? This shouldn't happen but handle gracefully
return "", fmt.Errorf("cluster still provisioning, please try again")
}
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("gateway returned status %d: %s", resp.StatusCode, string(body))

View File

@ -179,11 +179,11 @@ func (cm *CertificateManager) generateNodeCertificate(hostname string, caCertPEM
DNSNames: []string{hostname},
}
// Add wildcard support if hostname contains *.debros.network
if hostname == "*.debros.network" {
template.DNSNames = []string{"*.debros.network", "debros.network"}
} else if hostname == "debros.network" {
template.DNSNames = []string{"*.debros.network", "debros.network"}
// Add wildcard support if hostname contains *.orama.network
if hostname == "*.orama.network" {
template.DNSNames = []string{"*.orama.network", "orama.network"}
} else if hostname == "orama.network" {
template.DNSNames = []string{"*.orama.network", "orama.network"}
}
// Try to parse as IP address for IP-based certificates
@ -254,4 +254,3 @@ func (cm *CertificateManager) parseCACertificate(caCertPEM, caKeyPEM []byte) (*x
func LoadTLSCertificate(certPEM, keyPEM []byte) (tls.Certificate, error) {
return tls.X509KeyPair(certPEM, keyPEM)
}

View File

@ -2,6 +2,7 @@ package cli
import (
"bufio"
"flag"
"fmt"
"os"
"strings"
@ -19,13 +20,22 @@ func HandleAuthCommand(args []string) {
subcommand := args[0]
switch subcommand {
case "login":
handleAuthLogin()
var wallet, namespace string
fs := flag.NewFlagSet("auth login", flag.ExitOnError)
fs.StringVar(&wallet, "wallet", "", "Wallet address (0x...)")
fs.StringVar(&namespace, "namespace", "", "Namespace name")
_ = fs.Parse(args[1:])
handleAuthLogin(wallet, namespace)
case "logout":
handleAuthLogout()
case "whoami":
handleAuthWhoami()
case "status":
handleAuthStatus()
case "list":
handleAuthList()
case "switch":
handleAuthSwitch()
default:
fmt.Fprintf(os.Stderr, "Unknown auth command: %s\n", subcommand)
showAuthHelp()
@ -35,42 +45,107 @@ func HandleAuthCommand(args []string) {
func showAuthHelp() {
fmt.Printf("🔐 Authentication Commands\n\n")
fmt.Printf("Usage: dbn auth <subcommand>\n\n")
fmt.Printf("Usage: orama auth <subcommand>\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" login - Authenticate by providing your wallet address\n")
fmt.Printf(" logout - Clear stored credentials\n")
fmt.Printf(" whoami - Show current authentication status\n")
fmt.Printf(" status - Show detailed authentication info\n\n")
fmt.Printf(" status - Show detailed authentication info\n")
fmt.Printf(" list - List all stored credentials for current environment\n")
fmt.Printf(" switch - Switch between stored credentials\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" dbn auth login # Enter wallet address interactively\n")
fmt.Printf(" dbn auth whoami # Check who you're logged in as\n")
fmt.Printf(" dbn auth status # View detailed authentication info\n")
fmt.Printf(" dbn auth logout # Clear all stored credentials\n\n")
fmt.Printf(" orama auth login # Enter wallet address interactively\n")
fmt.Printf(" orama auth login --wallet 0x... --namespace myns # Non-interactive\n")
fmt.Printf(" orama auth whoami # Check who you're logged in as\n")
fmt.Printf(" orama auth status # View detailed authentication info\n")
fmt.Printf(" orama auth logout # Clear all stored credentials\n\n")
fmt.Printf("Environment Variables:\n")
fmt.Printf(" DEBROS_GATEWAY_URL - Gateway URL (overrides environment config)\n\n")
fmt.Printf("Authentication Flow:\n")
fmt.Printf(" 1. Run 'dbn auth login'\n")
fmt.Printf(" 1. Run 'orama auth login'\n")
fmt.Printf(" 2. Enter your wallet address when prompted\n")
fmt.Printf(" 3. Enter your namespace (or press Enter for 'default')\n")
fmt.Printf(" 4. An API key will be generated and saved to ~/.orama/credentials.json\n\n")
fmt.Printf("Note: Authentication uses the currently active environment.\n")
fmt.Printf(" Use 'dbn env current' to see your active environment.\n")
fmt.Printf(" Use 'orama env current' to see your active environment.\n")
}
func handleAuthLogin() {
// Prompt for node selection
gatewayURL := promptForGatewayURL()
fmt.Printf("🔐 Authenticating with gateway at: %s\n", gatewayURL)
func handleAuthLogin(wallet, namespace string) {
// Get gateway URL from active environment
gatewayURL := getGatewayURL()
// Use the simple authentication flow
creds, err := auth.PerformSimpleAuthentication(gatewayURL)
// Show active environment
env, err := GetActiveEnvironment()
if err == nil {
fmt.Printf("🌍 Environment: %s\n", env.Name)
}
fmt.Printf("🔐 Authenticating with gateway at: %s\n\n", gatewayURL)
// Load enhanced credential store
store, err := auth.LoadEnhancedCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load credentials: %v\n", err)
os.Exit(1)
}
// Check if we already have credentials for this gateway
gwCreds := store.Gateways[gatewayURL]
if gwCreds != nil && len(gwCreds.Credentials) > 0 {
// Show existing credentials and offer choice
choice, credIndex, err := store.DisplayCredentialMenu(gatewayURL)
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Menu selection failed: %v\n", err)
os.Exit(1)
}
switch choice {
case auth.AuthChoiceUseCredential:
selectedCreds := gwCreds.Credentials[credIndex]
store.SetDefaultCredential(gatewayURL, credIndex)
selectedCreds.UpdateLastUsed()
if err := store.Save(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to save credentials: %v\n", err)
os.Exit(1)
}
fmt.Printf("✅ Switched to wallet: %s\n", selectedCreds.Wallet)
fmt.Printf("🏢 Namespace: %s\n", selectedCreds.Namespace)
return
case auth.AuthChoiceLogout:
store.ClearAllCredentials()
if err := store.Save(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to clear credentials: %v\n", err)
os.Exit(1)
}
fmt.Println("✅ All credentials cleared")
return
case auth.AuthChoiceExit:
fmt.Println("Exiting...")
return
case auth.AuthChoiceAddCredential:
// Fall through to add new credential
}
}
// Perform simple authentication to add a new credential
creds, err := auth.PerformSimpleAuthentication(gatewayURL, wallet, namespace)
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Authentication failed: %v\n", err)
os.Exit(1)
}
// Save credentials to file
if err := auth.SaveCredentialsForDefaultGateway(creds); err != nil {
// Add to enhanced store
store.AddCredential(gatewayURL, creds)
// Set as default
gwCreds = store.Gateways[gatewayURL]
if gwCreds != nil {
store.SetDefaultCredential(gatewayURL, len(gwCreds.Credentials)-1)
}
if err := store.Save(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to save credentials: %v\n", err)
os.Exit(1)
}
@ -81,6 +156,9 @@ func handleAuthLogin() {
fmt.Printf("🎯 Wallet: %s\n", creds.Wallet)
fmt.Printf("🏢 Namespace: %s\n", creds.Namespace)
fmt.Printf("🔑 API Key: %s\n", creds.APIKey)
if creds.NamespaceURL != "" {
fmt.Printf("🌐 Namespace URL: %s\n", creds.NamespaceURL)
}
}
func handleAuthLogout() {
@ -92,23 +170,26 @@ func handleAuthLogout() {
}
func handleAuthWhoami() {
store, err := auth.LoadCredentials()
store, err := auth.LoadEnhancedCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load credentials: %v\n", err)
os.Exit(1)
}
gatewayURL := getGatewayURL()
creds, exists := store.GetCredentialsForGateway(gatewayURL)
creds := store.GetDefaultCredential(gatewayURL)
if !exists || !creds.IsValid() {
fmt.Println("❌ Not authenticated - run 'dbn auth login' to authenticate")
if creds == nil || !creds.IsValid() {
fmt.Println("❌ Not authenticated - run 'orama auth login' to authenticate")
os.Exit(1)
}
fmt.Println("✅ Authenticated")
fmt.Printf(" Wallet: %s\n", creds.Wallet)
fmt.Printf(" Namespace: %s\n", creds.Namespace)
if creds.NamespaceURL != "" {
fmt.Printf(" NS Gateway: %s\n", creds.NamespaceURL)
}
fmt.Printf(" Issued At: %s\n", creds.IssuedAt.Format("2006-01-02 15:04:05"))
if !creds.ExpiresAt.IsZero() {
fmt.Printf(" Expires At: %s\n", creds.ExpiresAt.Format("2006-01-02 15:04:05"))
@ -122,14 +203,14 @@ func handleAuthWhoami() {
}
func handleAuthStatus() {
store, err := auth.LoadCredentials()
store, err := auth.LoadEnhancedCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load credentials: %v\n", err)
os.Exit(1)
}
gatewayURL := getGatewayURL()
creds, exists := store.GetCredentialsForGateway(gatewayURL)
creds := store.GetDefaultCredential(gatewayURL)
// Show active environment
env, err := GetActiveEnvironment()
@ -140,7 +221,7 @@ func handleAuthStatus() {
fmt.Println("🔐 Authentication Status")
fmt.Printf(" Gateway URL: %s\n", gatewayURL)
if !exists || creds == nil {
if creds == nil {
fmt.Println(" Status: ❌ Not authenticated")
return
}
@ -156,6 +237,9 @@ func handleAuthStatus() {
fmt.Println(" Status: ✅ Authenticated")
fmt.Printf(" Wallet: %s\n", creds.Wallet)
fmt.Printf(" Namespace: %s\n", creds.Namespace)
if creds.NamespaceURL != "" {
fmt.Printf(" NS Gateway: %s\n", creds.NamespaceURL)
}
if !creds.ExpiresAt.IsZero() {
fmt.Printf(" Expires: %s\n", creds.ExpiresAt.Format("2006-01-02 15:04:05"))
}
@ -192,7 +276,7 @@ func promptForGatewayURL() string {
return "http://localhost:6001"
}
fmt.Print("Enter node domain (e.g., node-hk19de.debros.network): ")
fmt.Print("Enter node domain (e.g., node-hk19de.orama.network): ")
domain, _ := reader.ReadString('\n')
domain = strings.TrimSpace(domain)
@ -228,3 +312,108 @@ func getGatewayURL() string {
// Fallback to default (node-1)
return "http://localhost:6001"
}
func handleAuthList() {
store, err := auth.LoadEnhancedCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load credentials: %v\n", err)
os.Exit(1)
}
gatewayURL := getGatewayURL()
// Show active environment
env, err := GetActiveEnvironment()
if err == nil {
fmt.Printf("🌍 Environment: %s\n", env.Name)
}
fmt.Printf("🔗 Gateway: %s\n\n", gatewayURL)
gwCreds := store.Gateways[gatewayURL]
if gwCreds == nil || len(gwCreds.Credentials) == 0 {
fmt.Println("No credentials stored for this environment.")
fmt.Println("Run 'orama auth login' to authenticate.")
return
}
fmt.Printf("🔐 Stored Credentials (%d):\n\n", len(gwCreds.Credentials))
for i, creds := range gwCreds.Credentials {
defaultMark := ""
if i == gwCreds.DefaultIndex {
defaultMark = " ← active"
}
statusEmoji := "✅"
statusText := "valid"
if !creds.IsValid() {
statusEmoji = "❌"
statusText = "expired"
}
fmt.Printf(" %d. %s Wallet: %s%s\n", i+1, statusEmoji, creds.Wallet, defaultMark)
fmt.Printf(" Namespace: %s | Status: %s\n", creds.Namespace, statusText)
if creds.Plan != "" {
fmt.Printf(" Plan: %s\n", creds.Plan)
}
if !creds.IssuedAt.IsZero() {
fmt.Printf(" Issued: %s\n", creds.IssuedAt.Format("2006-01-02 15:04:05"))
}
fmt.Println()
}
}
func handleAuthSwitch() {
store, err := auth.LoadEnhancedCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load credentials: %v\n", err)
os.Exit(1)
}
gatewayURL := getGatewayURL()
gwCreds := store.Gateways[gatewayURL]
if gwCreds == nil || len(gwCreds.Credentials) == 0 {
fmt.Println("No credentials stored for this environment.")
fmt.Println("Run 'orama auth login' to authenticate first.")
os.Exit(1)
}
if len(gwCreds.Credentials) == 1 {
fmt.Println("Only one credential stored. Nothing to switch to.")
return
}
// Display menu
choice, credIndex, err := store.DisplayCredentialMenu(gatewayURL)
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Menu selection failed: %v\n", err)
os.Exit(1)
}
switch choice {
case auth.AuthChoiceUseCredential:
selectedCreds := gwCreds.Credentials[credIndex]
store.SetDefaultCredential(gatewayURL, credIndex)
selectedCreds.UpdateLastUsed()
if err := store.Save(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to save credentials: %v\n", err)
os.Exit(1)
}
fmt.Printf("✅ Switched to wallet: %s\n", selectedCreds.Wallet)
fmt.Printf("🏢 Namespace: %s\n", selectedCreds.Namespace)
case auth.AuthChoiceAddCredential:
fmt.Println("Use 'orama auth login' to add a new credential.")
case auth.AuthChoiceLogout:
store.ClearAllCredentials()
if err := store.Save(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to clear credentials: %v\n", err)
os.Exit(1)
}
fmt.Println("✅ All credentials cleared")
case auth.AuthChoiceExit:
fmt.Println("Cancelled.")
}
}

View File

@ -158,7 +158,7 @@ func HandlePeerIDCommand(format string, timeout time.Duration) {
// HandlePubSubCommand handles pubsub commands
func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub <publish|subscribe|topics> [args...]\n")
fmt.Fprintf(os.Stderr, "Usage: orama pubsub <publish|subscribe|topics> [args...]\n")
os.Exit(1)
}
@ -179,7 +179,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
switch subcommand {
case "publish":
if len(args) < 3 {
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub publish <topic> <message>\n")
fmt.Fprintf(os.Stderr, "Usage: orama pubsub publish <topic> <message>\n")
os.Exit(1)
}
err := cli.PubSub().Publish(ctx, args[1], []byte(args[2]))
@ -191,7 +191,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
case "subscribe":
if len(args) < 2 {
fmt.Fprintf(os.Stderr, "Usage: dbn pubsub subscribe <topic> [duration]\n")
fmt.Fprintf(os.Stderr, "Usage: orama pubsub subscribe <topic> [duration]\n")
os.Exit(1)
}
duration := 30 * time.Second
@ -243,7 +243,7 @@ func HandlePubSubCommand(args []string, format string, timeout time.Duration) {
// Helper functions
func createClient() (client.NetworkClient, error) {
config := client.DefaultClientConfig("dbn")
config := client.DefaultClientConfig("orama")
// Use active environment's gateway URL
gatewayURL := getGatewayURL()

481
pkg/cli/db/commands.go Normal file
View File

@ -0,0 +1,481 @@
package db
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"text/tabwriter"
"time"
"github.com/DeBrosOfficial/network/pkg/auth"
"github.com/spf13/cobra"
)
// DBCmd is the root database command
var DBCmd = &cobra.Command{
Use: "db",
Short: "Manage SQLite databases",
Long: "Create and manage per-namespace SQLite databases",
}
// CreateCmd creates a new database
var CreateCmd = &cobra.Command{
Use: "create <database_name>",
Short: "Create a new SQLite database",
Args: cobra.ExactArgs(1),
RunE: createDatabase,
}
// QueryCmd executes a SQL query
var QueryCmd = &cobra.Command{
Use: "query <database_name> <sql>",
Short: "Execute a SQL query",
Args: cobra.ExactArgs(2),
RunE: queryDatabase,
}
// ListCmd lists all databases
var ListCmd = &cobra.Command{
Use: "list",
Short: "List all databases",
RunE: listDatabases,
}
// BackupCmd backs up a database to IPFS
var BackupCmd = &cobra.Command{
Use: "backup <database_name>",
Short: "Backup database to IPFS",
Args: cobra.ExactArgs(1),
RunE: backupDatabase,
}
// BackupsCmd lists backups for a database
var BackupsCmd = &cobra.Command{
Use: "backups <database_name>",
Short: "List backups for a database",
Args: cobra.ExactArgs(1),
RunE: listBackups,
}
func init() {
DBCmd.AddCommand(CreateCmd)
DBCmd.AddCommand(QueryCmd)
DBCmd.AddCommand(ListCmd)
DBCmd.AddCommand(BackupCmd)
DBCmd.AddCommand(BackupsCmd)
}
func createDatabase(cmd *cobra.Command, args []string) error {
dbName := args[0]
apiURL := getAPIURL()
url := apiURL + "/v1/db/sqlite/create"
payload := map[string]string{
"database_name": dbName,
}
jsonData, err := json.Marshal(payload)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusCreated {
return fmt.Errorf("failed to create database: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
fmt.Printf("✅ Database created successfully!\n\n")
fmt.Printf("Name: %s\n", result["database_name"])
fmt.Printf("Home Node: %s\n", result["home_node_id"])
fmt.Printf("Created: %s\n", result["created_at"])
return nil
}
func queryDatabase(cmd *cobra.Command, args []string) error {
dbName := args[0]
sql := args[1]
apiURL := getAPIURL()
url := apiURL + "/v1/db/sqlite/query"
payload := map[string]interface{}{
"database_name": dbName,
"query": sql,
}
jsonData, err := json.Marshal(payload)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("query failed: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
// Print results
if rows, ok := result["rows"].([]interface{}); ok && len(rows) > 0 {
// Print as table
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
// Print headers
firstRow := rows[0].(map[string]interface{})
for col := range firstRow {
fmt.Fprintf(w, "%s\t", col)
}
fmt.Fprintln(w)
// Print rows
for _, row := range rows {
r := row.(map[string]interface{})
for _, val := range r {
fmt.Fprintf(w, "%v\t", val)
}
fmt.Fprintln(w)
}
w.Flush()
fmt.Printf("\nRows returned: %d\n", len(rows))
} else if rowsAffected, ok := result["rows_affected"].(float64); ok {
fmt.Printf("✅ Query executed successfully\n")
fmt.Printf("Rows affected: %d\n", int(rowsAffected))
}
return nil
}
func listDatabases(cmd *cobra.Command, args []string) error {
apiURL := getAPIURL()
url := apiURL + "/v1/db/sqlite/list"
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to list databases: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
databases, ok := result["databases"].([]interface{})
if !ok || len(databases) == 0 {
fmt.Println("No databases found")
return nil
}
// Print table
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
fmt.Fprintln(w, "NAME\tSIZE\tBACKUP CID\tCREATED")
for _, db := range databases {
d := db.(map[string]interface{})
size := "0 B"
if sizeBytes, ok := d["size_bytes"].(float64); ok {
size = formatBytes(int64(sizeBytes))
}
backupCID := "-"
if cid, ok := d["backup_cid"].(string); ok && cid != "" {
if len(cid) > 12 {
backupCID = cid[:12] + "..."
} else {
backupCID = cid
}
}
createdAt := ""
if created, ok := d["created_at"].(string); ok {
if t, err := time.Parse(time.RFC3339, created); err == nil {
createdAt = t.Format("2006-01-02 15:04")
}
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
d["database_name"],
size,
backupCID,
createdAt,
)
}
w.Flush()
fmt.Printf("\nTotal: %v\n", result["total"])
return nil
}
func backupDatabase(cmd *cobra.Command, args []string) error {
dbName := args[0]
fmt.Printf("📦 Backing up database '%s' to IPFS...\n", dbName)
apiURL := getAPIURL()
url := apiURL + "/v1/db/sqlite/backup"
payload := map[string]string{
"database_name": dbName,
}
jsonData, err := json.Marshal(payload)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("backup failed: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
fmt.Printf("\n✅ Backup successful!\n\n")
fmt.Printf("Database: %s\n", result["database_name"])
fmt.Printf("Backup CID: %s\n", result["backup_cid"])
fmt.Printf("IPFS URL: %s\n", result["ipfs_url"])
fmt.Printf("Backed up: %s\n", result["backed_up_at"])
return nil
}
func listBackups(cmd *cobra.Command, args []string) error {
dbName := args[0]
apiURL := getAPIURL()
url := fmt.Sprintf("%s/v1/db/sqlite/backups?database_name=%s", apiURL, dbName)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to list backups: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
backups, ok := result["backups"].([]interface{})
if !ok || len(backups) == 0 {
fmt.Println("No backups found")
return nil
}
// Print table
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
fmt.Fprintln(w, "CID\tSIZE\tBACKED UP")
for _, backup := range backups {
b := backup.(map[string]interface{})
cid := b["backup_cid"].(string)
if len(cid) > 20 {
cid = cid[:20] + "..."
}
size := "0 B"
if sizeBytes, ok := b["size_bytes"].(float64); ok {
size = formatBytes(int64(sizeBytes))
}
backedUpAt := ""
if backed, ok := b["backed_up_at"].(string); ok {
if t, err := time.Parse(time.RFC3339, backed); err == nil {
backedUpAt = t.Format("2006-01-02 15:04")
}
}
fmt.Fprintf(w, "%s\t%s\t%s\n", cid, size, backedUpAt)
}
w.Flush()
fmt.Printf("\nTotal: %v\n", result["total"])
return nil
}
func getAPIURL() string {
if url := os.Getenv("ORAMA_API_URL"); url != "" {
return url
}
return auth.GetDefaultGatewayURL()
}
func getAuthToken() (string, error) {
if token := os.Getenv("ORAMA_TOKEN"); token != "" {
return token, nil
}
// Try to get from enhanced credentials store
store, err := auth.LoadEnhancedCredentials()
if err != nil {
return "", fmt.Errorf("failed to load credentials: %w", err)
}
gatewayURL := auth.GetDefaultGatewayURL()
creds := store.GetDefaultCredential(gatewayURL)
if creds == nil {
return "", fmt.Errorf("no credentials found for %s. Run 'orama auth login' to authenticate", gatewayURL)
}
if !creds.IsValid() {
return "", fmt.Errorf("credentials expired for %s. Run 'orama auth login' to re-authenticate", gatewayURL)
}
return creds.APIKey, nil
}
func formatBytes(bytes int64) string {
const unit = 1024
if bytes < unit {
return fmt.Sprintf("%d B", bytes)
}
div, exp := int64(unit), 0
for n := bytes / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB", float64(bytes)/float64(div), "KMGTPE"[exp])
}

View File

@ -0,0 +1,55 @@
package cli
import (
"fmt"
"os"
"github.com/DeBrosOfficial/network/pkg/cli/db"
"github.com/DeBrosOfficial/network/pkg/cli/deployments"
)
// HandleDeployCommand handles deploy commands
func HandleDeployCommand(args []string) {
deployCmd := deployments.DeployCmd
deployCmd.SetArgs(args)
if err := deployCmd.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}
// HandleDeploymentsCommand handles deployments management commands
func HandleDeploymentsCommand(args []string) {
// Create root command for deployments management
deploymentsCmd := deployments.DeployCmd
deploymentsCmd.Use = "deployments"
deploymentsCmd.Short = "Manage deployments"
deploymentsCmd.Long = "List, get, delete, rollback, and view logs for deployments"
// Add management subcommands
deploymentsCmd.AddCommand(deployments.ListCmd)
deploymentsCmd.AddCommand(deployments.GetCmd)
deploymentsCmd.AddCommand(deployments.DeleteCmd)
deploymentsCmd.AddCommand(deployments.RollbackCmd)
deploymentsCmd.AddCommand(deployments.LogsCmd)
deploymentsCmd.AddCommand(deployments.StatsCmd)
deploymentsCmd.SetArgs(args)
if err := deploymentsCmd.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}
// HandleDBCommand handles database commands
func HandleDBCommand(args []string) {
dbCmd := db.DBCmd
dbCmd.SetArgs(args)
if err := dbCmd.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}

View File

@ -0,0 +1,638 @@
package deployments
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/DeBrosOfficial/network/pkg/auth"
"github.com/spf13/cobra"
)
// DeployCmd is the root deploy command
var DeployCmd = &cobra.Command{
Use: "deploy",
Short: "Deploy applications",
Long: "Deploy static sites, Next.js apps, Go backends, and Node.js backends",
}
// DeployStaticCmd deploys a static site
var DeployStaticCmd = &cobra.Command{
Use: "static <source_path>",
Short: "Deploy a static site (React, Vue, etc.)",
Args: cobra.ExactArgs(1),
RunE: deployStatic,
}
// DeployNextJSCmd deploys a Next.js application
var DeployNextJSCmd = &cobra.Command{
Use: "nextjs <source_path>",
Short: "Deploy a Next.js application",
Args: cobra.ExactArgs(1),
RunE: deployNextJS,
}
// DeployGoCmd deploys a Go backend
var DeployGoCmd = &cobra.Command{
Use: "go <source_path>",
Short: "Deploy a Go backend",
Args: cobra.ExactArgs(1),
RunE: deployGo,
}
// DeployNodeJSCmd deploys a Node.js backend
var DeployNodeJSCmd = &cobra.Command{
Use: "nodejs <source_path>",
Short: "Deploy a Node.js backend",
Args: cobra.ExactArgs(1),
RunE: deployNodeJS,
}
var (
deployName string
deploySubdomain string
deploySSR bool
deployUpdate bool
)
func init() {
DeployStaticCmd.Flags().StringVar(&deployName, "name", "", "Deployment name (required)")
DeployStaticCmd.Flags().StringVar(&deploySubdomain, "subdomain", "", "Custom subdomain")
DeployStaticCmd.Flags().BoolVar(&deployUpdate, "update", false, "Update existing deployment")
DeployStaticCmd.MarkFlagRequired("name")
DeployNextJSCmd.Flags().StringVar(&deployName, "name", "", "Deployment name (required)")
DeployNextJSCmd.Flags().StringVar(&deploySubdomain, "subdomain", "", "Custom subdomain")
DeployNextJSCmd.Flags().BoolVar(&deploySSR, "ssr", false, "Deploy with SSR (server-side rendering)")
DeployNextJSCmd.Flags().BoolVar(&deployUpdate, "update", false, "Update existing deployment")
DeployNextJSCmd.MarkFlagRequired("name")
DeployGoCmd.Flags().StringVar(&deployName, "name", "", "Deployment name (required)")
DeployGoCmd.Flags().StringVar(&deploySubdomain, "subdomain", "", "Custom subdomain")
DeployGoCmd.Flags().BoolVar(&deployUpdate, "update", false, "Update existing deployment")
DeployGoCmd.MarkFlagRequired("name")
DeployNodeJSCmd.Flags().StringVar(&deployName, "name", "", "Deployment name (required)")
DeployNodeJSCmd.Flags().StringVar(&deploySubdomain, "subdomain", "", "Custom subdomain")
DeployNodeJSCmd.Flags().BoolVar(&deployUpdate, "update", false, "Update existing deployment")
DeployNodeJSCmd.MarkFlagRequired("name")
DeployCmd.AddCommand(DeployStaticCmd)
DeployCmd.AddCommand(DeployNextJSCmd)
DeployCmd.AddCommand(DeployGoCmd)
DeployCmd.AddCommand(DeployNodeJSCmd)
}
func deployStatic(cmd *cobra.Command, args []string) error {
sourcePath := args[0]
// Warn if source looks like it needs building
if _, err := os.Stat(filepath.Join(sourcePath, "package.json")); err == nil {
if _, err := os.Stat(filepath.Join(sourcePath, "index.html")); os.IsNotExist(err) {
fmt.Printf("⚠️ Warning: %s has package.json but no index.html. You may need to build first.\n", sourcePath)
fmt.Printf(" Try: cd %s && npm run build, then deploy the output directory (e.g. dist/ or out/)\n\n", sourcePath)
}
}
fmt.Printf("📦 Creating tarball from %s...\n", sourcePath)
tarball, err := createTarball(sourcePath)
if err != nil {
return fmt.Errorf("failed to create tarball: %w", err)
}
defer os.Remove(tarball)
fmt.Printf("☁️ Uploading to Orama Network...\n")
endpoint := "/v1/deployments/static/upload"
if deployUpdate {
endpoint = "/v1/deployments/static/update?name=" + deployName
}
resp, err := uploadDeployment(endpoint, tarball, map[string]string{
"name": deployName,
"subdomain": deploySubdomain,
})
if err != nil {
return err
}
fmt.Printf("\n✅ Deployment successful!\n\n")
printDeploymentInfo(resp)
return nil
}
func deployNextJS(cmd *cobra.Command, args []string) error {
sourcePath, err := filepath.Abs(args[0])
if err != nil {
return fmt.Errorf("failed to resolve path: %w", err)
}
// Verify it's a Next.js project
if _, err := os.Stat(filepath.Join(sourcePath, "package.json")); os.IsNotExist(err) {
return fmt.Errorf("no package.json found in %s", sourcePath)
}
// Step 1: Install dependencies if needed
if _, err := os.Stat(filepath.Join(sourcePath, "node_modules")); os.IsNotExist(err) {
fmt.Printf("📦 Installing dependencies...\n")
if err := runBuildCommand(sourcePath, "npm", "install"); err != nil {
return fmt.Errorf("npm install failed: %w", err)
}
}
// Step 2: Build
fmt.Printf("🔨 Building Next.js application...\n")
if err := runBuildCommand(sourcePath, "npm", "run", "build"); err != nil {
return fmt.Errorf("build failed: %w", err)
}
var tarball string
if deploySSR {
// SSR: tarball the standalone output
standalonePath := filepath.Join(sourcePath, ".next", "standalone")
if _, err := os.Stat(standalonePath); os.IsNotExist(err) {
return fmt.Errorf(".next/standalone/ not found. Ensure next.config.js has output: 'standalone'")
}
// Copy static assets into standalone
staticSrc := filepath.Join(sourcePath, ".next", "static")
staticDst := filepath.Join(standalonePath, ".next", "static")
if _, err := os.Stat(staticSrc); err == nil {
if err := copyDir(staticSrc, staticDst); err != nil {
return fmt.Errorf("failed to copy static assets: %w", err)
}
}
// Copy public directory if it exists
publicSrc := filepath.Join(sourcePath, "public")
publicDst := filepath.Join(standalonePath, "public")
if _, err := os.Stat(publicSrc); err == nil {
if err := copyDir(publicSrc, publicDst); err != nil {
return fmt.Errorf("failed to copy public directory: %w", err)
}
}
fmt.Printf("📦 Creating tarball from standalone output...\n")
tarball, err = createTarballAll(standalonePath)
} else {
// Static export: tarball the out/ directory
outPath := filepath.Join(sourcePath, "out")
if _, err := os.Stat(outPath); os.IsNotExist(err) {
return fmt.Errorf("out/ directory not found. For static export, ensure next.config.js has output: 'export'")
}
fmt.Printf("📦 Creating tarball from static export...\n")
tarball, err = createTarball(outPath)
}
if err != nil {
return fmt.Errorf("failed to create tarball: %w", err)
}
defer os.Remove(tarball)
fmt.Printf("☁️ Uploading to Orama Network...\n")
endpoint := "/v1/deployments/nextjs/upload"
if deployUpdate {
endpoint = "/v1/deployments/nextjs/update?name=" + deployName
}
resp, err := uploadDeployment(endpoint, tarball, map[string]string{
"name": deployName,
"subdomain": deploySubdomain,
"ssr": fmt.Sprintf("%t", deploySSR),
})
if err != nil {
return err
}
fmt.Printf("\n✅ Deployment successful!\n\n")
printDeploymentInfo(resp)
if deploySSR {
fmt.Printf("⚠️ Note: SSR deployment may take a minute to start. Check status with: orama deployments get %s\n", deployName)
}
return nil
}
func deployGo(cmd *cobra.Command, args []string) error {
sourcePath, err := filepath.Abs(args[0])
if err != nil {
return fmt.Errorf("failed to resolve path: %w", err)
}
// Verify it's a Go project
if _, err := os.Stat(filepath.Join(sourcePath, "go.mod")); os.IsNotExist(err) {
return fmt.Errorf("no go.mod found in %s", sourcePath)
}
// Cross-compile for Linux amd64 (production VPS target)
fmt.Printf("🔨 Building Go binary (linux/amd64)...\n")
buildCmd := exec.Command("go", "build", "-o", "app", ".")
buildCmd.Dir = sourcePath
buildCmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=amd64", "CGO_ENABLED=0")
buildCmd.Stdout = os.Stdout
buildCmd.Stderr = os.Stderr
if err := buildCmd.Run(); err != nil {
return fmt.Errorf("go build failed: %w", err)
}
defer os.Remove(filepath.Join(sourcePath, "app")) // Clean up after tarball
fmt.Printf("📦 Creating tarball...\n")
tarball, err := createTarballFiles(sourcePath, []string{"app"})
if err != nil {
return fmt.Errorf("failed to create tarball: %w", err)
}
defer os.Remove(tarball)
fmt.Printf("☁️ Uploading to Orama Network...\n")
endpoint := "/v1/deployments/go/upload"
if deployUpdate {
endpoint = "/v1/deployments/go/update?name=" + deployName
}
resp, err := uploadDeployment(endpoint, tarball, map[string]string{
"name": deployName,
"subdomain": deploySubdomain,
})
if err != nil {
return err
}
fmt.Printf("\n✅ Deployment successful!\n\n")
printDeploymentInfo(resp)
return nil
}
func deployNodeJS(cmd *cobra.Command, args []string) error {
sourcePath, err := filepath.Abs(args[0])
if err != nil {
return fmt.Errorf("failed to resolve path: %w", err)
}
// Verify it's a Node.js project
if _, err := os.Stat(filepath.Join(sourcePath, "package.json")); os.IsNotExist(err) {
return fmt.Errorf("no package.json found in %s", sourcePath)
}
// Install dependencies if needed
if _, err := os.Stat(filepath.Join(sourcePath, "node_modules")); os.IsNotExist(err) {
fmt.Printf("📦 Installing dependencies...\n")
if err := runBuildCommand(sourcePath, "npm", "install", "--production"); err != nil {
return fmt.Errorf("npm install failed: %w", err)
}
}
// Run build script if it exists
if hasBuildScript(sourcePath) {
fmt.Printf("🔨 Building...\n")
if err := runBuildCommand(sourcePath, "npm", "run", "build"); err != nil {
return fmt.Errorf("build failed: %w", err)
}
}
fmt.Printf("📦 Creating tarball...\n")
tarball, err := createTarball(sourcePath)
if err != nil {
return fmt.Errorf("failed to create tarball: %w", err)
}
defer os.Remove(tarball)
fmt.Printf("☁️ Uploading to Orama Network...\n")
endpoint := "/v1/deployments/nodejs/upload"
if deployUpdate {
endpoint = "/v1/deployments/nodejs/update?name=" + deployName
}
resp, err := uploadDeployment(endpoint, tarball, map[string]string{
"name": deployName,
"subdomain": deploySubdomain,
})
if err != nil {
return err
}
fmt.Printf("\n✅ Deployment successful!\n\n")
printDeploymentInfo(resp)
return nil
}
// runBuildCommand runs a command in the given directory with stdout/stderr streaming
func runBuildCommand(dir string, name string, args ...string) error {
cmd := exec.Command(name, args...)
cmd.Dir = dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
// hasBuildScript checks if package.json has a "build" script
func hasBuildScript(dir string) bool {
data, err := os.ReadFile(filepath.Join(dir, "package.json"))
if err != nil {
return false
}
var pkg map[string]interface{}
if err := json.Unmarshal(data, &pkg); err != nil {
return false
}
scripts, ok := pkg["scripts"].(map[string]interface{})
if !ok {
return false
}
_, ok = scripts["build"]
return ok
}
// copyDir recursively copies a directory
func copyDir(src, dst string) error {
return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
relPath, err := filepath.Rel(src, path)
if err != nil {
return err
}
dstPath := filepath.Join(dst, relPath)
if info.IsDir() {
return os.MkdirAll(dstPath, info.Mode())
}
data, err := os.ReadFile(path)
if err != nil {
return err
}
return os.WriteFile(dstPath, data, info.Mode())
})
}
// createTarballFiles creates a tarball containing only specific files from a directory
func createTarballFiles(baseDir string, files []string) (string, error) {
tmpFile, err := os.CreateTemp("", "orama-deploy-*.tar.gz")
if err != nil {
return "", err
}
defer tmpFile.Close()
gzWriter := gzip.NewWriter(tmpFile)
defer gzWriter.Close()
tarWriter := tar.NewWriter(gzWriter)
defer tarWriter.Close()
for _, f := range files {
fullPath := filepath.Join(baseDir, f)
info, err := os.Stat(fullPath)
if err != nil {
return "", fmt.Errorf("file %s not found: %w", f, err)
}
header, err := tar.FileInfoHeader(info, "")
if err != nil {
return "", err
}
header.Name = f
if err := tarWriter.WriteHeader(header); err != nil {
return "", err
}
if !info.IsDir() {
file, err := os.Open(fullPath)
if err != nil {
return "", err
}
_, err = io.Copy(tarWriter, file)
file.Close()
if err != nil {
return "", err
}
}
}
return tmpFile.Name(), nil
}
func createTarball(sourcePath string) (string, error) {
return createTarballWithOptions(sourcePath, true)
}
// createTarballAll creates a tarball including node_modules and hidden dirs (for standalone output)
func createTarballAll(sourcePath string) (string, error) {
return createTarballWithOptions(sourcePath, false)
}
func createTarballWithOptions(sourcePath string, skipNodeModules bool) (string, error) {
// Create temp file
tmpFile, err := os.CreateTemp("", "orama-deploy-*.tar.gz")
if err != nil {
return "", err
}
defer tmpFile.Close()
// Create gzip writer
gzWriter := gzip.NewWriter(tmpFile)
defer gzWriter.Close()
// Create tar writer
tarWriter := tar.NewWriter(gzWriter)
defer tarWriter.Close()
// Walk directory and add files
err = filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Skip hidden files and node_modules (unless disabled)
if skipNodeModules {
if strings.HasPrefix(info.Name(), ".") && info.Name() != "." {
if info.IsDir() {
return filepath.SkipDir
}
return nil
}
if info.Name() == "node_modules" {
return filepath.SkipDir
}
}
// Create tar header
header, err := tar.FileInfoHeader(info, "")
if err != nil {
return err
}
// Update header name to be relative to source
relPath, err := filepath.Rel(sourcePath, path)
if err != nil {
return err
}
header.Name = relPath
// Write header
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
// Write file content if not a directory
if !info.IsDir() {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(tarWriter, file)
return err
}
return nil
})
return tmpFile.Name(), err
}
func uploadDeployment(endpoint, tarballPath string, formData map[string]string) (map[string]interface{}, error) {
// Open tarball
file, err := os.Open(tarballPath)
if err != nil {
return nil, err
}
defer file.Close()
// Create multipart request
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
// Add form fields
for key, value := range formData {
writer.WriteField(key, value)
}
// Add file
part, err := writer.CreateFormFile("tarball", filepath.Base(tarballPath))
if err != nil {
return nil, err
}
_, err = io.Copy(part, file)
if err != nil {
return nil, err
}
writer.Close()
// Get API URL from config
apiURL := getAPIURL()
url := apiURL + endpoint
// Create request
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
// Add auth header
token, err := getAuthToken()
if err != nil {
return nil, fmt.Errorf("authentication required: %w", err)
}
req.Header.Set("Authorization", "Bearer "+token)
// Send request
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Read response
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("deployment failed: %s", string(respBody))
}
// Parse response
var result map[string]interface{}
err = json.Unmarshal(respBody, &result)
if err != nil {
return nil, err
}
return result, nil
}
func printDeploymentInfo(resp map[string]interface{}) {
fmt.Printf("Name: %s\n", resp["name"])
fmt.Printf("Type: %s\n", resp["type"])
fmt.Printf("Status: %s\n", resp["status"])
fmt.Printf("Version: %v\n", resp["version"])
if contentCID, ok := resp["content_cid"]; ok && contentCID != "" {
fmt.Printf("Content CID: %s\n", contentCID)
}
if urls, ok := resp["urls"].([]interface{}); ok && len(urls) > 0 {
fmt.Printf("\nURLs:\n")
for _, url := range urls {
fmt.Printf(" • %s\n", url)
}
}
}
func getAPIURL() string {
// Check environment variable first
if url := os.Getenv("ORAMA_API_URL"); url != "" {
return url
}
// Get from active environment config
return auth.GetDefaultGatewayURL()
}
func getAuthToken() (string, error) {
// Check environment variable first
if token := os.Getenv("ORAMA_TOKEN"); token != "" {
return token, nil
}
// Try to get from enhanced credentials store
store, err := auth.LoadEnhancedCredentials()
if err != nil {
return "", fmt.Errorf("failed to load credentials: %w", err)
}
gatewayURL := auth.GetDefaultGatewayURL()
creds := store.GetDefaultCredential(gatewayURL)
if creds == nil {
return "", fmt.Errorf("no credentials found for %s. Run 'orama auth login' to authenticate", gatewayURL)
}
if !creds.IsValid() {
return "", fmt.Errorf("credentials expired for %s. Run 'orama auth login' to re-authenticate", gatewayURL)
}
return creds.APIKey, nil
}

334
pkg/cli/deployments/list.go Normal file
View File

@ -0,0 +1,334 @@
package deployments
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
)
// ListCmd lists all deployments
var ListCmd = &cobra.Command{
Use: "list",
Short: "List all deployments",
RunE: listDeployments,
}
// GetCmd gets a specific deployment
var GetCmd = &cobra.Command{
Use: "get <name>",
Short: "Get deployment details",
Args: cobra.ExactArgs(1),
RunE: getDeployment,
}
// DeleteCmd deletes a deployment
var DeleteCmd = &cobra.Command{
Use: "delete <name>",
Short: "Delete a deployment",
Args: cobra.ExactArgs(1),
RunE: deleteDeployment,
}
// RollbackCmd rolls back a deployment
var RollbackCmd = &cobra.Command{
Use: "rollback <name>",
Short: "Rollback a deployment to a previous version",
Args: cobra.ExactArgs(1),
RunE: rollbackDeployment,
}
var (
rollbackVersion int
)
func init() {
RollbackCmd.Flags().IntVar(&rollbackVersion, "version", 0, "Version to rollback to (required)")
RollbackCmd.MarkFlagRequired("version")
}
func listDeployments(cmd *cobra.Command, args []string) error {
apiURL := getAPIURL()
url := apiURL + "/v1/deployments/list"
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to list deployments: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
deployments, ok := result["deployments"].([]interface{})
if !ok || len(deployments) == 0 {
fmt.Println("No deployments found")
return nil
}
// Print table
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
fmt.Fprintln(w, "NAME\tTYPE\tSTATUS\tVERSION\tCREATED")
for _, dep := range deployments {
d := dep.(map[string]interface{})
createdAt := ""
if created, ok := d["created_at"].(string); ok {
if t, err := time.Parse(time.RFC3339, created); err == nil {
createdAt = t.Format("2006-01-02 15:04")
}
}
fmt.Fprintf(w, "%s\t%s\t%s\t%v\t%s\n",
d["name"],
d["type"],
d["status"],
d["version"],
createdAt,
)
}
w.Flush()
fmt.Printf("\nTotal: %v\n", result["total"])
return nil
}
func getDeployment(cmd *cobra.Command, args []string) error {
name := args[0]
apiURL := getAPIURL()
url := fmt.Sprintf("%s/v1/deployments/get?name=%s", apiURL, name)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to get deployment: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
// Print deployment info
fmt.Printf("Deployment: %s\n\n", result["name"])
fmt.Printf("ID: %s\n", result["id"])
fmt.Printf("Type: %s\n", result["type"])
fmt.Printf("Status: %s\n", result["status"])
fmt.Printf("Version: %v\n", result["version"])
fmt.Printf("Namespace: %s\n", result["namespace"])
if contentCID, ok := result["content_cid"]; ok && contentCID != "" {
fmt.Printf("Content CID: %s\n", contentCID)
}
if buildCID, ok := result["build_cid"]; ok && buildCID != "" {
fmt.Printf("Build CID: %s\n", buildCID)
}
if port, ok := result["port"]; ok && port != nil && port.(float64) > 0 {
fmt.Printf("Port: %v\n", port)
}
if homeNodeID, ok := result["home_node_id"]; ok && homeNodeID != "" {
fmt.Printf("Home Node: %s\n", homeNodeID)
}
if subdomain, ok := result["subdomain"]; ok && subdomain != "" {
fmt.Printf("Subdomain: %s\n", subdomain)
}
fmt.Printf("Memory Limit: %v MB\n", result["memory_limit_mb"])
fmt.Printf("CPU Limit: %v%%\n", result["cpu_limit_percent"])
fmt.Printf("Restart Policy: %s\n", result["restart_policy"])
if urls, ok := result["urls"].([]interface{}); ok && len(urls) > 0 {
fmt.Printf("\nURLs:\n")
for _, url := range urls {
fmt.Printf(" • %s\n", url)
}
}
if createdAt, ok := result["created_at"].(string); ok {
fmt.Printf("\nCreated: %s\n", createdAt)
}
if updatedAt, ok := result["updated_at"].(string); ok {
fmt.Printf("Updated: %s\n", updatedAt)
}
return nil
}
func deleteDeployment(cmd *cobra.Command, args []string) error {
name := args[0]
fmt.Printf("⚠️ Are you sure you want to delete deployment '%s'? (y/N): ", name)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Cancelled")
return nil
}
apiURL := getAPIURL()
url := fmt.Sprintf("%s/v1/deployments/delete?name=%s", apiURL, name)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("failed to delete deployment: %s", string(body))
}
fmt.Printf("✅ Deployment '%s' deleted successfully\n", name)
return nil
}
func rollbackDeployment(cmd *cobra.Command, args []string) error {
name := args[0]
if rollbackVersion <= 0 {
return fmt.Errorf("version must be positive")
}
fmt.Printf("⚠️ Rolling back '%s' to version %d. Continue? (y/N): ", name, rollbackVersion)
var confirm string
fmt.Scanln(&confirm)
if confirm != "y" && confirm != "Y" {
fmt.Println("Cancelled")
return nil
}
apiURL := getAPIURL()
url := apiURL + "/v1/deployments/rollback?name=" + name
payload := map[string]interface{}{
"name": name,
"version": rollbackVersion,
}
jsonData, err := json.Marshal(payload)
if err != nil {
return err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("rollback failed: %s", string(body))
}
var result map[string]interface{}
err = json.Unmarshal(body, &result)
if err != nil {
return err
}
fmt.Printf("\n✅ Rollback successful!\n\n")
fmt.Printf("Deployment: %s\n", result["name"])
fmt.Printf("Current Version: %v\n", result["version"])
fmt.Printf("Rolled Back From: %v\n", result["rolled_back_from"])
fmt.Printf("Rolled Back To: %v\n", result["rolled_back_to"])
fmt.Printf("Status: %s\n", result["status"])
return nil
}

View File

@ -0,0 +1,78 @@
package deployments
import (
"bufio"
"fmt"
"io"
"net/http"
"github.com/spf13/cobra"
)
// LogsCmd streams deployment logs
var LogsCmd = &cobra.Command{
Use: "logs <name>",
Short: "Stream deployment logs",
Args: cobra.ExactArgs(1),
RunE: streamLogs,
}
var (
logsFollow bool
logsLines int
)
func init() {
LogsCmd.Flags().BoolVarP(&logsFollow, "follow", "f", false, "Follow log output")
LogsCmd.Flags().IntVarP(&logsLines, "lines", "n", 100, "Number of lines to show")
}
func streamLogs(cmd *cobra.Command, args []string) error {
name := args[0]
apiURL := getAPIURL()
url := fmt.Sprintf("%s/v1/deployments/logs?name=%s&lines=%d&follow=%t",
apiURL, name, logsLines, logsFollow)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("failed to get logs: %s", string(body))
}
// Stream logs
reader := bufio.NewReader(resp.Body)
for {
line, err := reader.ReadString('\n')
if err != nil {
if err == io.EOF {
if !logsFollow {
break
}
continue
}
return err
}
fmt.Print(line)
}
return nil
}

View File

@ -0,0 +1,116 @@
package deployments
import (
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/spf13/cobra"
)
// StatsCmd shows resource usage for a deployment
var StatsCmd = &cobra.Command{
Use: "stats <name>",
Short: "Show resource usage for a deployment",
Args: cobra.ExactArgs(1),
RunE: statsDeployment,
}
func statsDeployment(cmd *cobra.Command, args []string) error {
name := args[0]
apiURL := getAPIURL()
url := fmt.Sprintf("%s/v1/deployments/stats?name=%s", apiURL, name)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
token, err := getAuthToken()
if err != nil {
return err
}
req.Header.Set("Authorization", "Bearer "+token)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("failed to get stats: %s", string(body))
}
var stats map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
return fmt.Errorf("failed to parse stats: %w", err)
}
// Display
fmt.Println()
fmt.Printf(" Name: %s\n", stats["name"])
fmt.Printf(" Type: %s\n", stats["type"])
fmt.Printf(" Status: %s\n", stats["status"])
if pid, ok := stats["pid"]; ok {
pidInt := int(pid.(float64))
if pidInt > 0 {
fmt.Printf(" PID: %d\n", pidInt)
}
}
if uptime, ok := stats["uptime_seconds"]; ok {
secs := uptime.(float64)
if secs > 0 {
fmt.Printf(" Uptime: %s\n", formatUptime(secs))
}
}
fmt.Println()
if cpu, ok := stats["cpu_percent"]; ok {
fmt.Printf(" CPU: %.1f%%\n", cpu.(float64))
}
if mem, ok := stats["memory_rss_mb"]; ok {
fmt.Printf(" RAM: %s\n", formatSize(mem.(float64)))
}
if disk, ok := stats["disk_mb"]; ok {
fmt.Printf(" Disk: %s\n", formatSize(disk.(float64)))
}
fmt.Println()
return nil
}
func formatUptime(seconds float64) string {
s := int(seconds)
days := s / 86400
hours := (s % 86400) / 3600
mins := (s % 3600) / 60
if days > 0 {
return fmt.Sprintf("%dd %dh %dm", days, hours, mins)
}
if hours > 0 {
return fmt.Sprintf("%dh %dm", hours, mins)
}
return fmt.Sprintf("%dm", mins)
}
func formatSize(mb float64) string {
if mb < 0.1 {
return fmt.Sprintf("%.1f KB", mb*1024)
}
if mb >= 1024 {
return fmt.Sprintf("%.1f GB", mb/1024)
}
return fmt.Sprintf("%.1f MB", mb)
}

View File

@ -158,7 +158,7 @@ func handleDevStatus(args []string) {
func handleDevLogs(args []string) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: dbn dev logs <component> [--follow]\n")
fmt.Fprintf(os.Stderr, "Usage: orama dev logs <component> [--follow]\n")
fmt.Fprintf(os.Stderr, "\nComponents: node-1, node-2, node-3, node-4, node-5, gateway, ipfs-node-1, ipfs-node-2, ipfs-node-3, ipfs-node-4, ipfs-node-5, olric, anon\n")
os.Exit(1)
}

View File

@ -24,6 +24,10 @@ func HandleEnvCommand(args []string) {
handleEnvSwitch(subargs)
case "enable":
handleEnvEnable(subargs)
case "add":
handleEnvAdd(subargs)
case "remove":
handleEnvRemove(subargs)
case "help":
showEnvHelp()
default:
@ -35,7 +39,7 @@ func HandleEnvCommand(args []string) {
func showEnvHelp() {
fmt.Printf("🌍 Environment Management Commands\n\n")
fmt.Printf("Usage: dbn env <subcommand>\n\n")
fmt.Printf("Usage: orama env <subcommand>\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" list - List all available environments\n")
fmt.Printf(" current - Show current active environment\n")
@ -43,15 +47,15 @@ func showEnvHelp() {
fmt.Printf(" enable - Alias for 'switch' (e.g., 'devnet enable')\n\n")
fmt.Printf("Available Environments:\n")
fmt.Printf(" local - Local development (http://localhost:6001)\n")
fmt.Printf(" devnet - Development network (https://devnet.orama.network)\n")
fmt.Printf(" testnet - Test network (https://testnet.orama.network)\n\n")
fmt.Printf(" devnet - Development network (https://orama-devnet.network)\n")
fmt.Printf(" testnet - Test network (https://orama-tesetnet.network)\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" dbn env list\n")
fmt.Printf(" dbn env current\n")
fmt.Printf(" dbn env switch devnet\n")
fmt.Printf(" dbn env enable testnet\n")
fmt.Printf(" dbn devnet enable # Shorthand for switch to devnet\n")
fmt.Printf(" dbn testnet enable # Shorthand for switch to testnet\n")
fmt.Printf(" orama env list\n")
fmt.Printf(" orama env current\n")
fmt.Printf(" orama env switch devnet\n")
fmt.Printf(" orama env enable testnet\n")
fmt.Printf(" orama devnet enable # Shorthand for switch to devnet\n")
fmt.Printf(" orama testnet enable # Shorthand for switch to testnet\n")
}
func handleEnvList() {
@ -99,7 +103,7 @@ func handleEnvCurrent() {
func handleEnvSwitch(args []string) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: dbn env switch <environment>\n")
fmt.Fprintf(os.Stderr, "Usage: orama env switch <environment>\n")
fmt.Fprintf(os.Stderr, "Available: local, devnet, testnet\n")
os.Exit(1)
}
@ -140,3 +144,108 @@ func handleEnvEnable(args []string) {
// 'enable' is just an alias for 'switch'
handleEnvSwitch(args)
}
func handleEnvAdd(args []string) {
if len(args) < 2 {
fmt.Fprintf(os.Stderr, "Usage: orama env add <name> <gateway_url> [description]\n")
fmt.Fprintf(os.Stderr, "Example: orama env add production http://dbrs.space \"Production network\"\n")
os.Exit(1)
}
name := args[0]
gatewayURL := args[1]
description := ""
if len(args) > 2 {
description = args[2]
}
// Initialize environments if needed
if err := InitializeEnvironments(); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to initialize environments: %v\n", err)
os.Exit(1)
}
envConfig, err := LoadEnvironmentConfig()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load environment config: %v\n", err)
os.Exit(1)
}
// Check if environment already exists
for _, env := range envConfig.Environments {
if env.Name == name {
fmt.Fprintf(os.Stderr, "❌ Environment '%s' already exists\n", name)
os.Exit(1)
}
}
// Add new environment
envConfig.Environments = append(envConfig.Environments, Environment{
Name: name,
GatewayURL: gatewayURL,
Description: description,
IsActive: false,
})
if err := SaveEnvironmentConfig(envConfig); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to save environment config: %v\n", err)
os.Exit(1)
}
fmt.Printf("✅ Added environment: %s\n", name)
fmt.Printf(" Gateway URL: %s\n", gatewayURL)
if description != "" {
fmt.Printf(" Description: %s\n", description)
}
}
func handleEnvRemove(args []string) {
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "Usage: orama env remove <name>\n")
os.Exit(1)
}
name := args[0]
// Don't allow removing 'local'
if name == "local" {
fmt.Fprintf(os.Stderr, "❌ Cannot remove the 'local' environment\n")
os.Exit(1)
}
envConfig, err := LoadEnvironmentConfig()
if err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to load environment config: %v\n", err)
os.Exit(1)
}
// Find and remove environment
found := false
newEnvs := make([]Environment, 0, len(envConfig.Environments))
for _, env := range envConfig.Environments {
if env.Name == name {
found = true
continue
}
newEnvs = append(newEnvs, env)
}
if !found {
fmt.Fprintf(os.Stderr, "❌ Environment '%s' not found\n", name)
os.Exit(1)
}
envConfig.Environments = newEnvs
// If we removed the active environment, switch to local
if envConfig.ActiveEnvironment == name {
envConfig.ActiveEnvironment = "local"
}
if err := SaveEnvironmentConfig(envConfig); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to save environment config: %v\n", err)
os.Exit(1)
}
fmt.Printf("✅ Removed environment: %s\n", name)
}

View File

@ -31,15 +31,21 @@ var DefaultEnvironments = []Environment{
Description: "Local development environment (node-1)",
IsActive: true,
},
{
Name: "production",
GatewayURL: "https://dbrs.space",
Description: "Production network (dbrs.space)",
IsActive: false,
},
{
Name: "devnet",
GatewayURL: "https://devnet.orama.network",
GatewayURL: "https://orama-devnet.network",
Description: "Development network (testnet)",
IsActive: false,
},
{
Name: "testnet",
GatewayURL: "https://testnet.orama.network",
GatewayURL: "https://orama-tesetnet.network",
Description: "Test network (staging)",
IsActive: false,
},

View File

@ -0,0 +1,131 @@
package cli
import (
"bufio"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"net/http"
"os"
"strings"
"github.com/DeBrosOfficial/network/pkg/auth"
)
// HandleNamespaceCommand handles namespace management commands
func HandleNamespaceCommand(args []string) {
if len(args) == 0 {
showNamespaceHelp()
return
}
subcommand := args[0]
switch subcommand {
case "delete":
var force bool
fs := flag.NewFlagSet("namespace delete", flag.ExitOnError)
fs.BoolVar(&force, "force", false, "Skip confirmation prompt")
_ = fs.Parse(args[1:])
handleNamespaceDelete(force)
case "help":
showNamespaceHelp()
default:
fmt.Fprintf(os.Stderr, "Unknown namespace command: %s\n", subcommand)
showNamespaceHelp()
os.Exit(1)
}
}
func showNamespaceHelp() {
fmt.Printf("Namespace Management Commands\n\n")
fmt.Printf("Usage: orama namespace <subcommand>\n\n")
fmt.Printf("Subcommands:\n")
fmt.Printf(" delete - Delete the current namespace and all its resources\n")
fmt.Printf(" help - Show this help message\n\n")
fmt.Printf("Flags:\n")
fmt.Printf(" --force - Skip confirmation prompt\n\n")
fmt.Printf("Examples:\n")
fmt.Printf(" orama namespace delete\n")
fmt.Printf(" orama namespace delete --force\n")
}
func handleNamespaceDelete(force bool) {
// Load credentials
store, err := auth.LoadEnhancedCredentials()
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load credentials: %v\n", err)
os.Exit(1)
}
gatewayURL := getGatewayURL()
creds := store.GetDefaultCredential(gatewayURL)
if creds == nil || !creds.IsValid() {
fmt.Fprintf(os.Stderr, "Not authenticated. Run 'orama auth login' first.\n")
os.Exit(1)
}
namespace := creds.Namespace
if namespace == "" || namespace == "default" {
fmt.Fprintf(os.Stderr, "Cannot delete default namespace.\n")
os.Exit(1)
}
// Confirm deletion
if !force {
fmt.Printf("This will permanently delete namespace '%s' and all its resources:\n", namespace)
fmt.Printf(" - RQLite cluster (3 nodes)\n")
fmt.Printf(" - Olric cache cluster (3 nodes)\n")
fmt.Printf(" - Gateway instances\n")
fmt.Printf(" - API keys and credentials\n\n")
fmt.Printf("Type the namespace name to confirm: ")
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
input := strings.TrimSpace(scanner.Text())
if input != namespace {
fmt.Println("Aborted - namespace name did not match.")
os.Exit(1)
}
}
fmt.Printf("Deleting namespace '%s'...\n", namespace)
// Make DELETE request to gateway
url := fmt.Sprintf("%s/v1/namespace/delete", gatewayURL)
req, err := http.NewRequest(http.MethodDelete, url, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create request: %v\n", err)
os.Exit(1)
}
req.Header.Set("Authorization", "Bearer "+creds.APIKey)
client := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
resp, err := client.Do(req)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to connect to gateway: %v\n", err)
os.Exit(1)
}
defer resp.Body.Close()
var result map[string]interface{}
json.NewDecoder(resp.Body).Decode(&result)
if resp.StatusCode != http.StatusOK {
errMsg := "unknown error"
if e, ok := result["error"].(string); ok {
errMsg = e
}
fmt.Fprintf(os.Stderr, "Failed to delete namespace: %s\n", errMsg)
os.Exit(1)
}
fmt.Printf("Namespace '%s' deleted successfully.\n", namespace)
fmt.Printf("Run 'orama auth login' to create a new namespace.\n")
}

View File

@ -7,42 +7,32 @@ import (
)
// TestProdCommandFlagParsing verifies that prod command flags are parsed correctly
// Note: The installer now uses --vps-ip presence to determine if it's a first node (no --bootstrap flag)
// First node: has --vps-ip but no --peers or --join
// Joining node: has --vps-ip, --peers, and --cluster-secret
// Genesis node: has --vps-ip but no --join or --token
// Joining node: has --vps-ip, --join (HTTPS URL), and --token (invite token)
func TestProdCommandFlagParsing(t *testing.T) {
tests := []struct {
name string
args []string
expectVPSIP string
expectDomain string
expectPeers string
expectJoin string
expectSecret string
expectBranch string
isFirstNode bool // first node = no peers and no join address
name string
args []string
expectVPSIP string
expectDomain string
expectJoin string
expectToken string
expectBranch string
isFirstNode bool // genesis node = no --join and no --token
}{
{
name: "first node (creates new cluster)",
args: []string{"install", "--vps-ip", "10.0.0.1", "--domain", "node-1.example.com"},
expectVPSIP: "10.0.0.1",
name: "genesis node (creates new cluster)",
args: []string{"install", "--vps-ip", "10.0.0.1", "--domain", "node-1.example.com"},
expectVPSIP: "10.0.0.1",
expectDomain: "node-1.example.com",
isFirstNode: true,
isFirstNode: true,
},
{
name: "joining node with peers",
args: []string{"install", "--vps-ip", "10.0.0.2", "--peers", "/ip4/10.0.0.1/tcp/4001/p2p/Qm123", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
expectVPSIP: "10.0.0.2",
expectPeers: "/ip4/10.0.0.1/tcp/4001/p2p/Qm123",
expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
isFirstNode: false,
},
{
name: "joining node with join address",
args: []string{"install", "--vps-ip", "10.0.0.3", "--join", "10.0.0.1:7001", "--cluster-secret", "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
expectVPSIP: "10.0.0.3",
expectJoin: "10.0.0.1:7001",
expectSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
name: "joining node with invite token",
args: []string{"install", "--vps-ip", "10.0.0.2", "--join", "https://node1.dbrs.space", "--token", "abc123def456"},
expectVPSIP: "10.0.0.2",
expectJoin: "https://node1.dbrs.space",
expectToken: "abc123def456",
isFirstNode: false,
},
{
@ -56,8 +46,7 @@ func TestProdCommandFlagParsing(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Extract flags manually to verify parsing logic
var vpsIP, domain, peersStr, joinAddr, clusterSecret, branch string
var vpsIP, domain, joinAddr, token, branch string
for i, arg := range tt.args {
switch arg {
@ -69,17 +58,13 @@ func TestProdCommandFlagParsing(t *testing.T) {
if i+1 < len(tt.args) {
domain = tt.args[i+1]
}
case "--peers":
if i+1 < len(tt.args) {
peersStr = tt.args[i+1]
}
case "--join":
if i+1 < len(tt.args) {
joinAddr = tt.args[i+1]
}
case "--cluster-secret":
case "--token":
if i+1 < len(tt.args) {
clusterSecret = tt.args[i+1]
token = tt.args[i+1]
}
case "--branch":
if i+1 < len(tt.args) {
@ -88,8 +73,8 @@ func TestProdCommandFlagParsing(t *testing.T) {
}
}
// First node detection: no peers and no join address
isFirstNode := peersStr == "" && joinAddr == ""
// Genesis node detection: no --join and no --token
isFirstNode := joinAddr == "" && token == ""
if vpsIP != tt.expectVPSIP {
t.Errorf("expected vpsIP=%q, got %q", tt.expectVPSIP, vpsIP)
@ -97,14 +82,11 @@ func TestProdCommandFlagParsing(t *testing.T) {
if domain != tt.expectDomain {
t.Errorf("expected domain=%q, got %q", tt.expectDomain, domain)
}
if peersStr != tt.expectPeers {
t.Errorf("expected peers=%q, got %q", tt.expectPeers, peersStr)
}
if joinAddr != tt.expectJoin {
t.Errorf("expected join=%q, got %q", tt.expectJoin, joinAddr)
}
if clusterSecret != tt.expectSecret {
t.Errorf("expected clusterSecret=%q, got %q", tt.expectSecret, clusterSecret)
if token != tt.expectToken {
t.Errorf("expected token=%q, got %q", tt.expectToken, token)
}
if branch != tt.expectBranch {
t.Errorf("expected branch=%q, got %q", tt.expectBranch, branch)

View File

@ -5,6 +5,7 @@ import (
"os"
"github.com/DeBrosOfficial/network/pkg/cli/production/install"
"github.com/DeBrosOfficial/network/pkg/cli/production/invite"
"github.com/DeBrosOfficial/network/pkg/cli/production/lifecycle"
"github.com/DeBrosOfficial/network/pkg/cli/production/logs"
"github.com/DeBrosOfficial/network/pkg/cli/production/migrate"
@ -24,6 +25,8 @@ func HandleCommand(args []string) {
subargs := args[1:]
switch subcommand {
case "invite":
invite.Handle(subargs)
case "install":
install.Handle(subargs)
case "upgrade":

View File

@ -39,6 +39,12 @@ func Handle(args []string) {
os.Exit(1)
}
// Validate Anyone relay configuration if enabled
if err := orchestrator.validator.ValidateAnyoneRelayFlags(); err != nil {
fmt.Fprintf(os.Stderr, "❌ %v\n", err)
os.Exit(1)
}
// Execute installation
if err := orchestrator.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "❌ %v\n", err)

View File

@ -10,21 +10,40 @@ import (
type Flags struct {
VpsIP string
Domain string
BaseDomain string // Base domain for deployment routing (e.g., "dbrs.space")
Branch string
NoPull bool
Force bool
DryRun bool
SkipChecks bool
JoinAddress string
ClusterSecret string
SwarmKey string
PeersStr string
PreBuilt bool // Skip building binaries, use pre-built binaries already on disk
Nameserver bool // Make this node a nameserver (runs CoreDNS + Caddy)
JoinAddress string // HTTPS URL of existing node (e.g., https://node1.dbrs.space)
Token string // Invite token for joining (from orama invite)
ClusterSecret string // Deprecated: use --token instead
SwarmKey string // Deprecated: use --token instead
PeersStr string // Deprecated: use --token instead
// IPFS/Cluster specific info for Peering configuration
IPFSPeerID string
IPFSAddrs string
IPFSClusterPeerID string
IPFSClusterAddrs string
// Security flags
SkipFirewall bool // Skip UFW firewall setup (for users who manage their own firewall)
// Anyone relay operator flags
AnyoneRelay bool // Run as relay operator instead of client
AnyoneExit bool // Run as exit relay (legal implications)
AnyoneMigrate bool // Migrate existing Anyone installation
AnyoneNickname string // Relay nickname (1-19 alphanumeric)
AnyoneContact string // Contact info (email or @telegram)
AnyoneWallet string // Ethereum wallet for rewards
AnyoneORPort int // ORPort for relay (default 9001)
AnyoneFamily string // Comma-separated fingerprints of other relays you operate
AnyoneBandwidth int // Percentage of VPS bandwidth for relay (default: 30, 0=unlimited)
AnyoneAccounting int // Monthly data cap for relay in GB (0=unlimited)
}
// ParseFlags parses install command flags
@ -36,16 +55,20 @@ func ParseFlags(args []string) (*Flags, error) {
fs.StringVar(&flags.VpsIP, "vps-ip", "", "Public IP of this VPS (required)")
fs.StringVar(&flags.Domain, "domain", "", "Domain name for HTTPS (optional, e.g. gateway.example.com)")
fs.StringVar(&flags.BaseDomain, "base-domain", "", "Base domain for deployment routing (e.g., dbrs.space)")
fs.StringVar(&flags.Branch, "branch", "main", "Git branch to use (main or nightly)")
fs.BoolVar(&flags.NoPull, "no-pull", false, "Skip git clone/pull, use existing repository in /home/debros/src")
fs.BoolVar(&flags.Force, "force", false, "Force reconfiguration even if already installed")
fs.BoolVar(&flags.DryRun, "dry-run", false, "Show what would be done without making changes")
fs.BoolVar(&flags.SkipChecks, "skip-checks", false, "Skip minimum resource checks (RAM/CPU)")
fs.BoolVar(&flags.PreBuilt, "pre-built", false, "Skip building binaries on VPS, use pre-built binaries already in /home/debros/bin and /usr/local/bin")
fs.BoolVar(&flags.Nameserver, "nameserver", false, "Make this node a nameserver (runs CoreDNS + Caddy)")
// Cluster join flags
fs.StringVar(&flags.JoinAddress, "join", "", "Join an existing cluster (e.g. 1.2.3.4:7001)")
fs.StringVar(&flags.ClusterSecret, "cluster-secret", "", "Cluster secret for IPFS Cluster (required if joining)")
fs.StringVar(&flags.SwarmKey, "swarm-key", "", "IPFS Swarm key (required if joining)")
fs.StringVar(&flags.JoinAddress, "join", "", "Join existing cluster via HTTPS URL (e.g. https://node1.dbrs.space)")
fs.StringVar(&flags.Token, "token", "", "Invite token for joining (from orama invite on existing node)")
fs.StringVar(&flags.ClusterSecret, "cluster-secret", "", "Deprecated: use --token instead")
fs.StringVar(&flags.SwarmKey, "swarm-key", "", "Deprecated: use --token instead")
fs.StringVar(&flags.PeersStr, "peers", "", "Comma-separated list of bootstrap peer multiaddrs")
// IPFS/Cluster specific info for Peering configuration
@ -54,6 +77,21 @@ func ParseFlags(args []string) (*Flags, error) {
fs.StringVar(&flags.IPFSClusterPeerID, "ipfs-cluster-peer", "", "Peer ID of existing IPFS Cluster node")
fs.StringVar(&flags.IPFSClusterAddrs, "ipfs-cluster-addrs", "", "Comma-separated multiaddrs of existing IPFS Cluster node")
// Security flags
fs.BoolVar(&flags.SkipFirewall, "skip-firewall", false, "Skip UFW firewall setup (for users who manage their own firewall)")
// Anyone relay operator flags
fs.BoolVar(&flags.AnyoneRelay, "anyone-relay", false, "Run as Anyone relay operator (earn rewards)")
fs.BoolVar(&flags.AnyoneExit, "anyone-exit", false, "Run as exit relay (requires --anyone-relay, legal implications)")
fs.BoolVar(&flags.AnyoneMigrate, "anyone-migrate", false, "Migrate existing Anyone installation into Orama Network")
fs.StringVar(&flags.AnyoneNickname, "anyone-nickname", "", "Relay nickname (1-19 alphanumeric chars)")
fs.StringVar(&flags.AnyoneContact, "anyone-contact", "", "Contact info (email or @telegram)")
fs.StringVar(&flags.AnyoneWallet, "anyone-wallet", "", "Ethereum wallet address for rewards")
fs.IntVar(&flags.AnyoneORPort, "anyone-orport", 9001, "ORPort for relay (default 9001)")
fs.StringVar(&flags.AnyoneFamily, "anyone-family", "", "Comma-separated fingerprints of other relays you operate")
fs.IntVar(&flags.AnyoneBandwidth, "anyone-bandwidth", 30, "Limit relay to N% of VPS bandwidth (0=unlimited, runs speedtest)")
fs.IntVar(&flags.AnyoneAccounting, "anyone-accounting", 0, "Monthly data cap for relay in GB (0=unlimited)")
if err := fs.Parse(args); err != nil {
if err == flag.ErrHelp {
return nil, err

View File

@ -1,13 +1,21 @@
package install
import (
"bufio"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/cli/utils"
"github.com/DeBrosOfficial/network/pkg/environments/production"
joinhandlers "github.com/DeBrosOfficial/network/pkg/gateway/handlers/join"
)
// Orchestrator manages the install process
@ -25,13 +33,36 @@ func NewOrchestrator(flags *Flags) (*Orchestrator, error) {
oramaHome := "/home/debros"
oramaDir := oramaHome + "/.orama"
// Prompt for base domain if not provided via flag
if flags.BaseDomain == "" {
flags.BaseDomain = promptForBaseDomain()
}
// Normalize peers
peers, err := utils.NormalizePeers(flags.PeersStr)
if err != nil {
return nil, fmt.Errorf("invalid peers: %w", err)
}
setup := production.NewProductionSetup(oramaHome, os.Stdout, flags.Force, flags.Branch, flags.NoPull, flags.SkipChecks)
setup := production.NewProductionSetup(oramaHome, os.Stdout, flags.Force, flags.Branch, flags.NoPull, flags.SkipChecks, flags.PreBuilt)
setup.SetNameserver(flags.Nameserver)
// Configure Anyone relay if enabled
if flags.AnyoneRelay {
setup.SetAnyoneRelayConfig(&production.AnyoneRelayConfig{
Enabled: true,
Exit: flags.AnyoneExit,
Migrate: flags.AnyoneMigrate,
Nickname: flags.AnyoneNickname,
Contact: flags.AnyoneContact,
Wallet: flags.AnyoneWallet,
ORPort: flags.AnyoneORPort,
MyFamily: flags.AnyoneFamily,
BandwidthPct: flags.AnyoneBandwidth,
AccountingMax: flags.AnyoneAccounting,
})
}
validator := NewValidator(flags, oramaDir)
return &Orchestrator{
@ -54,23 +85,49 @@ func (o *Orchestrator) Execute() error {
fmt.Printf(" Using existing repository at /home/debros/src\n")
}
// Inform user if using pre-built binaries
if o.flags.PreBuilt {
fmt.Printf(" ⚠️ --pre-built flag enabled: Skipping all Go compilation\n")
fmt.Printf(" Using pre-built binaries from /home/debros/bin and /usr/local/bin\n")
}
// Validate DNS if domain is provided
o.validator.ValidateDNS()
// Dry-run mode: show what would be done and exit
if o.flags.DryRun {
utils.ShowDryRunSummary(o.flags.VpsIP, o.flags.Domain, o.flags.Branch, o.peers, o.flags.JoinAddress, o.validator.IsFirstNode(), o.oramaDir)
var relayInfo *utils.AnyoneRelayDryRunInfo
if o.flags.AnyoneRelay {
relayInfo = &utils.AnyoneRelayDryRunInfo{
Enabled: true,
Exit: o.flags.AnyoneExit,
Nickname: o.flags.AnyoneNickname,
Contact: o.flags.AnyoneContact,
Wallet: o.flags.AnyoneWallet,
ORPort: o.flags.AnyoneORPort,
}
}
utils.ShowDryRunSummaryWithRelay(o.flags.VpsIP, o.flags.Domain, o.flags.Branch, o.peers, o.flags.JoinAddress, o.validator.IsFirstNode(), o.oramaDir, relayInfo)
return nil
}
// Save secrets before installation
if err := o.validator.SaveSecrets(); err != nil {
return err
// Save secrets before installation (only for genesis; join flow gets secrets from response)
if !o.isJoiningNode() {
if err := o.validator.SaveSecrets(); err != nil {
return err
}
}
// Save branch preference for future upgrades
if err := production.SaveBranchPreference(o.oramaDir, o.flags.Branch); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Warning: Failed to save branch preference: %v\n", err)
// Save preferences for future upgrades (branch + nameserver)
prefs := &production.NodePreferences{
Branch: o.flags.Branch,
Nameserver: o.flags.Nameserver,
}
if err := production.SavePreferences(o.oramaDir, prefs); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Warning: Failed to save preferences: %v\n", err)
}
if o.flags.Nameserver {
fmt.Printf(" This node will be a nameserver (CoreDNS + Caddy)\n")
}
// Phase 1: Check prerequisites
@ -91,30 +148,56 @@ func (o *Orchestrator) Execute() error {
return fmt.Errorf("binary installation failed: %w", err)
}
// Phase 3: Generate secrets FIRST (before service initialization)
// Branch: genesis node vs joining node
if o.isJoiningNode() {
return o.executeJoinFlow()
}
return o.executeGenesisFlow()
}
// isJoiningNode returns true if --join and --token are both set
func (o *Orchestrator) isJoiningNode() bool {
return o.flags.JoinAddress != "" && o.flags.Token != ""
}
// executeGenesisFlow runs the install for the first node in a new cluster
func (o *Orchestrator) executeGenesisFlow() error {
// Phase 3: Generate secrets locally
fmt.Printf("\n🔐 Phase 3: Generating secrets...\n")
if err := o.setup.Phase3GenerateSecrets(); err != nil {
return fmt.Errorf("secret generation failed: %w", err)
}
// Phase 4: Generate configs (BEFORE service initialization)
// Phase 6a: WireGuard — self-assign 10.0.0.1
fmt.Printf("\n🔒 Phase 6a: Setting up WireGuard mesh VPN...\n")
if _, _, err := o.setup.Phase6SetupWireGuard(true); err != nil {
fmt.Fprintf(os.Stderr, " ⚠️ Warning: WireGuard setup failed: %v\n", err)
} else {
fmt.Printf(" ✓ WireGuard configured (10.0.0.1)\n")
}
// Phase 6b: UFW firewall
fmt.Printf("\n🛡 Phase 6b: Setting up UFW firewall...\n")
if err := o.setup.Phase6bSetupFirewall(o.flags.SkipFirewall); err != nil {
fmt.Fprintf(os.Stderr, " ⚠️ Warning: Firewall setup failed: %v\n", err)
}
// Phase 4: Generate configs using WG IP (10.0.0.1) as advertise address
// All inter-node communication uses WireGuard IPs, not public IPs
fmt.Printf("\n⚙ Phase 4: Generating configurations...\n")
enableHTTPS := o.flags.Domain != ""
if err := o.setup.Phase4GenerateConfigs(o.peers, o.flags.VpsIP, enableHTTPS, o.flags.Domain, o.flags.JoinAddress); err != nil {
enableHTTPS := false
genesisWGIP := "10.0.0.1"
if err := o.setup.Phase4GenerateConfigs(o.peers, genesisWGIP, enableHTTPS, o.flags.Domain, o.flags.BaseDomain, ""); err != nil {
return fmt.Errorf("configuration generation failed: %w", err)
}
// Validate generated configuration
if err := o.validator.ValidateGeneratedConfig(); err != nil {
return err
}
// Phase 2c: Initialize services (after config is in place)
// Phase 2c: Initialize services (use WG IP for IPFS Cluster peer discovery)
fmt.Printf("\nPhase 2c: Initializing services...\n")
ipfsPeerInfo := o.buildIPFSPeerInfo()
ipfsClusterPeerInfo := o.buildIPFSClusterPeerInfo()
if err := o.setup.Phase2cInitializeServices(o.peers, o.flags.VpsIP, ipfsPeerInfo, ipfsClusterPeerInfo); err != nil {
if err := o.setup.Phase2cInitializeServices(o.peers, genesisWGIP, nil, nil); err != nil {
return fmt.Errorf("service initialization failed: %w", err)
}
@ -124,18 +207,239 @@ func (o *Orchestrator) Execute() error {
return fmt.Errorf("service creation failed: %w", err)
}
// Log completion with actual peer ID
// Install namespace systemd template units
fmt.Printf("\n🔧 Phase 5b: Installing namespace systemd templates...\n")
if err := o.installNamespaceTemplates(); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Template installation warning: %v\n", err)
}
// Phase 7: Seed DNS records (with retry — migrations may still be running)
if o.flags.Nameserver && o.flags.BaseDomain != "" {
fmt.Printf("\n🌐 Phase 7: Seeding DNS records...\n")
var seedErr error
for attempt := 1; attempt <= 6; attempt++ {
waitSec := 5 * attempt
fmt.Printf(" Waiting for RQLite + migrations (%ds, attempt %d/6)...\n", waitSec, attempt)
time.Sleep(time.Duration(waitSec) * time.Second)
seedErr = o.setup.SeedDNSRecords(o.flags.BaseDomain, o.flags.VpsIP, o.peers)
if seedErr == nil {
fmt.Printf(" ✓ DNS records seeded\n")
break
}
fmt.Fprintf(os.Stderr, " ⚠️ Attempt %d failed: %v\n", attempt, seedErr)
}
if seedErr != nil {
fmt.Fprintf(os.Stderr, " ⚠️ Warning: DNS seeding failed after all attempts.\n")
fmt.Fprintf(os.Stderr, " Records will self-heal via node heartbeat once running.\n")
}
}
o.setup.LogSetupComplete(o.setup.NodePeerID)
fmt.Printf("✅ Production installation complete!\n\n")
o.printFirstNodeSecrets()
return nil
}
// For first node, print important secrets and identifiers
if o.validator.IsFirstNode() {
o.printFirstNodeSecrets()
// executeJoinFlow runs the install for a node joining an existing cluster via invite token
func (o *Orchestrator) executeJoinFlow() error {
// Step 1: Generate WG keypair
fmt.Printf("\n🔑 Generating WireGuard keypair...\n")
privKey, pubKey, err := production.GenerateKeyPair()
if err != nil {
return fmt.Errorf("failed to generate WG keypair: %w", err)
}
fmt.Printf(" ✓ WireGuard keypair generated\n")
// Step 2: Call join endpoint on existing node
fmt.Printf("\n🤝 Requesting cluster join from %s...\n", o.flags.JoinAddress)
joinResp, err := o.callJoinEndpoint(pubKey)
if err != nil {
return fmt.Errorf("join request failed: %w", err)
}
fmt.Printf(" ✓ Join approved — assigned WG IP: %s\n", joinResp.WGIP)
fmt.Printf(" ✓ Received %d WG peers\n", len(joinResp.WGPeers))
// Step 3: Configure WireGuard with assigned IP and peers
fmt.Printf("\n🔒 Configuring WireGuard tunnel...\n")
var wgPeers []production.WireGuardPeer
for _, p := range joinResp.WGPeers {
wgPeers = append(wgPeers, production.WireGuardPeer{
PublicKey: p.PublicKey,
Endpoint: p.Endpoint,
AllowedIP: p.AllowedIP,
})
}
// Install WG package first
wp := production.NewWireGuardProvisioner(production.WireGuardConfig{})
if err := wp.Install(); err != nil {
return fmt.Errorf("failed to install wireguard: %w", err)
}
if err := o.setup.EnableWireGuardWithPeers(privKey, joinResp.WGIP, wgPeers); err != nil {
return fmt.Errorf("failed to enable WireGuard: %w", err)
}
// Step 4: Verify WG tunnel
fmt.Printf("\n🔍 Verifying WireGuard tunnel...\n")
if err := o.verifyWGTunnel(joinResp.WGPeers); err != nil {
return fmt.Errorf("WireGuard tunnel verification failed: %w", err)
}
fmt.Printf(" ✓ WireGuard tunnel established\n")
// Step 5: UFW firewall
fmt.Printf("\n🛡 Setting up UFW firewall...\n")
if err := o.setup.Phase6bSetupFirewall(o.flags.SkipFirewall); err != nil {
fmt.Fprintf(os.Stderr, " ⚠️ Warning: Firewall setup failed: %v\n", err)
}
// Step 6: Save secrets from join response
fmt.Printf("\n🔐 Saving cluster secrets...\n")
if err := o.saveSecretsFromJoinResponse(joinResp); err != nil {
return fmt.Errorf("failed to save secrets: %w", err)
}
fmt.Printf(" ✓ Secrets saved\n")
// Step 7: Generate configs using WG IP as advertise address
// All inter-node communication uses WireGuard IPs, not public IPs
fmt.Printf("\n⚙ Generating configurations...\n")
enableHTTPS := false
rqliteJoin := joinResp.RQLiteJoinAddress
if err := o.setup.Phase4GenerateConfigs(joinResp.BootstrapPeers, joinResp.WGIP, enableHTTPS, o.flags.Domain, joinResp.BaseDomain, rqliteJoin, joinResp.OlricPeers); err != nil {
return fmt.Errorf("configuration generation failed: %w", err)
}
if err := o.validator.ValidateGeneratedConfig(); err != nil {
return err
}
// Step 8: Initialize services with IPFS peer info from join response
fmt.Printf("\nInitializing services...\n")
var ipfsPeerInfo *production.IPFSPeerInfo
if joinResp.IPFSPeer.ID != "" {
ipfsPeerInfo = &production.IPFSPeerInfo{
PeerID: joinResp.IPFSPeer.ID,
Addrs: joinResp.IPFSPeer.Addrs,
}
}
var ipfsClusterPeerInfo *production.IPFSClusterPeerInfo
if joinResp.IPFSClusterPeer.ID != "" {
ipfsClusterPeerInfo = &production.IPFSClusterPeerInfo{
PeerID: joinResp.IPFSClusterPeer.ID,
Addrs: joinResp.IPFSClusterPeer.Addrs,
}
}
if err := o.setup.Phase2cInitializeServices(joinResp.BootstrapPeers, joinResp.WGIP, ipfsPeerInfo, ipfsClusterPeerInfo); err != nil {
return fmt.Errorf("service initialization failed: %w", err)
}
// Step 9: Create systemd services
fmt.Printf("\n🔧 Creating systemd services...\n")
if err := o.setup.Phase5CreateSystemdServices(enableHTTPS); err != nil {
return fmt.Errorf("service creation failed: %w", err)
}
// Install namespace systemd template units
fmt.Printf("\n🔧 Installing namespace systemd templates...\n")
if err := o.installNamespaceTemplates(); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Template installation warning: %v\n", err)
}
o.setup.LogSetupComplete(o.setup.NodePeerID)
fmt.Printf("✅ Production installation complete! Joined cluster via %s\n\n", o.flags.JoinAddress)
return nil
}
// callJoinEndpoint sends the join request to the existing node's HTTPS endpoint
func (o *Orchestrator) callJoinEndpoint(wgPubKey string) (*joinhandlers.JoinResponse, error) {
reqBody := joinhandlers.JoinRequest{
Token: o.flags.Token,
WGPublicKey: wgPubKey,
PublicIP: o.flags.VpsIP,
}
bodyBytes, err := json.Marshal(reqBody)
if err != nil {
return nil, fmt.Errorf("failed to marshal request: %w", err)
}
url := strings.TrimRight(o.flags.JoinAddress, "/") + "/v1/internal/join"
client := &http.Client{
Timeout: 30 * time.Second,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true, // Self-signed certs during initial setup
},
},
}
resp, err := client.Post(url, "application/json", strings.NewReader(string(bodyBytes)))
if err != nil {
return nil, fmt.Errorf("failed to contact %s: %w", url, err)
}
defer resp.Body.Close()
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %w", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("join rejected (HTTP %d): %s", resp.StatusCode, string(respBody))
}
var joinResp joinhandlers.JoinResponse
if err := json.Unmarshal(respBody, &joinResp); err != nil {
return nil, fmt.Errorf("failed to parse join response: %w", err)
}
return &joinResp, nil
}
// saveSecretsFromJoinResponse writes cluster secrets received from the join endpoint to disk
func (o *Orchestrator) saveSecretsFromJoinResponse(resp *joinhandlers.JoinResponse) error {
secretsDir := filepath.Join(o.oramaDir, "secrets")
if err := os.MkdirAll(secretsDir, 0700); err != nil {
return fmt.Errorf("failed to create secrets dir: %w", err)
}
// Write cluster secret
if resp.ClusterSecret != "" {
if err := os.WriteFile(filepath.Join(secretsDir, "cluster-secret"), []byte(resp.ClusterSecret), 0600); err != nil {
return fmt.Errorf("failed to write cluster-secret: %w", err)
}
}
// Write swarm key
if resp.SwarmKey != "" {
if err := os.WriteFile(filepath.Join(secretsDir, "swarm.key"), []byte(resp.SwarmKey), 0600); err != nil {
return fmt.Errorf("failed to write swarm.key: %w", err)
}
}
return nil
}
// verifyWGTunnel pings a WG peer to verify the tunnel is working
func (o *Orchestrator) verifyWGTunnel(peers []joinhandlers.WGPeerInfo) error {
if len(peers) == 0 {
return fmt.Errorf("no WG peers to verify")
}
// Extract the IP from the first peer's AllowedIP (e.g. "10.0.0.1/32" -> "10.0.0.1")
targetIP := strings.TrimSuffix(peers[0].AllowedIP, "/32")
// Retry ping for up to 30 seconds
deadline := time.Now().Add(30 * time.Second)
for time.Now().Before(deadline) {
cmd := exec.Command("ping", "-c", "1", "-W", "2", targetIP)
if err := cmd.Run(); err == nil {
return nil
}
time.Sleep(2 * time.Second)
}
return fmt.Errorf("could not reach %s via WireGuard after 30s", targetIP)
}
func (o *Orchestrator) buildIPFSPeerInfo() *production.IPFSPeerInfo {
if o.flags.IPFSPeerID != "" {
var addrs []string
@ -190,3 +494,96 @@ func (o *Orchestrator) printFirstNodeSecrets() {
fmt.Printf(" Node Peer ID:\n")
fmt.Printf(" %s\n\n", o.setup.NodePeerID)
}
// promptForBaseDomain interactively prompts the user to select a network environment
// Returns the selected base domain for deployment routing
func promptForBaseDomain() string {
reader := bufio.NewReader(os.Stdin)
fmt.Println("\n🌐 Network Environment Selection")
fmt.Println("=================================")
fmt.Println("Select the network environment for this node:")
fmt.Println()
fmt.Println(" 1. orama-devnet.network (Development - for testing)")
fmt.Println(" 2. orama-testnet.network (Testnet - pre-production)")
fmt.Println(" 3. orama-mainnet.network (Mainnet - production)")
fmt.Println(" 4. Custom domain...")
fmt.Println()
fmt.Print("Select option [1-4] (default: 1): ")
choice, _ := reader.ReadString('\n')
choice = strings.TrimSpace(choice)
switch choice {
case "", "1":
fmt.Println("✓ Selected: orama-devnet.network")
return "orama-devnet.network"
case "2":
fmt.Println("✓ Selected: orama-testnet.network")
return "orama-testnet.network"
case "3":
fmt.Println("✓ Selected: orama-mainnet.network")
return "orama-mainnet.network"
case "4":
fmt.Print("Enter custom base domain (e.g., example.com): ")
customDomain, _ := reader.ReadString('\n')
customDomain = strings.TrimSpace(customDomain)
if customDomain == "" {
fmt.Println("⚠️ No domain entered, using orama-devnet.network")
return "orama-devnet.network"
}
// Remove any protocol prefix if user included it
customDomain = strings.TrimPrefix(customDomain, "https://")
customDomain = strings.TrimPrefix(customDomain, "http://")
customDomain = strings.TrimSuffix(customDomain, "/")
fmt.Printf("✓ Selected: %s\n", customDomain)
return customDomain
default:
fmt.Println("⚠️ Invalid option, using orama-devnet.network")
return "orama-devnet.network"
}
}
// installNamespaceTemplates installs systemd template unit files for namespace services
func (o *Orchestrator) installNamespaceTemplates() error {
sourceDir := filepath.Join(o.oramaHome, "src", "systemd")
systemdDir := "/etc/systemd/system"
templates := []string{
"debros-namespace-rqlite@.service",
"debros-namespace-olric@.service",
"debros-namespace-gateway@.service",
}
installedCount := 0
for _, template := range templates {
sourcePath := filepath.Join(sourceDir, template)
destPath := filepath.Join(systemdDir, template)
// Read template file
data, err := os.ReadFile(sourcePath)
if err != nil {
fmt.Printf(" ⚠️ Warning: Failed to read %s: %v\n", template, err)
continue
}
// Write to systemd directory
if err := os.WriteFile(destPath, data, 0644); err != nil {
fmt.Printf(" ⚠️ Warning: Failed to install %s: %v\n", template, err)
continue
}
installedCount++
fmt.Printf(" ✓ Installed %s\n", template)
}
if installedCount > 0 {
// Reload systemd daemon to pick up new templates
if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil {
return fmt.Errorf("failed to reload systemd daemon: %w", err)
}
fmt.Printf(" ✓ Systemd daemon reloaded (%d templates installed)\n", installedCount)
}
return nil
}

View File

@ -7,20 +7,22 @@ import (
"strings"
"github.com/DeBrosOfficial/network/pkg/cli/utils"
"github.com/DeBrosOfficial/network/pkg/config/validate"
"github.com/DeBrosOfficial/network/pkg/environments/production/installers"
)
// Validator validates install command inputs
type Validator struct {
flags *Flags
oramaDir string
flags *Flags
oramaDir string
isFirstNode bool
}
// NewValidator creates a new validator
func NewValidator(flags *Flags, oramaDir string) *Validator {
return &Validator{
flags: flags,
oramaDir: oramaDir,
flags: flags,
oramaDir: oramaDir,
isFirstNode: flags.JoinAddress == "",
}
}
@ -28,7 +30,7 @@ func NewValidator(flags *Flags, oramaDir string) *Validator {
// ValidateFlags validates required flags
func (v *Validator) ValidateFlags() error {
if v.flags.VpsIP == "" && !v.flags.DryRun {
return fmt.Errorf("--vps-ip is required for installation\nExample: dbn prod install --vps-ip 1.2.3.4")
return fmt.Errorf("--vps-ip is required for installation\nExample: orama prod install --vps-ip 1.2.3.4")
}
return nil
}
@ -43,7 +45,17 @@ func (v *Validator) ValidateRootPrivileges() error {
// ValidatePorts validates port availability
func (v *Validator) ValidatePorts() error {
if err := utils.EnsurePortsAvailable("install", utils.DefaultPorts()); err != nil {
ports := utils.DefaultPorts()
// Add ORPort check for relay mode (skip if migrating existing installation)
if v.flags.AnyoneRelay && !v.flags.AnyoneMigrate {
ports = append(ports, utils.PortSpec{
Name: "Anyone ORPort",
Port: v.flags.AnyoneORPort,
})
}
if err := utils.EnsurePortsAvailable("install", ports); err != nil {
return err
}
return nil
@ -88,8 +100,9 @@ func (v *Validator) SaveSecrets() error {
if err := os.MkdirAll(secretsDir, 0755); err != nil {
return fmt.Errorf("failed to create secrets directory: %w", err)
}
// Convert 64-hex key to full swarm.key format
swarmKeyContent := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", strings.ToUpper(v.flags.SwarmKey))
// Extract hex only (strips headers if user passed full file content)
hexKey := strings.ToUpper(validate.ExtractSwarmKeyHex(v.flags.SwarmKey))
swarmKeyContent := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", hexKey)
swarmKeyPath := filepath.Join(secretsDir, "swarm.key")
if err := os.WriteFile(swarmKeyPath, []byte(swarmKeyContent), 0600); err != nil {
return fmt.Errorf("failed to save swarm key: %w", err)
@ -104,3 +117,125 @@ func (v *Validator) SaveSecrets() error {
func (v *Validator) IsFirstNode() bool {
return v.isFirstNode
}
// ValidateAnyoneRelayFlags validates Anyone relay configuration and displays warnings
func (v *Validator) ValidateAnyoneRelayFlags() error {
// Skip validation if not running as relay
if !v.flags.AnyoneRelay {
return nil
}
fmt.Printf("\n🔗 Anyone Relay Configuration\n")
// Check for existing Anyone installation
existing, err := installers.DetectExistingAnyoneInstallation()
if err != nil {
fmt.Printf(" ⚠️ Warning: failed to detect existing installation: %v\n", err)
}
if existing != nil {
fmt.Printf(" ⚠️ Existing Anyone relay detected:\n")
if existing.Fingerprint != "" {
fmt.Printf(" Fingerprint: %s\n", existing.Fingerprint)
}
if existing.Nickname != "" {
fmt.Printf(" Nickname: %s\n", existing.Nickname)
}
if existing.Wallet != "" {
fmt.Printf(" Wallet: %s\n", existing.Wallet)
}
if existing.MyFamily != "" {
familyCount := len(strings.Split(existing.MyFamily, ","))
fmt.Printf(" MyFamily: %d relays\n", familyCount)
}
fmt.Printf(" Keys: %s\n", existing.KeysPath)
fmt.Printf(" Config: %s\n", existing.ConfigPath)
if existing.IsRunning {
fmt.Printf(" Status: Running\n")
}
if !v.flags.AnyoneMigrate {
fmt.Printf("\n 💡 Use --anyone-migrate to preserve existing keys and fingerprint\n")
} else {
fmt.Printf("\n ✓ Will migrate existing installation (keys preserved)\n")
// Auto-populate missing values from existing installation
if v.flags.AnyoneNickname == "" && existing.Nickname != "" {
v.flags.AnyoneNickname = existing.Nickname
fmt.Printf(" ✓ Using existing nickname: %s\n", existing.Nickname)
}
if v.flags.AnyoneWallet == "" && existing.Wallet != "" {
v.flags.AnyoneWallet = existing.Wallet
fmt.Printf(" ✓ Using existing wallet: %s\n", existing.Wallet)
}
}
fmt.Println()
}
// Validate required fields for relay mode
if v.flags.AnyoneNickname == "" {
return fmt.Errorf("--anyone-nickname is required for relay mode")
}
if err := installers.ValidateNickname(v.flags.AnyoneNickname); err != nil {
return fmt.Errorf("invalid --anyone-nickname: %w", err)
}
if v.flags.AnyoneWallet == "" {
return fmt.Errorf("--anyone-wallet is required for relay mode (for rewards)")
}
if err := installers.ValidateWallet(v.flags.AnyoneWallet); err != nil {
return fmt.Errorf("invalid --anyone-wallet: %w", err)
}
if v.flags.AnyoneContact == "" {
return fmt.Errorf("--anyone-contact is required for relay mode")
}
// Validate ORPort
if v.flags.AnyoneORPort < 1 || v.flags.AnyoneORPort > 65535 {
return fmt.Errorf("--anyone-orport must be between 1 and 65535")
}
// Validate bandwidth percentage
if v.flags.AnyoneBandwidth < 0 || v.flags.AnyoneBandwidth > 100 {
return fmt.Errorf("--anyone-bandwidth must be between 0 and 100")
}
// Validate accounting
if v.flags.AnyoneAccounting < 0 {
return fmt.Errorf("--anyone-accounting must be >= 0")
}
// Display configuration summary
fmt.Printf(" Nickname: %s\n", v.flags.AnyoneNickname)
fmt.Printf(" Contact: %s\n", v.flags.AnyoneContact)
fmt.Printf(" Wallet: %s\n", v.flags.AnyoneWallet)
fmt.Printf(" ORPort: %d\n", v.flags.AnyoneORPort)
if v.flags.AnyoneExit {
fmt.Printf(" Mode: Exit Relay\n")
} else {
fmt.Printf(" Mode: Non-exit Relay\n")
}
if v.flags.AnyoneBandwidth > 0 {
fmt.Printf(" Bandwidth: %d%% of VPS speed (speedtest will run during install)\n", v.flags.AnyoneBandwidth)
} else {
fmt.Printf(" Bandwidth: Unlimited\n")
}
if v.flags.AnyoneAccounting > 0 {
fmt.Printf(" Data cap: %d GB/month\n", v.flags.AnyoneAccounting)
}
// Warning about token requirement
fmt.Printf("\n ⚠️ IMPORTANT: Relay operators must hold 100 $ANYONE tokens\n")
fmt.Printf(" in wallet %s to receive rewards.\n", v.flags.AnyoneWallet)
fmt.Printf(" Register at: https://dashboard.anyone.io\n")
// Exit relay warning
if v.flags.AnyoneExit {
fmt.Printf("\n ⚠️ EXIT RELAY WARNING:\n")
fmt.Printf(" Running an exit relay may expose you to legal liability\n")
fmt.Printf(" for traffic that exits through your node.\n")
fmt.Printf(" Ensure you understand the implications before proceeding.\n")
}
fmt.Println()
return nil
}

View File

@ -0,0 +1,115 @@
package invite
import (
"crypto/rand"
"encoding/hex"
"fmt"
"net/http"
"os"
"strings"
"time"
"gopkg.in/yaml.v3"
)
// Handle processes the invite command
func Handle(args []string) {
// Must run on a cluster node with RQLite running locally
domain, err := readNodeDomain()
if err != nil {
fmt.Fprintf(os.Stderr, "Error: could not read node config: %v\n", err)
fmt.Fprintf(os.Stderr, "Make sure you're running this on an installed node.\n")
os.Exit(1)
}
// Generate random token
tokenBytes := make([]byte, 32)
if _, err := rand.Read(tokenBytes); err != nil {
fmt.Fprintf(os.Stderr, "Error generating token: %v\n", err)
os.Exit(1)
}
token := hex.EncodeToString(tokenBytes)
// Determine expiry (default 1 hour, --expiry flag for override)
expiry := time.Hour
for i, arg := range args {
if arg == "--expiry" && i+1 < len(args) {
d, err := time.ParseDuration(args[i+1])
if err != nil {
fmt.Fprintf(os.Stderr, "Invalid expiry duration: %v\n", err)
os.Exit(1)
}
expiry = d
}
}
expiresAt := time.Now().UTC().Add(expiry).Format("2006-01-02 15:04:05")
// Get node ID for created_by
nodeID := "unknown"
if hostname, err := os.Hostname(); err == nil {
nodeID = hostname
}
// Insert token into RQLite via HTTP API
if err := insertToken(token, nodeID, expiresAt); err != nil {
fmt.Fprintf(os.Stderr, "Error storing invite token: %v\n", err)
fmt.Fprintf(os.Stderr, "Make sure RQLite is running on this node.\n")
os.Exit(1)
}
// Print the invite command
fmt.Printf("\nInvite token created (expires in %s)\n\n", expiry)
fmt.Printf("Run this on the new node:\n\n")
fmt.Printf(" sudo orama install --join https://%s --token %s --vps-ip <NEW_NODE_IP> --nameserver\n\n", domain, token)
fmt.Printf("Replace <NEW_NODE_IP> with the new node's public IP address.\n")
}
// readNodeDomain reads the domain from the node config file
func readNodeDomain() (string, error) {
configPath := "/home/debros/.orama/configs/node.yaml"
data, err := os.ReadFile(configPath)
if err != nil {
return "", fmt.Errorf("read config: %w", err)
}
var config struct {
Node struct {
Domain string `yaml:"domain"`
} `yaml:"node"`
}
if err := yaml.Unmarshal(data, &config); err != nil {
return "", fmt.Errorf("parse config: %w", err)
}
if config.Node.Domain == "" {
return "", fmt.Errorf("node domain not set in config")
}
return config.Node.Domain, nil
}
// insertToken inserts an invite token into RQLite via HTTP API
func insertToken(token, createdBy, expiresAt string) error {
body := fmt.Sprintf(`[["INSERT INTO invite_tokens (token, created_by, expires_at) VALUES ('%s', '%s', '%s')"]]`,
token, createdBy, expiresAt)
req, err := http.NewRequest("POST", "http://localhost:5001/db/execute", strings.NewReader(body))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{Timeout: 5 * time.Second}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to connect to RQLite: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("RQLite returned status %d", resp.StatusCode)
}
return nil
}

View File

@ -51,7 +51,7 @@ func HandleStart() {
}
if active {
fmt.Printf(" %s already running\n", svc)
// Re-enable if disabled (in case it was stopped with 'dbn prod stop')
// Re-enable if disabled (in case it was stopped with 'orama prod stop')
enabled, err := utils.IsServiceEnabled(svc)
if err == nil && !enabled {
if err := exec.Command("systemctl", "enable", svc).Run(); err != nil {
@ -83,7 +83,7 @@ func HandleStart() {
// Enable and start inactive services
for _, svc := range inactive {
// Re-enable the service first (in case it was disabled by 'dbn prod stop')
// Re-enable the service first (in case it was disabled by 'orama prod stop')
enabled, err := utils.IsServiceEnabled(svc)
if err == nil && !enabled {
if err := exec.Command("systemctl", "enable", svc).Run(); err != nil {

View File

@ -4,6 +4,7 @@ import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/DeBrosOfficial/network/pkg/cli/utils"
@ -18,12 +19,18 @@ func HandleStop() {
fmt.Printf("Stopping all DeBros production services...\n")
// First, stop all namespace services
fmt.Printf("\n Stopping namespace services...\n")
stopAllNamespaceServices()
services := utils.GetProductionServices()
if len(services) == 0 {
fmt.Printf(" ⚠️ No DeBros services found\n")
return
}
fmt.Printf("\n Stopping main services...\n")
// First, disable all services to prevent auto-restart
disableArgs := []string{"disable"}
disableArgs = append(disableArgs, services...)
@ -107,6 +114,43 @@ func HandleStop() {
fmt.Fprintf(os.Stderr, " If services are still restarting, they may need manual intervention\n")
} else {
fmt.Printf("\n✅ All services stopped and disabled (will not auto-start on boot)\n")
fmt.Printf(" Use 'dbn prod start' to start and re-enable services\n")
fmt.Printf(" Use 'orama prod start' to start and re-enable services\n")
}
}
// stopAllNamespaceServices stops all running namespace services
func stopAllNamespaceServices() {
// Find all running namespace services using systemctl list-units
cmd := exec.Command("systemctl", "list-units", "--type=service", "--all", "--no-pager", "--no-legend", "debros-namespace-*@*.service")
output, err := cmd.Output()
if err != nil {
fmt.Printf(" ⚠️ Warning: Failed to list namespace services: %v\n", err)
return
}
lines := strings.Split(string(output), "\n")
var namespaceServices []string
for _, line := range lines {
fields := strings.Fields(line)
if len(fields) > 0 {
serviceName := fields[0]
if strings.HasPrefix(serviceName, "debros-namespace-") {
namespaceServices = append(namespaceServices, serviceName)
}
}
}
if len(namespaceServices) == 0 {
fmt.Printf(" No namespace services found\n")
return
}
// Stop all namespace services
for _, svc := range namespaceServices {
if err := exec.Command("systemctl", "stop", svc).Run(); err != nil {
fmt.Printf(" ⚠️ Warning: Failed to stop %s: %v\n", svc, err)
}
}
fmt.Printf(" ✓ Stopped %d namespace service(s)\n", len(namespaceServices))
}

View File

@ -47,7 +47,7 @@ func Handle(args []string) {
}
func showUsage() {
fmt.Fprintf(os.Stderr, "Usage: dbn prod logs <service> [--follow]\n")
fmt.Fprintf(os.Stderr, "Usage: orama prod logs <service> [--follow]\n")
fmt.Fprintf(os.Stderr, "\nService aliases:\n")
fmt.Fprintf(os.Stderr, " node, ipfs, cluster, gateway, olric\n")
fmt.Fprintf(os.Stderr, "\nOr use full service name:\n")

View File

@ -54,5 +54,5 @@ func Handle() {
fmt.Printf(" ❌ %s not found\n", oramaDir)
}
fmt.Printf("\nView logs with: dbn prod logs <service>\n")
fmt.Printf("\nView logs with: orama prod logs <service>\n")
}

Some files were not shown because too many files have changed in this diff Show More