Merge pull request #67 from DeBrosOfficial/ipfs

Ipfs & olric
This commit is contained in:
anonpenguin 2025-11-06 06:26:05 +02:00 committed by GitHub
commit a262da2c29
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
30 changed files with 4609 additions and 133 deletions

View File

@ -30,6 +30,15 @@ if [ -z "$OTHER_FILES" ]; then
exit 0 exit 0
fi fi
# Check for skip flag
# To skip changelog generation, set SKIP_CHANGELOG=1 before committing:
# SKIP_CHANGELOG=1 git commit -m "your message"
# SKIP_CHANGELOG=1 git commit
if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then
echo -e "${YELLOW}Skipping changelog update (SKIP_CHANGELOG is set)${NOCOLOR}"
exit 0
fi
# Update changelog before commit # Update changelog before commit
if [ -f "$CHANGELOG_SCRIPT" ]; then if [ -f "$CHANGELOG_SCRIPT" ]; then
echo -e "\n${CYAN}Updating changelog...${NOCOLOR}" echo -e "\n${CYAN}Updating changelog...${NOCOLOR}"

View File

@ -11,7 +11,7 @@
"program": "./cmd/gateway", "program": "./cmd/gateway",
"env": { "env": {
"GATEWAY_ADDR": ":6001", "GATEWAY_ADDR": ":6001",
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", "GATEWAY_BOOTSTRAP_PEERS": "/ip4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
"GATEWAY_NAMESPACE": "default", "GATEWAY_NAMESPACE": "default",
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default" "GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
} }
@ -36,7 +36,7 @@
"program": "./cmd/gateway", "program": "./cmd/gateway",
"env": { "env": {
"GATEWAY_ADDR": ":6001", "GATEWAY_ADDR": ":6001",
"GATEWAY_BOOTSTRAP_PEERS": "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee", "GATEWAY_BOOTSTRAP_PEERS": "/ip4/localhost/tcp/4001/p2p/12D3KooWSHHwEY6cga3ng7tD1rzStAU58ogQXVMX3LZJ6Gqf6dee",
"GATEWAY_NAMESPACE": "default", "GATEWAY_NAMESPACE": "default",
"GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default" "GATEWAY_API_KEY": "ak_iGustrsFk9H8uXpwczCATe5U:default"
} }

View File

@ -13,15 +13,38 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Deprecated ### Deprecated
### Fixed ### Fixed
## [0.56.0] - 2025-11-05
### Added
- Added IPFS storage endpoints to the Gateway for content upload, pinning, status, retrieval, and unpinning.
- Introduced `StorageClient` interface and implementation in the Go client library for interacting with the new IPFS storage endpoints.
- Added support for automatically starting IPFS daemon, IPFS Cluster daemon, and Olric cache server in the `dev` environment setup.
### Changed
- Updated Gateway configuration to include settings for IPFS Cluster API URL, IPFS API URL, timeout, and replication factor.
- Refactored Olric configuration generation to use a simpler, local-environment focused setup.
- Improved IPFS content retrieval (`Get`) to fall back to the IPFS Gateway (port 8080) if the IPFS API (port 5001) returns a 404.
### Deprecated
### Removed
### Fixed
## [0.54.0] - 2025-11-03 ## [0.54.0] - 2025-11-03
### Added ### Added
- Integrated Olric distributed cache for high-speed key-value storage and caching. - Integrated Olric distributed cache for high-speed key-value storage and caching.
- Added new HTTP Gateway endpoints for cache operations (GET, PUT, DELETE, SCAN) via `/v1/cache/`. - Added new HTTP Gateway endpoints for cache operations (GET, PUT, DELETE, SCAN) via `/v1/cache/`.
- Added `olric_servers` and `olric_timeout` configuration options to the Gateway. - Added `olric_servers` and `olric_timeout` configuration options to the Gateway.
- Updated the automated installation script (`install-debros-network.sh`) to include Olric installation, configuration, and firewall rules (ports 3320, 3322). - Updated the automated installation script (`install-debros-network.sh`) to include Olric installation, configuration, and firewall rules (ports 3320, 3322).
### Changed ### Changed
- Refactored README for better clarity and organization, focusing on quick start and core features. - Refactored README for better clarity and organization, focusing on quick start and core features.
### Deprecated ### Deprecated
@ -29,12 +52,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.18] - 2025-11-03 ## [0.53.18] - 2025-11-03
### Added ### Added
\n \n
### Changed ### Changed
- Increased the connection timeout during peer discovery from 15 seconds to 20 seconds to improve connection reliability. - Increased the connection timeout during peer discovery from 15 seconds to 20 seconds to improve connection reliability.
- Removed unnecessary debug logging related to filtering out ephemeral port addresses during peer exchange. - Removed unnecessary debug logging related to filtering out ephemeral port addresses during peer exchange.
@ -43,13 +71,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.17] - 2025-11-03 ## [0.53.17] - 2025-11-03
### Added ### Added
- Added a new Git `pre-commit` hook to automatically update the changelog and version before committing, ensuring version consistency. - Added a new Git `pre-commit` hook to automatically update the changelog and version before committing, ensuring version consistency.
### Changed ### Changed
- Refactored the `update_changelog.sh` script to support different execution contexts (pre-commit vs. pre-push), allowing it to analyze only staged changes during commit. - Refactored the `update_changelog.sh` script to support different execution contexts (pre-commit vs. pre-push), allowing it to analyze only staged changes during commit.
- The Git `pre-push` hook was simplified by removing the changelog update logic, which is now handled by the `pre-commit` hook. - The Git `pre-push` hook was simplified by removing the changelog update logic, which is now handled by the `pre-commit` hook.
@ -58,12 +90,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.16] - 2025-11-03 ## [0.53.16] - 2025-11-03
### Added ### Added
\n \n
### Changed ### Changed
- Improved the changelog generation script to prevent infinite loops when the only unpushed commit is a previous changelog update. - Improved the changelog generation script to prevent infinite loops when the only unpushed commit is a previous changelog update.
### Deprecated ### Deprecated
@ -71,12 +108,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.15] - 2025-11-03 ## [0.53.15] - 2025-11-03
### Added ### Added
\n \n
### Changed ### Changed
- Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation. - Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation.
- Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security. - Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security.
- Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility. - Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility.
@ -88,12 +130,17 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.15] - 2025-11-03 ## [0.53.15] - 2025-11-03
### Added ### Added
\n \n
### Changed ### Changed
- Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation. - Improved the pre-push git hook to automatically commit updated changelog and Makefile after generation.
- Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security. - Updated the changelog generation script to load the OpenRouter API key from the .env file or environment variables for better security.
- Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility. - Modified the pre-push hook to read user confirmation from /dev/tty for better compatibility.
@ -105,14 +152,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.14] - 2025-11-03 ## [0.53.14] - 2025-11-03
### Added ### Added
- Added a new `install-hooks` target to the Makefile to easily set up git hooks. - Added a new `install-hooks` target to the Makefile to easily set up git hooks.
- Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`. - Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`.
### Changed ### Changed
- Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog. - Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog.
- Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration. - Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration.
- Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments. - Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments.
@ -124,14 +175,18 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.14] - 2025-11-03 ## [0.53.14] - 2025-11-03
### Added ### Added
- Added a new `install-hooks` target to the Makefile to easily set up git hooks. - Added a new `install-hooks` target to the Makefile to easily set up git hooks.
- Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`. - Added a script (`scripts/install-hooks.sh`) to copy git hooks from `.githooks` to `.git/hooks`.
### Changed ### Changed
- Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog. - Improved the pre-push git hook to automatically commit the updated `CHANGELOG.md` and `Makefile` after generating the changelog.
- Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration. - Updated the changelog generation script (`scripts/update_changelog.sh`) to load the OpenRouter API key from the `.env` file or environment variables, improving security and configuration.
- Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments. - Modified the pre-push hook to read user confirmation from `/dev/tty` for better compatibility in various terminal environments.
@ -141,6 +196,7 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant
### Removed ### Removed
### Fixed ### Fixed
\n \n
## [0.53.8] - 2025-10-31 ## [0.53.8] - 2025-10-31

289
Makefile
View File

@ -7,21 +7,21 @@ test:
# Gateway-focused E2E tests assume gateway and nodes are already running # Gateway-focused E2E tests assume gateway and nodes are already running
# Configure via env: # Configure via env:
# GATEWAY_BASE_URL (default http://127.0.0.1:6001) # GATEWAY_BASE_URL (default http://localhost:6001)
# GATEWAY_API_KEY (required for auth-protected routes) # GATEWAY_API_KEY (required for auth-protected routes)
.PHONY: test-e2e .PHONY: test-e2e
test-e2e: test-e2e:
@echo "Running gateway E2E tests (HTTP/WS only)..." @echo "Running gateway E2E tests (HTTP/WS only)..."
@echo "Base URL: $${GATEWAY_BASE_URL:-http://127.0.0.1:6001}" @echo "Base URL: $${GATEWAY_BASE_URL:-http://localhost:6001}"
@test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1) @test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1)
go test -v -tags e2e ./e2e go test -v -tags e2e ./e2e
# Network - Distributed P2P Database System # Network - Distributed P2P Database System
# Makefile for development and build tasks # Makefile for development and build tasks
.PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill
VERSION := 0.54.0 VERSION := 0.56.0
COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown)
DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ)
LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)'
@ -57,7 +57,7 @@ run-node:
go run ./cmd/node --config node.yaml go run ./cmd/node --config node.yaml
# Run second node (regular) - requires join address of bootstrap node # Run second node (regular) - requires join address of bootstrap node
# Usage: make run-node2 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002 # Usage: make run-node2 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5002 RAFT=7002 P2P=4002
run-node2: run-node2:
@echo "Starting regular node (node.yaml)..." @echo "Starting regular node (node.yaml)..."
@echo "Config: ~/.debros/node.yaml" @echo "Config: ~/.debros/node.yaml"
@ -65,7 +65,7 @@ run-node2:
go run ./cmd/node --config node2.yaml go run ./cmd/node --config node2.yaml
# Run third node (regular) - requires join address of bootstrap node # Run third node (regular) - requires join address of bootstrap node
# Usage: make run-node3 JOINADDR=/ip4/127.0.0.1/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003 # Usage: make run-node3 JOINADDR=/ip4/localhost/tcp/5001 HTTP=5003 RAFT=7003 P2P=4003
run-node3: run-node3:
@echo "Starting regular node (node2.yaml)..." @echo "Starting regular node (node2.yaml)..."
@echo "Config: ~/.debros/node2.yaml" @echo "Config: ~/.debros/node2.yaml"
@ -109,15 +109,168 @@ dev: build
echo " ⚠️ systemctl not found - skipping Anon"; \ echo " ⚠️ systemctl not found - skipping Anon"; \
fi; \ fi; \
fi fi
@echo "Initializing IPFS and Cluster for all nodes..."
@if command -v ipfs >/dev/null 2>&1 && command -v ipfs-cluster-service >/dev/null 2>&1; then \
CLUSTER_SECRET=$$HOME/.debros/cluster-secret; \
if [ ! -f $$CLUSTER_SECRET ]; then \
echo " Generating shared cluster secret..."; \
ipfs-cluster-service --version >/dev/null 2>&1 && openssl rand -hex 32 > $$CLUSTER_SECRET || echo "0000000000000000000000000000000000000000000000000000000000000000" > $$CLUSTER_SECRET; \
fi; \
SECRET=$$(cat $$CLUSTER_SECRET); \
SWARM_KEY=$$HOME/.debros/swarm.key; \
if [ ! -f $$SWARM_KEY ]; then \
echo " Generating private swarm key..."; \
KEY_HEX=$$(openssl rand -hex 32 | tr '[:lower:]' '[:upper:]'); \
printf "/key/swarm/psk/1.0.0/\n/base16/\n%s\n" "$$KEY_HEX" > $$SWARM_KEY; \
chmod 600 $$SWARM_KEY; \
fi; \
echo " Setting up bootstrap node (IPFS: 5001, Cluster: 9094)..."; \
if [ ! -d $$HOME/.debros/bootstrap/ipfs/repo ]; then \
echo " Initializing IPFS..."; \
mkdir -p $$HOME/.debros/bootstrap/ipfs; \
IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \
cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \
IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5001"]' 2>&1 | grep -v "generating" || true; \
IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8080"]' 2>&1 | grep -v "generating" || true; \
IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4101","/ip6/::/tcp/4101"]' 2>&1 | grep -v "generating" || true; \
else \
if [ ! -f $$HOME/.debros/bootstrap/ipfs/repo/swarm.key ]; then \
cp $$SWARM_KEY $$HOME/.debros/bootstrap/ipfs/repo/swarm.key; \
fi; \
fi; \
echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \
mkdir -p $$HOME/.debros/bootstrap/ipfs-cluster; \
echo " Setting up node2 (IPFS: 5002, Cluster: 9104)..."; \
if [ ! -d $$HOME/.debros/node2/ipfs/repo ]; then \
echo " Initializing IPFS..."; \
mkdir -p $$HOME/.debros/node2/ipfs; \
IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \
cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \
IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5002"]' 2>&1 | grep -v "generating" || true; \
IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8081"]' 2>&1 | grep -v "generating" || true; \
IPFS_PATH=$$HOME/.debros/node2/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4102","/ip6/::/tcp/4102"]' 2>&1 | grep -v "generating" || true; \
else \
if [ ! -f $$HOME/.debros/node2/ipfs/repo/swarm.key ]; then \
cp $$SWARM_KEY $$HOME/.debros/node2/ipfs/repo/swarm.key; \
fi; \
fi; \
echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \
mkdir -p $$HOME/.debros/node2/ipfs-cluster; \
echo " Setting up node3 (IPFS: 5003, Cluster: 9114)..."; \
if [ ! -d $$HOME/.debros/node3/ipfs/repo ]; then \
echo " Initializing IPFS..."; \
mkdir -p $$HOME/.debros/node3/ipfs; \
IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs init --profile=server 2>&1 | grep -v "generating" | grep -v "peer identity" || true; \
cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \
IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.API '["/ip4/127.0.0.1/tcp/5003"]' 2>&1 | grep -v "generating" || true; \
IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Gateway '["/ip4/127.0.0.1/tcp/8082"]' 2>&1 | grep -v "generating" || true; \
IPFS_PATH=$$HOME/.debros/node3/ipfs/repo ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4103","/ip6/::/tcp/4103"]' 2>&1 | grep -v "generating" || true; \
else \
if [ ! -f $$HOME/.debros/node3/ipfs/repo/swarm.key ]; then \
cp $$SWARM_KEY $$HOME/.debros/node3/ipfs/repo/swarm.key; \
fi; \
fi; \
echo " Creating IPFS Cluster directories (config will be managed by Go code)..."; \
mkdir -p $$HOME/.debros/node3/ipfs-cluster; \
echo "Starting IPFS daemons..."; \
if [ ! -f .dev/pids/ipfs-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null; then \
IPFS_PATH=$$HOME/.debros/bootstrap/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-bootstrap.pid; \
echo " Bootstrap IPFS started (PID: $$(cat .dev/pids/ipfs-bootstrap.pid), API: 5001)"; \
sleep 3; \
else \
echo " ✓ Bootstrap IPFS already running"; \
fi; \
if [ ! -f .dev/pids/ipfs-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null; then \
IPFS_PATH=$$HOME/.debros/node2/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-node2.pid; \
echo " Node2 IPFS started (PID: $$(cat .dev/pids/ipfs-node2.pid), API: 5002)"; \
sleep 3; \
else \
echo " ✓ Node2 IPFS already running"; \
fi; \
if [ ! -f .dev/pids/ipfs-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null; then \
IPFS_PATH=$$HOME/.debros/node3/ipfs/repo nohup ipfs daemon --enable-pubsub-experiment > $$HOME/.debros/logs/ipfs-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-node3.pid; \
echo " Node3 IPFS started (PID: $$(cat .dev/pids/ipfs-node3.pid), API: 5003)"; \
sleep 3; \
else \
echo " ✓ Node3 IPFS already running"; \
fi; \
else \
echo " ⚠️ ipfs or ipfs-cluster-service not found - skipping IPFS setup"; \
echo " Install with: https://docs.ipfs.tech/install/ and https://ipfscluster.io/documentation/guides/install/"; \
fi
@sleep 2 @sleep 2
@echo "Starting bootstrap node..." @echo "Starting bootstrap node..."
@nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid @nohup ./bin/node --config bootstrap.yaml > $$HOME/.debros/logs/bootstrap.log 2>&1 & echo $$! > .dev/pids/bootstrap.pid
@sleep 2 @sleep 3
@echo "Starting node2..." @echo "Starting node2..."
@nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid @nohup ./bin/node --config node2.yaml > $$HOME/.debros/logs/node2.log 2>&1 & echo $$! > .dev/pids/node2.pid
@sleep 1 @sleep 2
@echo "Starting node3..." @echo "Starting node3..."
@nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid @nohup ./bin/node --config node3.yaml > $$HOME/.debros/logs/node3.log 2>&1 & echo $$! > .dev/pids/node3.pid
@sleep 3
@echo "Starting IPFS Cluster daemons (after Go nodes have configured them)..."
@if command -v ipfs-cluster-service >/dev/null 2>&1; then \
if [ ! -f .dev/pids/ipfs-cluster-bootstrap.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null; then \
if [ -f $$HOME/.debros/bootstrap/ipfs-cluster/service.json ]; then \
env IPFS_CLUSTER_PATH=$$HOME/.debros/bootstrap/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-bootstrap.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-bootstrap.pid; \
echo " Bootstrap Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-bootstrap.pid), API: 9094)"; \
echo " Waiting for bootstrap cluster to be ready..."; \
for i in $$(seq 1 30); do \
if curl -s http://localhost:9094/peers >/dev/null 2>&1; then \
break; \
fi; \
sleep 1; \
done; \
sleep 2; \
else \
echo " ⚠️ Bootstrap cluster config not ready yet"; \
fi; \
else \
echo " ✓ Bootstrap Cluster already running"; \
fi; \
if [ ! -f .dev/pids/ipfs-cluster-node2.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null; then \
if [ -f $$HOME/.debros/node2/ipfs-cluster/service.json ]; then \
env IPFS_CLUSTER_PATH=$$HOME/.debros/node2/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node2.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node2.pid; \
echo " Node2 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node2.pid), API: 9104)"; \
sleep 3; \
else \
echo " ⚠️ Node2 cluster config not ready yet"; \
fi; \
else \
echo " ✓ Node2 Cluster already running"; \
fi; \
if [ ! -f .dev/pids/ipfs-cluster-node3.pid ] || ! kill -0 $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null; then \
if [ -f $$HOME/.debros/node3/ipfs-cluster/service.json ]; then \
env IPFS_CLUSTER_PATH=$$HOME/.debros/node3/ipfs-cluster nohup ipfs-cluster-service daemon > $$HOME/.debros/logs/ipfs-cluster-node3.log 2>&1 & echo $$! > .dev/pids/ipfs-cluster-node3.pid; \
echo " Node3 Cluster started (PID: $$(cat .dev/pids/ipfs-cluster-node3.pid), API: 9114)"; \
sleep 3; \
else \
echo " ⚠️ Node3 cluster config not ready yet"; \
fi; \
else \
echo " ✓ Node3 Cluster already running"; \
fi; \
else \
echo " ⚠️ ipfs-cluster-service not found - skipping cluster daemon startup"; \
fi
@sleep 1
@echo "Starting Olric cache server..."
@if command -v olric-server >/dev/null 2>&1; then \
if [ ! -f $$HOME/.debros/olric-config.yaml ]; then \
echo " Creating Olric config..."; \
mkdir -p $$HOME/.debros; \
fi; \
if ! pgrep -f "olric-server" >/dev/null 2>&1; then \
OLRIC_SERVER_CONFIG=$$HOME/.debros/olric-config.yaml nohup olric-server > $$HOME/.debros/logs/olric.log 2>&1 & echo $$! > .dev/pids/olric.pid; \
echo " Olric cache server started (PID: $$(cat .dev/pids/olric.pid))"; \
sleep 3; \
else \
echo " ✓ Olric cache server already running"; \
fi; \
else \
echo " ⚠️ olric-server command not found - skipping Olric (cache endpoints will be disabled)"; \
echo " Install with: go install github.com/olric-data/olric/cmd/olric-server@v0.7.0"; \
fi
@sleep 1 @sleep 1
@echo "Starting gateway..." @echo "Starting gateway..."
@nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid @nohup ./bin/gateway --config gateway.yaml > $$HOME/.debros/logs/gateway.log 2>&1 & echo $$! > .dev/pids/gateway.pid
@ -130,6 +283,27 @@ dev: build
@if [ -f .dev/pids/anon.pid ]; then \ @if [ -f .dev/pids/anon.pid ]; then \
echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \ echo " Anon: PID=$$(cat .dev/pids/anon.pid) (SOCKS: 9050)"; \
fi fi
@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
echo " Bootstrap IPFS: PID=$$(cat .dev/pids/ipfs-bootstrap.pid) (API: 5001)"; \
fi
@if [ -f .dev/pids/ipfs-node2.pid ]; then \
echo " Node2 IPFS: PID=$$(cat .dev/pids/ipfs-node2.pid) (API: 5002)"; \
fi
@if [ -f .dev/pids/ipfs-node3.pid ]; then \
echo " Node3 IPFS: PID=$$(cat .dev/pids/ipfs-node3.pid) (API: 5003)"; \
fi
@if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
echo " Bootstrap Cluster: PID=$$(cat .dev/pids/ipfs-cluster-bootstrap.pid) (API: 9094)"; \
fi
@if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \
echo " Node2 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node2.pid) (API: 9104)"; \
fi
@if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \
echo " Node3 Cluster: PID=$$(cat .dev/pids/ipfs-cluster-node3.pid) (API: 9114)"; \
fi
@if [ -f .dev/pids/olric.pid ]; then \
echo " Olric: PID=$$(cat .dev/pids/olric.pid) (API: 3320)"; \
fi
@echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)" @echo " Bootstrap: PID=$$(cat .dev/pids/bootstrap.pid)"
@echo " Node2: PID=$$(cat .dev/pids/node2.pid)" @echo " Node2: PID=$$(cat .dev/pids/node2.pid)"
@echo " Node3: PID=$$(cat .dev/pids/node3.pid)" @echo " Node3: PID=$$(cat .dev/pids/node3.pid)"
@ -137,6 +311,17 @@ dev: build
@echo "" @echo ""
@echo "Ports:" @echo "Ports:"
@echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)" @echo " Anon SOCKS: 9050 (proxy endpoint: POST /v1/proxy/anon)"
@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
echo " Bootstrap IPFS API: 5001"; \
echo " Node2 IPFS API: 5002"; \
echo " Node3 IPFS API: 5003"; \
echo " Bootstrap Cluster: 9094 (pin management)"; \
echo " Node2 Cluster: 9104 (pin management)"; \
echo " Node3 Cluster: 9114 (pin management)"; \
fi
@if [ -f .dev/pids/olric.pid ]; then \
echo " Olric: 3320 (cache API)"; \
fi
@echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001" @echo " Bootstrap P2P: 4001, HTTP: 5001, Raft: 7001"
@echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002" @echo " Node2 P2P: 4002, HTTP: 5002, Raft: 7002"
@echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003" @echo " Node3 P2P: 4003, HTTP: 5003, Raft: 7003"
@ -145,13 +330,88 @@ dev: build
@echo "Press Ctrl+C to stop all processes" @echo "Press Ctrl+C to stop all processes"
@echo "============================================================" @echo "============================================================"
@echo "" @echo ""
@if [ -f .dev/pids/anon.pid ]; then \ @LOGS="$$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log"; \
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ if [ -f .dev/pids/anon.pid ]; then \
tail -f $$HOME/.debros/logs/anon.log $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \ LOGS="$$LOGS $$HOME/.debros/logs/anon.log"; \
else \ fi; \
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \ if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
tail -f $$HOME/.debros/logs/bootstrap.log $$HOME/.debros/logs/node2.log $$HOME/.debros/logs/node3.log $$HOME/.debros/logs/gateway.log; \ LOGS="$$LOGS $$HOME/.debros/logs/ipfs-bootstrap.log $$HOME/.debros/logs/ipfs-node2.log $$HOME/.debros/logs/ipfs-node3.log"; \
fi; \
if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
LOGS="$$LOGS $$HOME/.debros/logs/ipfs-cluster-bootstrap.log $$HOME/.debros/logs/ipfs-cluster-node2.log $$HOME/.debros/logs/ipfs-cluster-node3.log"; \
fi; \
if [ -f .dev/pids/olric.pid ]; then \
LOGS="$$LOGS $$HOME/.debros/logs/olric.log"; \
fi; \
trap 'echo "Stopping all processes..."; kill $$(cat .dev/pids/*.pid) 2>/dev/null; rm -f .dev/pids/*.pid; exit 0' INT; \
tail -f $$LOGS
# Kill all processes
kill:
@echo "🛑 Stopping all DeBros network services..."
@echo ""
@echo "Stopping DeBros nodes and gateway..."
@if [ -f .dev/pids/gateway.pid ]; then \
kill -TERM $$(cat .dev/pids/gateway.pid) 2>/dev/null && echo " ✓ Gateway stopped" || echo " ✗ Gateway not running"; \
rm -f .dev/pids/gateway.pid; \
fi fi
@if [ -f .dev/pids/bootstrap.pid ]; then \
kill -TERM $$(cat .dev/pids/bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap node stopped" || echo " ✗ Bootstrap not running"; \
rm -f .dev/pids/bootstrap.pid; \
fi
@if [ -f .dev/pids/node2.pid ]; then \
kill -TERM $$(cat .dev/pids/node2.pid) 2>/dev/null && echo " ✓ Node2 stopped" || echo " ✗ Node2 not running"; \
rm -f .dev/pids/node2.pid; \
fi
@if [ -f .dev/pids/node3.pid ]; then \
kill -TERM $$(cat .dev/pids/node3.pid) 2>/dev/null && echo " ✓ Node3 stopped" || echo " ✗ Node3 not running"; \
rm -f .dev/pids/node3.pid; \
fi
@echo ""
@echo "Stopping IPFS Cluster peers..."
@if [ -f .dev/pids/ipfs-cluster-bootstrap.pid ]; then \
kill -TERM $$(cat .dev/pids/ipfs-cluster-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap Cluster stopped" || echo " ✗ Bootstrap Cluster not running"; \
rm -f .dev/pids/ipfs-cluster-bootstrap.pid; \
fi
@if [ -f .dev/pids/ipfs-cluster-node2.pid ]; then \
kill -TERM $$(cat .dev/pids/ipfs-cluster-node2.pid) 2>/dev/null && echo " ✓ Node2 Cluster stopped" || echo " ✗ Node2 Cluster not running"; \
rm -f .dev/pids/ipfs-cluster-node2.pid; \
fi
@if [ -f .dev/pids/ipfs-cluster-node3.pid ]; then \
kill -TERM $$(cat .dev/pids/ipfs-cluster-node3.pid) 2>/dev/null && echo " ✓ Node3 Cluster stopped" || echo " ✗ Node3 Cluster not running"; \
rm -f .dev/pids/ipfs-cluster-node3.pid; \
fi
@echo ""
@echo "Stopping IPFS daemons..."
@if [ -f .dev/pids/ipfs-bootstrap.pid ]; then \
kill -TERM $$(cat .dev/pids/ipfs-bootstrap.pid) 2>/dev/null && echo " ✓ Bootstrap IPFS stopped" || echo " ✗ Bootstrap IPFS not running"; \
rm -f .dev/pids/ipfs-bootstrap.pid; \
fi
@if [ -f .dev/pids/ipfs-node2.pid ]; then \
kill -TERM $$(cat .dev/pids/ipfs-node2.pid) 2>/dev/null && echo " ✓ Node2 IPFS stopped" || echo " ✗ Node2 IPFS not running"; \
rm -f .dev/pids/ipfs-node2.pid; \
fi
@if [ -f .dev/pids/ipfs-node3.pid ]; then \
kill -TERM $$(cat .dev/pids/ipfs-node3.pid) 2>/dev/null && echo " ✓ Node3 IPFS stopped" || echo " ✗ Node3 IPFS not running"; \
rm -f .dev/pids/ipfs-node3.pid; \
fi
@echo ""
@echo "Stopping Olric cache..."
@if [ -f .dev/pids/olric.pid ]; then \
kill -TERM $$(cat .dev/pids/olric.pid) 2>/dev/null && echo " ✓ Olric stopped" || echo " ✗ Olric not running"; \
rm -f .dev/pids/olric.pid; \
fi
@echo ""
@echo "Stopping Anon proxy..."
@if [ -f .dev/pids/anyone.pid ]; then \
kill -TERM $$(cat .dev/pids/anyone.pid) 2>/dev/null && echo " ✓ Anon proxy stopped" || echo " ✗ Anon proxy not running"; \
rm -f .dev/pids/anyone.pid; \
fi
@echo ""
@echo "Cleaning up any remaining processes on ports..."
@lsof -ti:7001,7002,7003,5001,5002,5003,6001,4001,4002,4003,9050,3320,3322,9094,9095,9096,9097,9104,9105,9106,9107,9114,9115,9116,9117,8080,8081,8082 2>/dev/null | xargs kill -9 2>/dev/null && echo " ✓ Cleaned up remaining port bindings" || echo " ✓ No lingering processes found"
@echo ""
@echo "✅ All services stopped!"
# Help # Help
help: help:
@ -204,6 +464,7 @@ help:
@echo " vet - Vet code" @echo " vet - Vet code"
@echo " lint - Lint code (fmt + vet)" @echo " lint - Lint code (fmt + vet)"
@echo " clear-ports - Clear common dev ports" @echo " clear-ports - Clear common dev ports"
@echo " kill - Stop all running services (nodes, IPFS, cluster, gateway, olric)"
@echo " dev-setup - Setup development environment" @echo " dev-setup - Setup development environment"
@echo " dev-cluster - Show cluster startup commands" @echo " dev-cluster - Show cluster startup commands"
@echo " dev - Full development workflow" @echo " dev - Full development workflow"

View File

@ -68,7 +68,7 @@ Use `make dev` for the complete stack or run binaries individually with `go run
All runtime configuration lives in `~/.debros/`. All runtime configuration lives in `~/.debros/`.
- `bootstrap.yaml`: `type: bootstrap`, blank `database.rqlite_join_address` - `bootstrap.yaml`: `type: bootstrap`, blank `database.rqlite_join_address`
- `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. `127.0.0.1:7001`) and include the bootstrap `discovery.bootstrap_peers` - `node*.yaml`: `type: node`, set `database.rqlite_join_address` (e.g. `localhost:7001`) and include the bootstrap `discovery.bootstrap_peers`
- `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags - `gateway.yaml`: configure `gateway.bootstrap_peers`, `gateway.namespace`, and optional auth flags
Validation reminders: Validation reminders:
@ -127,7 +127,7 @@ Environment overrides:
```bash ```bash
export GATEWAY_ADDR="0.0.0.0:6001" export GATEWAY_ADDR="0.0.0.0:6001"
export GATEWAY_NAMESPACE="my-app" export GATEWAY_NAMESPACE="my-app"
export GATEWAY_BOOTSTRAP_PEERS="/ip4/127.0.0.1/tcp/4001/p2p/<peerID>"
export GATEWAY_REQUIRE_AUTH=true export GATEWAY_REQUIRE_AUTH=true
export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2" export GATEWAY_API_KEYS="key1:namespace1,key2:namespace2"
``` ```
@ -139,6 +139,7 @@ Common endpoints (see `openapi/gateway.yaml` for the full spec):
- `POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction` - `POST /v1/rqlite/exec`, `POST /v1/rqlite/find`, `POST /v1/rqlite/select`, `POST /v1/rqlite/transaction`
- `GET /v1/rqlite/schema` - `GET /v1/rqlite/schema`
- `POST /v1/pubsub/publish`, `GET /v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=<topic>` - `POST /v1/pubsub/publish`, `GET /v1/pubsub/topics`, `GET /v1/pubsub/ws?topic=<topic>`
- `POST /v1/storage/upload`, `POST /v1/storage/pin`, `GET /v1/storage/status/:cid`, `GET /v1/storage/get/:cid`, `DELETE /v1/storage/unpin/:cid`
## Troubleshooting ## Troubleshooting

View File

@ -51,15 +51,19 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Load YAML // Load YAML
type yamlCfg struct { type yamlCfg struct {
ListenAddr string `yaml:"listen_addr"` ListenAddr string `yaml:"listen_addr"`
ClientNamespace string `yaml:"client_namespace"` ClientNamespace string `yaml:"client_namespace"`
RQLiteDSN string `yaml:"rqlite_dsn"` RQLiteDSN string `yaml:"rqlite_dsn"`
BootstrapPeers []string `yaml:"bootstrap_peers"` BootstrapPeers []string `yaml:"bootstrap_peers"`
EnableHTTPS bool `yaml:"enable_https"` EnableHTTPS bool `yaml:"enable_https"`
DomainName string `yaml:"domain_name"` DomainName string `yaml:"domain_name"`
TLSCacheDir string `yaml:"tls_cache_dir"` TLSCacheDir string `yaml:"tls_cache_dir"`
OlricServers []string `yaml:"olric_servers"` OlricServers []string `yaml:"olric_servers"`
OlricTimeout string `yaml:"olric_timeout"` OlricTimeout string `yaml:"olric_timeout"`
IPFSClusterAPIURL string `yaml:"ipfs_cluster_api_url"`
IPFSAPIURL string `yaml:"ipfs_api_url"`
IPFSTimeout string `yaml:"ipfs_timeout"`
IPFSReplicationFactor int `yaml:"ipfs_replication_factor"`
} }
data, err := os.ReadFile(configPath) data, err := os.ReadFile(configPath)
@ -82,15 +86,19 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
// Build config from YAML // Build config from YAML
cfg := &gateway.Config{ cfg := &gateway.Config{
ListenAddr: ":6001", ListenAddr: ":6001",
ClientNamespace: "default", ClientNamespace: "default",
BootstrapPeers: nil, BootstrapPeers: nil,
RQLiteDSN: "", RQLiteDSN: "",
EnableHTTPS: false, EnableHTTPS: false,
DomainName: "", DomainName: "",
TLSCacheDir: "", TLSCacheDir: "",
OlricServers: nil, OlricServers: nil,
OlricTimeout: 0, OlricTimeout: 0,
IPFSClusterAPIURL: "",
IPFSAPIURL: "",
IPFSTimeout: 0,
IPFSReplicationFactor: 0,
} }
if v := strings.TrimSpace(y.ListenAddr); v != "" { if v := strings.TrimSpace(y.ListenAddr); v != "" {
@ -142,6 +150,24 @@ func parseGatewayConfig(logger *logging.ColoredLogger) *gateway.Config {
} }
} }
// IPFS configuration
if v := strings.TrimSpace(y.IPFSClusterAPIURL); v != "" {
cfg.IPFSClusterAPIURL = v
}
if v := strings.TrimSpace(y.IPFSAPIURL); v != "" {
cfg.IPFSAPIURL = v
}
if v := strings.TrimSpace(y.IPFSTimeout); v != "" {
if parsed, err := time.ParseDuration(v); err == nil {
cfg.IPFSTimeout = parsed
} else {
logger.ComponentWarn(logging.ComponentGeneral, "invalid ipfs_timeout, using default", zap.String("value", v), zap.Error(err))
}
}
if y.IPFSReplicationFactor > 0 {
cfg.IPFSReplicationFactor = y.IPFSReplicationFactor
}
// Validate configuration // Validate configuration
if errs := cfg.ValidateConfig(); len(errs) > 0 { if errs := cfg.ValidateConfig(); len(errs) > 0 {
fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs)) fmt.Fprintf(os.Stderr, "\nGateway configuration errors (%d):\n", len(errs))

View File

@ -255,10 +255,10 @@ func main() {
// Set default advertised addresses if empty // Set default advertised addresses if empty
if cfg.Discovery.HttpAdvAddress == "" { if cfg.Discovery.HttpAdvAddress == "" {
cfg.Discovery.HttpAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLitePort) cfg.Discovery.HttpAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLitePort)
} }
if cfg.Discovery.RaftAdvAddress == "" { if cfg.Discovery.RaftAdvAddress == "" {
cfg.Discovery.RaftAdvAddress = fmt.Sprintf("127.0.0.1:%d", cfg.Database.RQLiteRaftPort) cfg.Discovery.RaftAdvAddress = fmt.Sprintf("localhost:%d", cfg.Database.RQLiteRaftPort)
} }
// Validate configuration // Validate configuration

View File

@ -3,10 +3,13 @@
package e2e package e2e
import ( import (
"bytes"
"crypto/rand" "crypto/rand"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"mime/multipart"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
@ -34,7 +37,7 @@ func requireAPIKey(t *testing.T) string {
} }
func gatewayBaseURL() string { func gatewayBaseURL() string {
return getEnv("GATEWAY_BASE_URL", "http://127.0.0.1:6001") return getEnv("GATEWAY_BASE_URL", "http://localhost:6001")
} }
func httpClient() *http.Client { func httpClient() *http.Client {
@ -407,6 +410,201 @@ func TestGateway_Database_RecreateWithFK(t *testing.T) {
} }
} }
// TestGateway_Storage_UploadMultipart exercises the full storage lifecycle
// against a running gateway: multipart upload, pin, pin status, content
// retrieval, and unpin. The steps are strictly order-dependent — every later
// request uses the CID returned by the initial upload — so do not reorder
// them. The test skips itself when the gateway reports IPFS storage as
// unavailable (HTTP 503 on the first request).
func TestGateway_Storage_UploadMultipart(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()

	// Create multipart form data using proper multipart writer
	content := []byte("test file content for IPFS upload")
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, err := writer.CreateFormFile("file", "test.txt")
	if err != nil {
		t.Fatalf("create form file: %v", err)
	}
	if _, err := part.Write(content); err != nil {
		t.Fatalf("write content: %v", err)
	}
	// Close finalizes the multipart body (writes the trailing boundary);
	// the request would be malformed without it.
	if err := writer.Close(); err != nil {
		t.Fatalf("close writer: %v", err)
	}

	// Upload: Content-Type must be set AFTER replacing the header map,
	// otherwise the boundary parameter would be lost.
	req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", &buf)
	req.Header = authHeader(key)
	req.Header.Set("Content-Type", writer.FormDataContentType())

	resp, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("upload do: %v", err)
	}
	defer resp.Body.Close()

	// 503 is the gateway's "IPFS backend not configured/reachable" signal.
	if resp.StatusCode == http.StatusServiceUnavailable {
		t.Skip("IPFS storage not available; skipping storage tests")
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		t.Fatalf("upload status: %d, body: %s", resp.StatusCode, string(body))
	}

	var uploadResp struct {
		Cid  string `json:"cid"`
		Name string `json:"name"`
		Size int64  `json:"size"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil {
		t.Fatalf("upload decode: %v", err)
	}
	if uploadResp.Cid == "" {
		t.Fatalf("upload returned empty CID")
	}
	if uploadResp.Name != "test.txt" {
		t.Fatalf("upload name mismatch: got %s", uploadResp.Name)
	}
	if uploadResp.Size == 0 {
		t.Fatalf("upload size is zero")
	}

	// Test pinning the uploaded content
	pinBody := fmt.Sprintf(`{"cid":"%s","name":"test-pinned"}`, uploadResp.Cid)
	req2, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/pin", strings.NewReader(pinBody))
	req2.Header = authHeader(key)
	resp2, err := httpClient().Do(req2)
	if err != nil {
		t.Fatalf("pin do: %v", err)
	}
	defer resp2.Body.Close()
	if resp2.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp2.Body)
		t.Fatalf("pin status: %d, body: %s", resp2.StatusCode, string(body))
	}

	// Test getting pin status
	req3, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/"+uploadResp.Cid, nil)
	req3.Header = authHeader(key)
	resp3, err := httpClient().Do(req3)
	if err != nil {
		t.Fatalf("status do: %v", err)
	}
	defer resp3.Body.Close()
	if resp3.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp3.Body)
		t.Fatalf("status status: %d, body: %s", resp3.StatusCode, string(body))
	}

	var statusResp struct {
		Cid               string   `json:"cid"`
		Status            string   `json:"status"`
		ReplicationFactor int      `json:"replication_factor"`
		Peers             []string `json:"peers"`
	}
	if err := json.NewDecoder(resp3.Body).Decode(&statusResp); err != nil {
		t.Fatalf("status decode: %v", err)
	}
	if statusResp.Cid != uploadResp.Cid {
		t.Fatalf("status CID mismatch: got %s", statusResp.Cid)
	}

	// Test retrieving content
	req4, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/get/"+uploadResp.Cid, nil)
	req4.Header = authHeader(key)
	resp4, err := httpClient().Do(req4)
	if err != nil {
		t.Fatalf("get do: %v", err)
	}
	defer resp4.Body.Close()
	if resp4.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp4.Body)
		t.Fatalf("get status: %d, body: %s", resp4.StatusCode, string(body))
	}
	// Round-trip check: retrieved bytes must match what was uploaded.
	retrieved, err := io.ReadAll(resp4.Body)
	if err != nil {
		t.Fatalf("get read: %v", err)
	}
	if string(retrieved) != string(content) {
		t.Fatalf("retrieved content mismatch: got %q", string(retrieved))
	}

	// Test unpinning
	req5, _ := http.NewRequest(http.MethodDelete, base+"/v1/storage/unpin/"+uploadResp.Cid, nil)
	req5.Header = authHeader(key)
	resp5, err := httpClient().Do(req5)
	if err != nil {
		t.Fatalf("unpin do: %v", err)
	}
	defer resp5.Body.Close()
	if resp5.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp5.Body)
		t.Fatalf("unpin status: %d, body: %s", resp5.StatusCode, string(body))
	}
}
// TestGateway_Storage_UploadJSON verifies the JSON upload path of the storage
// API, where the file bytes are submitted as a base64 string inside a JSON
// body rather than as a multipart form. The test skips itself when the
// gateway reports IPFS storage as unavailable (HTTP 503).
func TestGateway_Storage_UploadJSON(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()

	// Build the JSON payload: file name plus base64-encoded content.
	payload := []byte("test json upload content")
	encoded := base64.StdEncoding.EncodeToString(payload)
	reqBody := fmt.Sprintf(`{"name":"test.json","data":"%s"}`, encoded)

	req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", strings.NewReader(reqBody))
	req.Header = authHeader(key)

	res, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("upload json do: %v", err)
	}
	defer res.Body.Close()

	// 503 means the IPFS backend is not configured/reachable; skip in that case.
	switch {
	case res.StatusCode == http.StatusServiceUnavailable:
		t.Skip("IPFS storage not available; skipping storage tests")
	case res.StatusCode != http.StatusOK:
		raw, _ := io.ReadAll(res.Body)
		t.Fatalf("upload json status: %d, body: %s", res.StatusCode, string(raw))
	}

	var out struct {
		Cid  string `json:"cid"`
		Name string `json:"name"`
		Size int64  `json:"size"`
	}
	if err := json.NewDecoder(res.Body).Decode(&out); err != nil {
		t.Fatalf("upload json decode: %v", err)
	}

	// The gateway must echo back a CID and the submitted file name.
	if out.Cid == "" {
		t.Fatalf("upload json returned empty CID")
	}
	if out.Name != "test.json" {
		t.Fatalf("upload json name mismatch: got %s", out.Name)
	}
}
// TestGateway_Storage_InvalidCID asserts that the pin-status endpoint rejects
// a malformed CID with an error response (404 or 500) instead of crashing.
// The test skips itself when the gateway reports IPFS storage as unavailable.
func TestGateway_Storage_InvalidCID(t *testing.T) {
	key := requireAPIKey(t)
	base := gatewayBaseURL()

	req, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/QmInvalidCID123", nil)
	req.Header = authHeader(key)

	res, err := httpClient().Do(req)
	if err != nil {
		t.Fatalf("status invalid do: %v", err)
	}
	defer res.Body.Close()

	switch res.StatusCode {
	case http.StatusServiceUnavailable:
		t.Skip("IPFS storage not available; skipping storage tests")
	case http.StatusNotFound, http.StatusInternalServerError:
		// Expected: the gateway reports an error for the bogus CID without crashing.
	default:
		t.Fatalf("expected error status for invalid CID, got %d", res.StatusCode)
	}
}
func toWSURL(httpURL string) string { func toWSURL(httpURL string) string {
u, err := url.Parse(httpURL) u, err := url.Parse(httpURL)
if err != nil { if err != nil {

View File

@ -19,7 +19,7 @@ var disabled bool
func SetDisabled(v bool) { disabled = v } func SetDisabled(v bool) { disabled = v }
// Enabled reports whether Anyone proxy routing is active. // Enabled reports whether Anyone proxy routing is active.
// Defaults to true, using SOCKS5 at 127.0.0.1:9050, unless explicitly disabled // Defaults to true, using SOCKS5 at localhost:9050, unless explicitly disabled
// via SetDisabled(true) or environment variable ANYONE_DISABLE=1. // via SetDisabled(true) or environment variable ANYONE_DISABLE=1.
// ANYONE_SOCKS5 may override the proxy address. // ANYONE_SOCKS5 may override the proxy address.
func Enabled() bool { func Enabled() bool {
@ -31,7 +31,7 @@ func Enabled() bool {
// socksAddr returns the SOCKS5 address to use for proxying (host:port). // socksAddr returns the SOCKS5 address to use for proxying (host:port).
func socksAddr() string { func socksAddr() string {
return "127.0.0.1:9050" return "localhost:9050"
} }
// socksContextDialer implements tcp.ContextDialer over a SOCKS5 proxy. // socksContextDialer implements tcp.ContextDialer over a SOCKS5 proxy.
@ -57,7 +57,7 @@ func (d *socksContextDialer) DialContext(ctx context.Context, network, address s
// DialerForAddr returns a tcp.DialerForAddr that routes through the Anyone SOCKS5 proxy. // DialerForAddr returns a tcp.DialerForAddr that routes through the Anyone SOCKS5 proxy.
// It automatically BYPASSES the proxy for loopback, private, and link-local addresses // It automatically BYPASSES the proxy for loopback, private, and link-local addresses
// to allow local/dev networking (e.g. 127.0.0.1, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10). // to allow local/dev networking (e.g. localhost, 10.0.0.0/8, 192.168.0.0/16, fc00::/7, fe80::/10).
func DialerForAddr() tcp.DialerForAddr { func DialerForAddr() tcp.DialerForAddr {
return func(raddr ma.Multiaddr) (tcp.ContextDialer, error) { return func(raddr ma.Multiaddr) (tcp.ContextDialer, error) {
// Prefer direct dialing for local/private targets // Prefer direct dialing for local/private targets

View File

@ -430,8 +430,8 @@ discovery:
%s %s
discovery_interval: "15s" discovery_interval: "15s"
bootstrap_port: %d bootstrap_port: %d
http_adv_address: "127.0.0.1:%d" http_adv_address: "localhost:%d"
raft_adv_address: "127.0.0.1:%d" raft_adv_address: "localhost:%d"
node_namespace: "default" node_namespace: "default"
security: security:
@ -477,8 +477,8 @@ discovery:
bootstrap_peers: [] bootstrap_peers: []
discovery_interval: "15s" discovery_interval: "15s"
bootstrap_port: %d bootstrap_port: %d
http_adv_address: "127.0.0.1:%d" http_adv_address: "localhost:%d"
raft_adv_address: "127.0.0.1:%d" raft_adv_address: "localhost:%d"
node_namespace: "default" node_namespace: "default"
security: security:

View File

@ -2,6 +2,9 @@ package cli
import ( import (
"bufio" "bufio"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt" "fmt"
"net" "net"
"os" "os"
@ -63,11 +66,12 @@ func HandleSetupCommand(args []string) {
fmt.Printf(" 4. Install RQLite database\n") fmt.Printf(" 4. Install RQLite database\n")
fmt.Printf(" 5. Install Anyone Relay (Anon) for anonymous networking\n") fmt.Printf(" 5. Install Anyone Relay (Anon) for anonymous networking\n")
fmt.Printf(" 6. Install Olric cache server\n") fmt.Printf(" 6. Install Olric cache server\n")
fmt.Printf(" 7. Create directories (/home/debros/bin, /home/debros/src)\n") fmt.Printf(" 7. Install IPFS (Kubo) and IPFS Cluster\n")
fmt.Printf(" 8. Clone and build DeBros Network\n") fmt.Printf(" 8. Create directories (/home/debros/bin, /home/debros/src)\n")
fmt.Printf(" 9. Generate configuration files\n") fmt.Printf(" 9. Clone and build DeBros Network\n")
fmt.Printf(" 10. Create systemd services (debros-node, debros-gateway, debros-olric)\n") fmt.Printf(" 10. Generate configuration files\n")
fmt.Printf(" 11. Start and enable services\n") fmt.Printf(" 11. Create systemd services (debros-ipfs, debros-ipfs-cluster, debros-node, debros-gateway, debros-olric)\n")
fmt.Printf(" 12. Start and enable services\n")
fmt.Printf(strings.Repeat("=", 70) + "\n\n") fmt.Printf(strings.Repeat("=", 70) + "\n\n")
fmt.Printf("Ready to begin setup? (yes/no): ") fmt.Printf("Ready to begin setup? (yes/no): ")
@ -96,6 +100,9 @@ func HandleSetupCommand(args []string) {
// Step 4.6: Install Olric cache server // Step 4.6: Install Olric cache server
installOlric() installOlric()
// Step 4.7: Install IPFS and IPFS Cluster
installIPFS()
// Step 5: Setup directories // Step 5: Setup directories
setupDirectories() setupDirectories()
@ -123,6 +130,14 @@ func HandleSetupCommand(args []string) {
fmt.Printf("🆔 Node Peer ID: %s\n\n", peerID) fmt.Printf("🆔 Node Peer ID: %s\n\n", peerID)
} }
// Display IPFS Cluster information
fmt.Printf("IPFS Cluster Setup:\n")
fmt.Printf(" Each node runs its own IPFS Cluster peer\n")
fmt.Printf(" Cluster peers use CRDT consensus for automatic discovery\n")
fmt.Printf(" To verify cluster is working:\n")
fmt.Printf(" sudo -u debros ipfs-cluster-ctl --host http://localhost:9094 peers ls\n")
fmt.Printf(" You should see all cluster peers listed\n\n")
fmt.Printf("Service Management:\n") fmt.Printf("Service Management:\n")
fmt.Printf(" network-cli service status all\n") fmt.Printf(" network-cli service status all\n")
fmt.Printf(" network-cli service logs node --follow\n") fmt.Printf(" network-cli service logs node --follow\n")
@ -1086,26 +1101,15 @@ func installOlric() {
if err := os.MkdirAll(olricConfigDir, 0755); err == nil { if err := os.MkdirAll(olricConfigDir, 0755); err == nil {
configPath := olricConfigDir + "/config.yaml" configPath := olricConfigDir + "/config.yaml"
if _, err := os.Stat(configPath); os.IsNotExist(err) { if _, err := os.Stat(configPath); os.IsNotExist(err) {
configContent := `memberlist: configContent := `server:
bind-addr: "0.0.0.0" bindAddr: "localhost"
bind-port: 3322 bindPort: 3320
client:
bind-addr: "0.0.0.0"
bind-port: 3320
# Durability and replication configuration memberlist:
# Replicates data across entire network for fault tolerance environment: local
dmaps: bindAddr: "localhost"
default: bindPort: 3322
replication:
mode: sync # Synchronous replication for durability
replica_count: 2 # Replicate to 2 backup nodes (3 total copies: 1 primary + 2 backups)
write_quorum: 2 # Require 2 nodes to acknowledge writes
read_quorum: 1 # Read from 1 node (faster reads)
read_repair: true # Enable read-repair for consistency
# Split-brain protection
member_count_quorum: 2 # Require at least 2 nodes to operate (prevents split-brain)
` `
if err := os.WriteFile(configPath, []byte(configContent), 0644); err == nil { if err := os.WriteFile(configPath, []byte(configContent), 0644); err == nil {
exec.Command("chown", "debros:debros", configPath).Run() exec.Command("chown", "debros:debros", configPath).Run()
@ -1167,6 +1171,92 @@ func configureFirewallForOlric() {
fmt.Printf(" No active firewall detected for Olric\n") fmt.Printf(" No active firewall detected for Olric\n")
} }
// installIPFS installs IPFS (Kubo) and IPFS Cluster when they are not already
// present on the host, then opens the required firewall ports.
//
// Kubo is installed via the official dist.ipfs.tech install script (pinned to
// v0.27.0). IPFS Cluster is built from source with `go install`, which
// requires a Go toolchain on PATH. Failures are reported to stderr with
// manual-installation hints instead of aborting the whole setup run.
func installIPFS() {
	fmt.Printf("🌐 Installing IPFS (Kubo) and IPFS Cluster...\n")

	// Install Kubo only when the `ipfs` binary is not already on PATH.
	if _, err := exec.LookPath("ipfs"); err == nil {
		fmt.Printf("   ✓ IPFS (Kubo) already installed\n")
	} else {
		fmt.Printf("   Installing IPFS (Kubo)...\n")
		// Install IPFS via the official installation script. Capture combined
		// output so a failure is diagnosable rather than silently opaque.
		cmd := exec.Command("bash", "-c", "curl -fsSL https://dist.ipfs.tech/kubo/v0.27.0/install.sh | bash")
		if output, err := cmd.CombinedOutput(); err != nil {
			fmt.Fprintf(os.Stderr, "⚠️  Failed to install IPFS: %v\n", err)
			if len(output) > 0 {
				fmt.Fprintf(os.Stderr, "   Output: %s\n", string(output))
			}
			fmt.Fprintf(os.Stderr, "   You may need to install IPFS manually: https://docs.ipfs.tech/install/command-line/\n")
			return
		}
		// Best-effort symlink so `ipfs` is also reachable via /usr/bin.
		if err := exec.Command("ln", "-sf", "/usr/local/bin/ipfs", "/usr/bin/ipfs").Run(); err != nil {
			fmt.Fprintf(os.Stderr, "⚠️  Could not symlink ipfs into /usr/bin: %v\n", err)
		}
		fmt.Printf("   ✓ IPFS (Kubo) installed\n")
	}

	// Install IPFS Cluster only when ipfs-cluster-service is missing.
	if _, err := exec.LookPath("ipfs-cluster-service"); err == nil {
		fmt.Printf("   ✓ IPFS Cluster already installed\n")
	} else {
		fmt.Printf("   Installing IPFS Cluster...\n")
		// Building from source needs a Go toolchain.
		if _, err := exec.LookPath("go"); err != nil {
			fmt.Fprintf(os.Stderr, "⚠️  Go not found - cannot install IPFS Cluster. Please install Go first.\n")
			return
		}
		cmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest")
		cmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin")
		if output, err := cmd.CombinedOutput(); err != nil {
			fmt.Fprintf(os.Stderr, "⚠️  Failed to install IPFS Cluster: %v\n", err)
			if len(output) > 0 {
				fmt.Fprintf(os.Stderr, "   Output: %s\n", string(output))
			}
			fmt.Fprintf(os.Stderr, "   You can manually install with: go install github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-service@latest\n")
			return
		}
		// Also install ipfs-cluster-ctl for management. Set GOBIN to match the
		// service install above so the binary lands on PATH, and report (but
		// tolerate) failure — ctl is a convenience, not a hard requirement.
		ctlCmd := exec.Command("go", "install", "github.com/ipfs-cluster/ipfs-cluster/cmd/ipfs-cluster-ctl@latest")
		ctlCmd.Env = append(os.Environ(), "GOBIN=/usr/local/bin")
		if err := ctlCmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "⚠️  Failed to install ipfs-cluster-ctl (optional): %v\n", err)
		}
		fmt.Printf("   ✓ IPFS Cluster installed\n")
	}

	// Configure firewall for IPFS and Cluster
	configureFirewallForIPFS()

	fmt.Printf("   ✓ IPFS and IPFS Cluster setup complete\n")
}
// configureFirewallForIPFS opens the IPFS and IPFS Cluster ports (4001 swarm,
// 5001 API, 9094 cluster API, 9096 cluster swarm) on whichever supported
// firewall — UFW or firewalld — is installed and active. When neither is
// active, the function only prints an informational message.
func configureFirewallForIPFS() {
	fmt.Printf("   Checking firewall configuration for IPFS...\n")

	// UFW takes precedence when it is present and reports "Status: active".
	if _, err := exec.LookPath("ufw"); err == nil {
		out, _ := exec.Command("ufw", "status").CombinedOutput()
		if strings.Contains(string(out), "Status: active") {
			fmt.Printf("   Adding UFW rules for IPFS and Cluster...\n")
			// Rule failures are tolerated: setup continues with whatever succeeded.
			rules := [][]string{
				{"allow", "4001/tcp", "comment", "IPFS Swarm"},
				{"allow", "5001/tcp", "comment", "IPFS API"},
				{"allow", "9094/tcp", "comment", "IPFS Cluster API"},
				{"allow", "9096/tcp", "comment", "IPFS Cluster Swarm"},
			}
			for _, rule := range rules {
				exec.Command("ufw", rule...).Run()
			}
			fmt.Printf("   ✓ UFW rules added for IPFS\n")
			return
		}
	}

	// Fall back to firewalld when its daemon reports "running".
	if _, err := exec.LookPath("firewall-cmd"); err == nil {
		out, _ := exec.Command("firewall-cmd", "--state").CombinedOutput()
		if strings.Contains(string(out), "running") {
			fmt.Printf("   Adding firewalld rules for IPFS...\n")
			for _, port := range []string{"4001/tcp", "5001/tcp", "9094/tcp", "9096/tcp"} {
				exec.Command("firewall-cmd", "--permanent", "--add-port="+port).Run()
			}
			// Permanent rules only take effect after a reload.
			exec.Command("firewall-cmd", "--reload").Run()
			fmt.Printf("   ✓ firewalld rules added for IPFS\n")
			return
		}
	}

	fmt.Printf("   No active firewall detected for IPFS\n")
}
func setupDirectories() { func setupDirectories() {
fmt.Printf("📁 Creating directories...\n") fmt.Printf("📁 Creating directories...\n")
@ -1416,6 +1506,18 @@ func generateConfigsInteractive(force bool) {
exec.Command("chown", "debros:debros", nodeConfigPath).Run() exec.Command("chown", "debros:debros", nodeConfigPath).Run()
fmt.Printf(" ✓ Node config created: %s\n", nodeConfigPath) fmt.Printf(" ✓ Node config created: %s\n", nodeConfigPath)
// Initialize IPFS and Cluster for this node
var nodeID string
if isBootstrap {
nodeID = "bootstrap"
} else {
nodeID = "node"
}
if err := initializeIPFSForNode(nodeID, vpsIP, isBootstrap); err != nil {
fmt.Fprintf(os.Stderr, "⚠️ Failed to initialize IPFS/Cluster: %v\n", err)
fmt.Fprintf(os.Stderr, " You may need to initialize IPFS and Cluster manually\n")
}
// Generate Olric config file for this node (uses multicast discovery) // Generate Olric config file for this node (uses multicast discovery)
var olricConfigPath string var olricConfigPath string
if isBootstrap { if isBootstrap {
@ -1532,6 +1634,17 @@ database:
cluster_sync_interval: "30s" cluster_sync_interval: "30s"
peer_inactivity_limit: "24h" peer_inactivity_limit: "24h"
min_cluster_size: 1 min_cluster_size: 1
ipfs:
# IPFS Cluster API endpoint for pin management (leave empty to disable)
cluster_api_url: "http://localhost:9094"
# IPFS HTTP API endpoint for content retrieval
api_url: "http://localhost:5001"
# Timeout for IPFS operations
timeout: "60s"
# Replication factor for pinned content
replication_factor: 3
# Enable client-side encryption before upload
enable_encryption: true
discovery: discovery:
bootstrap_peers: [] bootstrap_peers: []
@ -1607,6 +1720,17 @@ database:
cluster_sync_interval: "30s" cluster_sync_interval: "30s"
peer_inactivity_limit: "24h" peer_inactivity_limit: "24h"
min_cluster_size: 1 min_cluster_size: 1
ipfs:
# IPFS Cluster API endpoint for pin management (leave empty to disable)
cluster_api_url: "http://localhost:9094"
# IPFS HTTP API endpoint for content retrieval
api_url: "http://localhost:5001"
# Timeout for IPFS operations
timeout: "60s"
# Replication factor for pinned content
replication_factor: 3
# Enable client-side encryption before upload
enable_encryption: true
discovery: discovery:
%s %s
@ -1670,13 +1794,23 @@ func generateGatewayConfigDirect(bootstrapPeers string, enableHTTPS bool, domain
olricYAML.WriteString(" - \"localhost:3320\"\n") olricYAML.WriteString(" - \"localhost:3320\"\n")
} }
// IPFS Cluster configuration (defaults - can be customized later)
ipfsYAML := `# IPFS Cluster configuration (optional)
# Uncomment and configure if you have IPFS Cluster running:
# ipfs_cluster_api_url: "http://localhost:9094"
# ipfs_api_url: "http://localhost:5001"
# ipfs_timeout: "60s"
# ipfs_replication_factor: 3
`
return fmt.Sprintf(`listen_addr: ":6001" return fmt.Sprintf(`listen_addr: ":6001"
client_namespace: "default" client_namespace: "default"
rqlite_dsn: "" rqlite_dsn: ""
%s %s
%s %s
%s %s
`, peersYAML.String(), httpsYAML.String(), olricYAML.String()) %s
`, peersYAML.String(), httpsYAML.String(), olricYAML.String(), ipfsYAML)
} }
// generateOlricConfig generates an Olric configuration file // generateOlricConfig generates an Olric configuration file
@ -1689,30 +1823,15 @@ func generateOlricConfig(configPath, bindIP string, httpPort, memberlistPort int
} }
var config strings.Builder var config strings.Builder
config.WriteString("server:\n")
config.WriteString(fmt.Sprintf(" bindAddr: \"%s\"\n", bindIP))
config.WriteString(fmt.Sprintf(" bindPort: %d\n", httpPort))
config.WriteString("\n")
config.WriteString("memberlist:\n") config.WriteString("memberlist:\n")
config.WriteString(fmt.Sprintf(" bind-addr: \"%s\"\n", bindIP)) config.WriteString(" environment: local\n")
config.WriteString(fmt.Sprintf(" bind-port: %d\n", memberlistPort)) config.WriteString(fmt.Sprintf(" bindAddr: \"%s\"\n", bindIP))
config.WriteString(" # Multicast discovery enabled - peers discovered dynamically via LibP2P network\n") config.WriteString(fmt.Sprintf(" bindPort: %d\n", memberlistPort))
config.WriteString("\n")
config.WriteString("client:\n")
config.WriteString(fmt.Sprintf(" bind-addr: \"%s\"\n", bindIP))
config.WriteString(fmt.Sprintf(" bind-port: %d\n", httpPort))
// Durability and replication settings
config.WriteString("\n# Durability and replication configuration\n")
config.WriteString("# Replicates data across entire network for fault tolerance\n")
config.WriteString("dmaps:\n")
config.WriteString(" default:\n")
config.WriteString(" replication:\n")
config.WriteString(" mode: sync # Synchronous replication for durability\n")
config.WriteString(" replica_count: 2 # Replicate to 2 backup nodes (3 total copies: 1 primary + 2 backups)\n")
config.WriteString(" write_quorum: 2 # Require 2 nodes to acknowledge writes\n")
config.WriteString(" read_quorum: 1 # Read from 1 node (faster reads)\n")
config.WriteString(" read_repair: true # Enable read-repair for consistency\n")
// Split-brain protection
config.WriteString("\n# Split-brain protection\n")
config.WriteString("member_count_quorum: 2 # Require at least 2 nodes to operate (prevents split-brain)\n")
// Write config file // Write config file
if err := os.WriteFile(configPath, []byte(config.String()), 0644); err != nil { if err := os.WriteFile(configPath, []byte(config.String()), 0644); err != nil {
@ -1724,14 +1843,381 @@ func generateOlricConfig(configPath, bindIP string, httpPort, memberlistPort int
return nil return nil
} }
// getOrGenerateClusterSecret gets or generates a shared cluster secret
func getOrGenerateClusterSecret() (string, error) {
secretPath := "/home/debros/.debros/cluster-secret"
// Try to read existing secret
if data, err := os.ReadFile(secretPath); err == nil {
secret := strings.TrimSpace(string(data))
if len(secret) == 64 {
return secret, nil
}
}
// Generate new secret (64 hex characters = 32 bytes)
bytes := make([]byte, 32)
if _, err := rand.Read(bytes); err != nil {
return "", fmt.Errorf("failed to generate cluster secret: %w", err)
}
secret := hex.EncodeToString(bytes)
// Save secret
if err := os.WriteFile(secretPath, []byte(secret), 0600); err != nil {
return "", fmt.Errorf("failed to save cluster secret: %w", err)
}
exec.Command("chown", "debros:debros", secretPath).Run()
return secret, nil
}
// getOrGenerateSwarmKey gets or generates a shared IPFS swarm key
// Returns the swarm key content as bytes (formatted for IPFS)
// getOrGenerateSwarmKey returns the private-network swarm key shared by all
// IPFS nodes, generating and persisting it on first use.
// The returned bytes are the complete swarm.key file content in the
// "/key/swarm/psk/1.0.0/" base16 format that Kubo expects.
func getOrGenerateSwarmKey() ([]byte, error) {
	secretPath := "/home/debros/.debros/swarm.key"

	// Reuse an existing key file if it carries the expected PSK header.
	if existing, err := os.ReadFile(secretPath); err == nil {
		if strings.Contains(string(existing), "/key/swarm/psk/1.0.0/") {
			return existing, nil
		}
	}

	// No usable key on disk: draw 32 random bytes for a fresh pre-shared key.
	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		return nil, fmt.Errorf("failed to generate swarm key: %w", err)
	}

	// Kubo expects an upper-case base16 payload beneath the two header lines.
	payload := strings.ToUpper(hex.EncodeToString(raw))
	content := fmt.Sprintf("/key/swarm/psk/1.0.0/\n/base16/\n%s\n", payload)

	// Persist with owner-only permissions (0600); this is key material.
	if err := os.WriteFile(secretPath, []byte(content), 0600); err != nil {
		return nil, fmt.Errorf("failed to save swarm key: %w", err)
	}
	// Best-effort chown: setup may run as root before the debros user owns the tree.
	exec.Command("chown", "debros:debros", secretPath).Run()

	fmt.Printf("   ✓ Generated private swarm key\n")
	return []byte(content), nil
}
// ensureSwarmKey ensures the swarm key exists in the IPFS repo
func ensureSwarmKey(repoPath string, swarmKey []byte) error {
swarmKeyPath := filepath.Join(repoPath, "swarm.key")
// Check if swarm key already exists
if _, err := os.Stat(swarmKeyPath); err == nil {
// Verify it matches (optional: could compare content)
return nil
}
// Create swarm key file in repo
if err := os.WriteFile(swarmKeyPath, swarmKey, 0600); err != nil {
return fmt.Errorf("failed to write swarm key to repo: %w", err)
}
// Fix ownership
exec.Command("chown", "debros:debros", swarmKeyPath).Run()
return nil
}
// initializeIPFSForNode initializes IPFS and IPFS Cluster for a node.
//
// It obtains (or creates) the shared cluster secret and private swarm key,
// creates the node's data directories, initializes the IPFS repository on
// first run (installing the swarm key so the daemon joins the private
// network), and writes the IPFS Cluster service.json when it does not
// exist yet.
//
// nodeID == "bootstrap" selects the bootstrap data layout; any other value
// uses the regular node layout. vpsIP and isBootstrap are forwarded to the
// cluster config generator. Returns an error on the first failed step.
func initializeIPFSForNode(nodeID, vpsIP string, isBootstrap bool) error {
	fmt.Printf(" Initializing IPFS and Cluster for node %s...\n", nodeID)
	// Get or generate cluster secret
	secret, err := getOrGenerateClusterSecret()
	if err != nil {
		return fmt.Errorf("failed to get cluster secret: %w", err)
	}
	// Get or generate swarm key for private network
	swarmKey, err := getOrGenerateSwarmKey()
	if err != nil {
		return fmt.Errorf("failed to get swarm key: %w", err)
	}
	// Determine data directories
	var ipfsDataDir, clusterDataDir string
	if nodeID == "bootstrap" {
		ipfsDataDir = "/home/debros/.debros/bootstrap/ipfs"
		clusterDataDir = "/home/debros/.debros/bootstrap/ipfs-cluster"
	} else {
		ipfsDataDir = "/home/debros/.debros/node/ipfs"
		clusterDataDir = "/home/debros/.debros/node/ipfs-cluster"
	}
	// Create directories. Previously MkdirAll errors were silently ignored,
	// which surfaced later as confusing init/chown failures.
	if err := os.MkdirAll(ipfsDataDir, 0755); err != nil {
		return fmt.Errorf("failed to create IPFS data dir: %w", err)
	}
	if err := os.MkdirAll(clusterDataDir, 0755); err != nil {
		return fmt.Errorf("failed to create cluster data dir: %w", err)
	}
	// Best-effort ownership fix; errors deliberately ignored.
	exec.Command("chown", "-R", "debros:debros", ipfsDataDir).Run()
	exec.Command("chown", "-R", "debros:debros", clusterDataDir).Run()
	// Initialize IPFS if not already initialized
	ipfsRepoPath := filepath.Join(ipfsDataDir, "repo")
	if _, err := os.Stat(filepath.Join(ipfsRepoPath, "config")); os.IsNotExist(err) {
		fmt.Printf(" Initializing IPFS repository...\n")
		// NOTE(review): kubo normally selects its repo via the IPFS_PATH
		// env var; confirm the installed ipfs binary really supports a
		// --repo-dir flag.
		cmd := exec.Command("sudo", "-u", "debros", "ipfs", "init", "--profile=server", "--repo-dir="+ipfsRepoPath)
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to initialize IPFS: %v\n%s", err, string(output))
		}
		// Ensure swarm key is in place (creates private network)
		if err := ensureSwarmKey(ipfsRepoPath, swarmKey); err != nil {
			return fmt.Errorf("failed to set swarm key: %w", err)
		}
		// Configure IPFS API and Gateway addresses (best-effort; errors ignored)
		exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.API", `["/ip4/localhost/tcp/5001"]`, "--repo-dir="+ipfsRepoPath).Run()
		exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Gateway", `["/ip4/localhost/tcp/8080"]`, "--repo-dir="+ipfsRepoPath).Run()
		exec.Command("sudo", "-u", "debros", "ipfs", "config", "--json", "Addresses.Swarm", `["/ip4/0.0.0.0/tcp/4001","/ip6/::/tcp/4001"]`, "--repo-dir="+ipfsRepoPath).Run()
		fmt.Printf(" ✓ IPFS initialized with private swarm key\n")
	} else {
		// Repo exists, but ensure swarm key is present
		if err := ensureSwarmKey(ipfsRepoPath, swarmKey); err != nil {
			return fmt.Errorf("failed to set swarm key: %w", err)
		}
		fmt.Printf(" ✓ IPFS repository already exists, swarm key ensured\n")
	}
	// Initialize IPFS Cluster if not already initialized
	clusterConfigPath := filepath.Join(clusterDataDir, "service.json")
	if _, err := os.Stat(clusterConfigPath); os.IsNotExist(err) {
		fmt.Printf(" Initializing IPFS Cluster...\n")
		// Generate cluster config
		clusterConfig := generateClusterServiceConfig(nodeID, vpsIP, secret, isBootstrap)
		// Write config
		configJSON, err := json.MarshalIndent(clusterConfig, "", " ")
		if err != nil {
			return fmt.Errorf("failed to marshal cluster config: %w", err)
		}
		if err := os.WriteFile(clusterConfigPath, configJSON, 0644); err != nil {
			return fmt.Errorf("failed to write cluster config: %w", err)
		}
		// Best-effort ownership fix for the generated config.
		exec.Command("chown", "debros:debros", clusterConfigPath).Run()
		fmt.Printf(" ✓ IPFS Cluster initialized\n")
	}
	return nil
}
// getClusterPeerID gets the cluster peer ID from a running cluster service
func getClusterPeerID(clusterAPIURL string) (string, error) {
	out, err := exec.Command("ipfs-cluster-ctl", "--host", clusterAPIURL, "id").CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("failed to get cluster peer ID: %v\n%s", err, string(out))
	}

	// Scan the output line by line; the peer ID is the line that starts
	// with the libp2p ed25519 prefix "12D3Koo".
	for _, raw := range strings.Split(string(out), "\n") {
		if candidate := strings.TrimSpace(raw); strings.HasPrefix(candidate, "12D3Koo") {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("could not parse cluster peer ID from output: %s", string(out))
}
// getClusterPeerMultiaddr constructs the cluster peer multiaddr
// (cluster swarm port 9096) for the given VPS IP and peer ID.
func getClusterPeerMultiaddr(vpsIP, peerID string) string {
	return "/ip4/" + vpsIP + "/tcp/9096/p2p/" + peerID
}
// clusterServiceConfig represents IPFS Cluster service.json structure.
type clusterServiceConfig struct {
	Cluster       clusterConfig       `json:"cluster"`
	Consensus     consensusConfig     `json:"consensus"`
	API           apiConfig           `json:"api"`
	IPFSConnector ipfsConnectorConfig `json:"ipfs_connector"`
	Datastore     datastoreConfig     `json:"datastore"`
}

// clusterConfig mirrors the "cluster" section of service.json.
type clusterConfig struct {
	ID                string                  `json:"id"`
	PrivateKey        string                  `json:"private_key"`
	Secret            string                  `json:"secret"`
	Peername          string                  `json:"peername"`
	Bootstrap         []string                `json:"bootstrap"`
	LeaveOnShutdown   bool                    `json:"leave_on_shutdown"`
	ListenMultiaddr   string                  `json:"listen_multiaddress"`
	ConnectionManager connectionManagerConfig `json:"connection_manager"`
}

// connectionManagerConfig bounds the cluster peer connection pool.
type connectionManagerConfig struct {
	LowWater    int    `json:"low_water"`
	HighWater   int    `json:"high_water"`
	GracePeriod string `json:"grace_period"`
}

// consensusConfig selects the CRDT consensus backend.
type consensusConfig struct {
	CRDT crdtConfig `json:"crdt"`
}

// crdtConfig names the cluster and lists its trusted peers.
type crdtConfig struct {
	ClusterName  string   `json:"cluster_name"`
	TrustedPeers []string `json:"trusted_peers"`
}

// apiConfig wraps the REST API section.
type apiConfig struct {
	RestAPI restAPIConfig `json:"restapi"`
}

// restAPIConfig configures the cluster REST API listener.
type restAPIConfig struct {
	HTTPListenMultiaddress string      `json:"http_listen_multiaddress"`
	ID                     string      `json:"id"`
	BasicAuthCredentials   interface{} `json:"basic_auth_credentials"`
}

// ipfsConnectorConfig wraps the ipfshttp connector section.
type ipfsConnectorConfig struct {
	IPFSHTTP ipfsHTTPConfig `json:"ipfshttp"`
}

// ipfsHTTPConfig points the cluster at the local IPFS daemon API.
type ipfsHTTPConfig struct {
	NodeMultiaddress string `json:"node_multiaddress"`
}

// datastoreConfig configures the cluster's local datastore.
type datastoreConfig struct {
	Type string `json:"type"`
	Path string `json:"path"`
}

// generateClusterServiceConfig generates IPFS Cluster service.json config.
//
// The badger datastore path is derived the same way initializeIPFSForNode
// derives the cluster data directory ("bootstrap" vs "node"), so the store
// always lives inside the directory that actually holds service.json.
// Previously the raw nodeID was interpolated into the path, which diverged
// from the on-disk layout for any nodeID other than "bootstrap"/"node".
//
// isBootstrap is currently unused: every node starts with an empty
// bootstrap list, and non-bootstrap peers get theirs injected at service
// start time. The parameter is kept for interface compatibility.
func generateClusterServiceConfig(nodeID, vpsIP, secret string, isBootstrap bool) clusterServiceConfig {
	clusterListenAddr := "/ip4/0.0.0.0/tcp/9096"
	restAPIListenAddr := "/ip4/0.0.0.0/tcp/9094"

	// Match the directory naming used by initializeIPFSForNode.
	dirName := "node"
	if nodeID == "bootstrap" {
		dirName = "bootstrap"
	}

	// For bootstrap node, use empty bootstrap list
	// For other nodes, bootstrap list will be set when starting the service
	bootstrap := []string{}
	return clusterServiceConfig{
		Cluster: clusterConfig{
			Peername:        nodeID,
			Secret:          secret,
			Bootstrap:       bootstrap,
			LeaveOnShutdown: false,
			ListenMultiaddr: clusterListenAddr,
			ConnectionManager: connectionManagerConfig{
				LowWater:    50,
				HighWater:   200,
				GracePeriod: "20s",
			},
		},
		Consensus: consensusConfig{
			CRDT: crdtConfig{
				ClusterName:  "debros-cluster",
				TrustedPeers: []string{"*"}, // Trust all peers
			},
		},
		API: apiConfig{
			RestAPI: restAPIConfig{
				HTTPListenMultiaddress: restAPIListenAddr,
				ID:                     "",
				BasicAuthCredentials:   nil,
			},
		},
		IPFSConnector: ipfsConnectorConfig{
			IPFSHTTP: ipfsHTTPConfig{
				NodeMultiaddress: "/ip4/localhost/tcp/5001",
			},
		},
		Datastore: datastoreConfig{
			Type: "badger",
			Path: fmt.Sprintf("/home/debros/.debros/%s/ipfs-cluster/badger", dirName),
		},
	}
}
func createSystemdServices() { func createSystemdServices() {
fmt.Printf("🔧 Creating systemd services...\n") fmt.Printf("🔧 Creating systemd services...\n")
// IPFS service (runs on all nodes)
ipfsService := `[Unit]
Description=IPFS Daemon
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=debros
Group=debros
Environment=HOME=/home/debros
ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export IPFS_PATH=/home/debros/.debros/node/ipfs/repo; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; else export IPFS_PATH=/home/debros/.debros/bootstrap/ipfs/repo; fi'
ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/swarm.key ] && [ ! -f ${IPFS_PATH}/swarm.key ]; then cp /home/debros/.debros/swarm.key ${IPFS_PATH}/swarm.key && chmod 600 ${IPFS_PATH}/swarm.key; fi'
ExecStart=/usr/bin/ipfs daemon --enable-pubsub-experiment --repo-dir=${IPFS_PATH}
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=/home/debros
[Install]
WantedBy=multi-user.target
`
if err := os.WriteFile("/etc/systemd/system/debros-ipfs.service", []byte(ipfsService), 0644); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to create IPFS service: %v\n", err)
os.Exit(1)
}
// IPFS Cluster service (runs on all nodes)
clusterService := `[Unit]
Description=IPFS Cluster Service
After=debros-ipfs.service
Wants=debros-ipfs.service
Requires=debros-ipfs.service
[Service]
Type=simple
User=debros
Group=debros
WorkingDirectory=/home/debros
Environment=HOME=/home/debros
ExecStartPre=/bin/bash -c 'if [ -f /home/debros/.debros/node.yaml ]; then export CLUSTER_PATH=/home/debros/.debros/node/ipfs-cluster; elif [ -f /home/debros/.debros/bootstrap.yaml ]; then export CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster; else export CLUSTER_PATH=/home/debros/.debros/bootstrap/ipfs-cluster; fi'
ExecStart=/usr/local/bin/ipfs-cluster-service daemon --config ${CLUSTER_PATH}/service.json
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=ipfs-cluster
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ReadWritePaths=/home/debros
[Install]
WantedBy=multi-user.target
`
if err := os.WriteFile("/etc/systemd/system/debros-ipfs-cluster.service", []byte(clusterService), 0644); err != nil {
fmt.Fprintf(os.Stderr, "❌ Failed to create IPFS Cluster service: %v\n", err)
os.Exit(1)
}
// Node service // Node service
nodeService := `[Unit] nodeService := `[Unit]
Description=DeBros Network Node Description=DeBros Network Node
After=network-online.target After=network-online.target debros-ipfs-cluster.service
Wants=network-online.target Wants=network-online.target debros-ipfs-cluster.service
Requires=debros-ipfs-cluster.service
[Service] [Service]
Type=simple Type=simple
@ -1801,6 +2287,8 @@ WantedBy=multi-user.target
// Reload systemd // Reload systemd
exec.Command("systemctl", "daemon-reload").Run() exec.Command("systemctl", "daemon-reload").Run()
exec.Command("systemctl", "enable", "debros-ipfs").Run()
exec.Command("systemctl", "enable", "debros-ipfs-cluster").Run()
exec.Command("systemctl", "enable", "debros-node").Run() exec.Command("systemctl", "enable", "debros-node").Run()
exec.Command("systemctl", "enable", "debros-gateway").Run() exec.Command("systemctl", "enable", "debros-gateway").Run()
@ -1835,6 +2323,18 @@ func startServices() {
} }
} }
// Start IPFS first (required by Cluster)
startOrRestartService("debros-ipfs")
// Wait a bit for IPFS to start
time.Sleep(2 * time.Second)
// Start IPFS Cluster (required by Node)
startOrRestartService("debros-ipfs-cluster")
// Wait a bit for Cluster to start
time.Sleep(2 * time.Second)
// Start or restart node service // Start or restart node service
startOrRestartService("debros-node") startOrRestartService("debros-node")

View File

@ -35,6 +35,7 @@ type Client struct {
database *DatabaseClientImpl database *DatabaseClientImpl
network *NetworkInfoImpl network *NetworkInfoImpl
pubsub *pubSubBridge pubsub *pubSubBridge
storage *StorageClientImpl
// State // State
connected bool connected bool
@ -70,6 +71,7 @@ func NewClient(config *ClientConfig) (NetworkClient, error) {
// Initialize components (will be configured when connected) // Initialize components (will be configured when connected)
client.database = &DatabaseClientImpl{client: client} client.database = &DatabaseClientImpl{client: client}
client.network = &NetworkInfoImpl{client: client} client.network = &NetworkInfoImpl{client: client}
client.storage = &StorageClientImpl{client: client}
return client, nil return client, nil
} }
@ -89,6 +91,11 @@ func (c *Client) Network() NetworkInfo {
return c.network return c.network
} }
// Storage returns the storage client
func (c *Client) Storage() StorageClient {
return c.storage
}
// Config returns a snapshot copy of the client's configuration // Config returns a snapshot copy of the client's configuration
func (c *Client) Config() *ClientConfig { func (c *Client) Config() *ClientConfig {
c.mu.RLock() c.mu.RLock()

View File

@ -50,7 +50,10 @@ func TestNormalizeEndpoints(t *testing.T) {
} }
func TestEndpointFromMultiaddr(t *testing.T) { func TestEndpointFromMultiaddr(t *testing.T) {
ma, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001") ma, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
if err != nil {
t.Fatalf("failed to create multiaddr: %v", err)
}
if ep := endpointFromMultiaddr(ma, 5001); ep != "http://127.0.0.1:5001" { if ep := endpointFromMultiaddr(ma, 5001); ep != "http://127.0.0.1:5001" {
t.Fatalf("unexpected endpoint: %s", ep) t.Fatalf("unexpected endpoint: %s", ep)
} }

View File

@ -3,6 +3,7 @@ package client
import ( import (
"context" "context"
"fmt" "fmt"
"io"
"time" "time"
) )
@ -17,6 +18,9 @@ type NetworkClient interface {
// Network information // Network information
Network() NetworkInfo Network() NetworkInfo
// Storage operations (IPFS)
Storage() StorageClient
// Lifecycle // Lifecycle
Connect() error Connect() error
Disconnect() error Disconnect() error
@ -51,6 +55,24 @@ type NetworkInfo interface {
DisconnectFromPeer(ctx context.Context, peerID string) error DisconnectFromPeer(ctx context.Context, peerID string) error
} }
// StorageClient provides IPFS storage operations
type StorageClient interface {
// Upload uploads content to IPFS and pins it
Upload(ctx context.Context, reader io.Reader, name string) (*StorageUploadResult, error)
// Pin pins an existing CID
Pin(ctx context.Context, cid string, name string) (*StoragePinResult, error)
// Status gets the pin status for a CID
Status(ctx context.Context, cid string) (*StorageStatus, error)
// Get retrieves content from IPFS by CID
Get(ctx context.Context, cid string) (io.ReadCloser, error)
// Unpin removes a pin from a CID
Unpin(ctx context.Context, cid string) error
}
// MessageHandler is called when a pub/sub message is received // MessageHandler is called when a pub/sub message is received
type MessageHandler func(topic string, data []byte) error type MessageHandler func(topic string, data []byte) error
@ -107,12 +129,38 @@ type HealthStatus struct {
ResponseTime time.Duration `json:"response_time"` ResponseTime time.Duration `json:"response_time"`
} }
// StorageUploadResult represents the result of uploading content to IPFS
type StorageUploadResult struct {
Cid string `json:"cid"`
Name string `json:"name"`
Size int64 `json:"size"`
}
// StoragePinResult represents the result of pinning a CID
type StoragePinResult struct {
Cid string `json:"cid"`
Name string `json:"name"`
}
// StorageStatus represents the status of a pinned CID
type StorageStatus struct {
Cid string `json:"cid"`
Name string `json:"name"`
Status string `json:"status"` // "pinned", "pinning", "queued", "unpinned", "error"
ReplicationMin int `json:"replication_min"`
ReplicationMax int `json:"replication_max"`
ReplicationFactor int `json:"replication_factor"`
Peers []string `json:"peers"`
Error string `json:"error,omitempty"`
}
// ClientConfig represents configuration for network clients // ClientConfig represents configuration for network clients
type ClientConfig struct { type ClientConfig struct {
AppName string `json:"app_name"` AppName string `json:"app_name"`
DatabaseName string `json:"database_name"` DatabaseName string `json:"database_name"`
BootstrapPeers []string `json:"bootstrap_peers"` BootstrapPeers []string `json:"bootstrap_peers"`
DatabaseEndpoints []string `json:"database_endpoints"` DatabaseEndpoints []string `json:"database_endpoints"`
GatewayURL string `json:"gateway_url"` // Gateway URL for HTTP API access (e.g., "http://localhost:6001")
ConnectTimeout time.Duration `json:"connect_timeout"` ConnectTimeout time.Duration `json:"connect_timeout"`
RetryAttempts int `json:"retry_attempts"` RetryAttempts int `json:"retry_attempts"`
RetryDelay time.Duration `json:"retry_delay"` RetryDelay time.Duration `json:"retry_delay"`
@ -132,6 +180,7 @@ func DefaultClientConfig(appName string) *ClientConfig {
DatabaseName: fmt.Sprintf("%s_db", appName), DatabaseName: fmt.Sprintf("%s_db", appName),
BootstrapPeers: peers, BootstrapPeers: peers,
DatabaseEndpoints: endpoints, DatabaseEndpoints: endpoints,
GatewayURL: "http://localhost:6001",
ConnectTimeout: time.Second * 30, ConnectTimeout: time.Second * 30,
RetryAttempts: 3, RetryAttempts: 3,
RetryDelay: time.Second * 5, RetryDelay: time.Second * 5,

View File

@ -0,0 +1,245 @@
package client
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"strings"
"time"
)
// StorageClientImpl implements StorageClient using HTTP requests to the gateway
type StorageClientImpl struct {
	// client is the parent network client; it supplies configuration
	// (gateway URL, JWT/API-key credentials) and the requireAccess check
	// performed before every storage operation.
	client *Client
}
// Upload uploads content to IPFS and pins it.
//
// The multipart request body is streamed through an io.Pipe so arbitrarily
// large files are not buffered in memory before the request starts
// (previously the whole payload was accumulated in a bytes.Buffer). Errors
// from the streaming goroutine are propagated through the pipe and surface
// from the HTTP client as "request failed" errors.
func (s *StorageClientImpl) Upload(ctx context.Context, reader io.Reader, name string) (*StorageUploadResult, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}

	gatewayURL := s.getGatewayURL()

	// Stream the multipart form instead of buffering it in memory.
	pr, pw := io.Pipe()
	writer := multipart.NewWriter(pw)
	go func() {
		part, err := writer.CreateFormFile("file", name)
		if err != nil {
			pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
		}
		if _, err := io.Copy(part, reader); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to copy data: %w", err))
			return
		}
		if err := writer.Close(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
			return
		}
		pw.Close()
	}()

	// Create request
	req, err := http.NewRequestWithContext(ctx, "POST", gatewayURL+"/v1/storage/upload", pr)
	if err != nil {
		// Unblock the streaming goroutine before bailing out.
		pr.Close()
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	s.addAuthHeaders(req)

	// Execute request
	client := &http.Client{Timeout: 5 * time.Minute} // Large timeout for file uploads
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(body))
	}

	var result StorageUploadResult
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return &result, nil
}
// Pin pins an existing CID
func (s *StorageClientImpl) Pin(ctx context.Context, cid string, name string) (*StoragePinResult, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}

	// Build the JSON payload; "name" is optional.
	payload := map[string]interface{}{"cid": cid}
	if name != "" {
		payload["name"] = name
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	endpoint := s.getGatewayURL() + "/v1/storage/pin"
	req, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewReader(encoded))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	s.addAuthHeaders(req)

	httpClient := &http.Client{Timeout: 60 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("pin failed with status %d: %s", resp.StatusCode, string(msg))
	}

	result := &StoragePinResult{}
	if err := json.NewDecoder(resp.Body).Decode(result); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return result, nil
}
// Status gets the pin status for a CID
func (s *StorageClientImpl) Status(ctx context.Context, cid string) (*StorageStatus, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}

	endpoint := s.getGatewayURL() + "/v1/storage/status/" + cid
	req, err := http.NewRequestWithContext(ctx, "GET", endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	s.addAuthHeaders(req)

	httpClient := &http.Client{Timeout: 30 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("status failed with status %d: %s", resp.StatusCode, string(msg))
	}

	result := &StorageStatus{}
	if err := json.NewDecoder(resp.Body).Decode(result); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	return result, nil
}
// Get retrieves content from IPFS by CID.
//
// The returned ReadCloser streams the content and must be closed by the
// caller. Note the 5-minute client timeout also bounds how long the caller
// may spend reading the body.
//
// On a non-200 response the gateway's error body is now included in the
// returned error (matching Upload/Pin/Status/Unpin); previously only the
// status code was reported.
func (s *StorageClientImpl) Get(ctx context.Context, cid string) (io.ReadCloser, error) {
	if err := s.client.requireAccess(ctx); err != nil {
		return nil, fmt.Errorf("authentication required: %w", err)
	}

	gatewayURL := s.getGatewayURL()

	req, err := http.NewRequestWithContext(ctx, "GET", gatewayURL+"/v1/storage/get/"+cid, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	s.addAuthHeaders(req)

	client := &http.Client{Timeout: 5 * time.Minute} // Large timeout for file downloads
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		// Read the error body for context, then close it since the body is
		// not returned to the caller on this path.
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		return nil, fmt.Errorf("get failed with status %d: %s", resp.StatusCode, string(body))
	}
	return resp.Body, nil
}
// Unpin removes a pin from a CID
func (s *StorageClientImpl) Unpin(ctx context.Context, cid string) error {
	if err := s.client.requireAccess(ctx); err != nil {
		return fmt.Errorf("authentication required: %w", err)
	}

	endpoint := s.getGatewayURL() + "/v1/storage/unpin/" + cid
	req, err := http.NewRequestWithContext(ctx, "DELETE", endpoint, nil)
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}
	s.addAuthHeaders(req)

	httpClient := &http.Client{Timeout: 30 * time.Second}
	resp, err := httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	// Success needs no body; failures carry the gateway's error text.
	if resp.StatusCode == http.StatusOK {
		return nil
	}
	msg, _ := io.ReadAll(resp.Body)
	return fmt.Errorf("unpin failed with status %d: %s", resp.StatusCode, string(msg))
}
// getGatewayURL returns the gateway URL from config, defaulting to localhost:6001
func (s *StorageClientImpl) getGatewayURL() string {
	// A trailing slash is stripped so path concatenation stays clean.
	if cfg := s.client.Config(); cfg != nil && cfg.GatewayURL != "" {
		return strings.TrimSuffix(cfg.GatewayURL, "/")
	}
	return "http://localhost:6001"
}
// addAuthHeaders adds authentication headers to the request
func (s *StorageClientImpl) addAuthHeaders(req *http.Request) {
	cfg := s.client.Config()
	switch {
	case cfg == nil:
		// No configuration: send the request unauthenticated.
	case cfg.JWT != "":
		// A JWT takes precedence over an API key when both are set.
		req.Header.Set("Authorization", "Bearer "+cfg.JWT)
	case cfg.APIKey != "":
		req.Header.Set("Authorization", "Bearer "+cfg.APIKey)
		req.Header.Set("X-API-Key", cfg.APIKey)
	}
}

View File

@ -0,0 +1,378 @@
package client
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestStorageClientImpl_Upload exercises Upload against a fake gateway:
// a successful multipart upload, a gateway 500, and missing credentials.
func TestStorageClientImpl_Upload(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmUpload123"
		expectedName := "test.txt"
		expectedSize := int64(100)
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/v1/storage/upload" {
				t.Errorf("Expected path '/v1/storage/upload', got %s", r.URL.Path)
			}
			// Verify multipart form
			if err := r.ParseMultipartForm(32 << 20); err != nil {
				t.Errorf("Failed to parse multipart form: %v", err)
				return
			}
			file, header, err := r.FormFile("file")
			if err != nil {
				t.Errorf("Failed to get file: %v", err)
				return
			}
			defer file.Close()
			if header.Filename != expectedName {
				t.Errorf("Expected filename %s, got %s", expectedName, header.Filename)
			}
			response := StorageUploadResult{
				Cid:  expectedCID,
				Name: expectedName,
				Size: expectedSize,
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader := strings.NewReader("test content")
		result, err := storage.Upload(context.Background(), reader, expectedName)
		if err != nil {
			t.Fatalf("Failed to upload: %v", err)
		}
		if result.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, result.Cid)
		}
		if result.Name != expectedName {
			t.Errorf("Expected name %s, got %s", expectedName, result.Name)
		}
		if result.Size != expectedSize {
			t.Errorf("Expected size %d, got %d", expectedSize, result.Size)
		}
	})
	t.Run("server_error", func(t *testing.T) {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("internal error"))
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			// Fix: credentials are required here too; without them
			// requireAccess fails before any request is made and the
			// server-error path was never actually exercised.
			APIKey: "ak_test:test-app",
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader := strings.NewReader("test")
		_, err := storage.Upload(context.Background(), reader, "test.txt")
		if err == nil {
			t.Error("Expected error for server error")
		}
	})
	t.Run("missing_credentials", func(t *testing.T) {
		cfg := &ClientConfig{
			GatewayURL: "http://localhost:6001",
			// No AppName, JWT, or APIKey
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader := strings.NewReader("test")
		_, err := storage.Upload(context.Background(), reader, "test.txt")
		if err == nil {
			t.Error("Expected error for missing credentials")
		}
	})
}
// TestStorageClientImpl_Pin verifies that Pin posts the CID to
// /v1/storage/pin and decodes the gateway's JSON response.
func TestStorageClientImpl_Pin(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmPin123"
		expectedName := "pinned-file"
		// Fake gateway: asserts the request path and JSON body, then
		// echoes a canned pin result.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/v1/storage/pin" {
				t.Errorf("Expected path '/v1/storage/pin', got %s", r.URL.Path)
			}
			var reqBody map[string]interface{}
			if err := json.NewDecoder(r.Body).Decode(&reqBody); err != nil {
				t.Errorf("Failed to decode request: %v", err)
				return
			}
			if reqBody["cid"] != expectedCID {
				t.Errorf("Expected CID %s, got %v", expectedCID, reqBody["cid"])
			}
			response := StoragePinResult{
				Cid:  expectedCID,
				Name: expectedName,
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		result, err := storage.Pin(context.Background(), expectedCID, expectedName)
		if err != nil {
			t.Fatalf("Failed to pin: %v", err)
		}
		if result.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, result.Cid)
		}
		if result.Name != expectedName {
			t.Errorf("Expected name %s, got %s", expectedName, result.Name)
		}
	})
}
// TestStorageClientImpl_Status verifies that Status hits
// /v1/storage/status/<cid> and decodes the returned StorageStatus.
func TestStorageClientImpl_Status(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmStatus123"
		// Fake gateway returning a fully-populated pinned status.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/v1/storage/status/") {
				t.Errorf("Expected path '/v1/storage/status/', got %s", r.URL.Path)
			}
			response := StorageStatus{
				Cid:               expectedCID,
				Name:              "test-file",
				Status:            "pinned",
				ReplicationMin:    3,
				ReplicationMax:    3,
				ReplicationFactor: 3,
				Peers:             []string{"peer1", "peer2", "peer3"},
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(response)
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		status, err := storage.Status(context.Background(), expectedCID)
		if err != nil {
			t.Fatalf("Failed to get status: %v", err)
		}
		if status.Cid != expectedCID {
			t.Errorf("Expected CID %s, got %s", expectedCID, status.Cid)
		}
		if status.Status != "pinned" {
			t.Errorf("Expected status 'pinned', got %s", status.Status)
		}
	})
}
// TestStorageClientImpl_Get verifies that Get streams the body returned by
// /v1/storage/get/<cid> back to the caller unchanged.
func TestStorageClientImpl_Get(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmGet123"
		expectedContent := "test content"
		// Fake gateway serving raw bytes for the requested CID.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/v1/storage/get/") {
				t.Errorf("Expected path '/v1/storage/get/', got %s", r.URL.Path)
			}
			w.Write([]byte(expectedContent))
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		reader, err := storage.Get(context.Background(), expectedCID)
		if err != nil {
			t.Fatalf("Failed to get content: %v", err)
		}
		defer reader.Close()
		data, err := io.ReadAll(reader)
		if err != nil {
			t.Fatalf("Failed to read content: %v", err)
		}
		if string(data) != expectedContent {
			t.Errorf("Expected content %s, got %s", expectedContent, string(data))
		}
	})
}
// TestStorageClientImpl_Unpin verifies that Unpin issues a DELETE to
// /v1/storage/unpin/<cid> and succeeds on a 200 response.
func TestStorageClientImpl_Unpin(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		expectedCID := "QmUnpin123"
		// Fake gateway asserting path and HTTP method.
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/v1/storage/unpin/") {
				t.Errorf("Expected path '/v1/storage/unpin/', got %s", r.URL.Path)
			}
			if r.Method != "DELETE" {
				t.Errorf("Expected method DELETE, got %s", r.Method)
			}
			w.WriteHeader(http.StatusOK)
		}))
		defer server.Close()
		cfg := &ClientConfig{
			GatewayURL: server.URL,
			AppName:    "test-app",
			APIKey:     "ak_test:test-app", // Required for requireAccess check
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		err := storage.Unpin(context.Background(), expectedCID)
		if err != nil {
			t.Fatalf("Failed to unpin: %v", err)
		}
	})
}
// TestStorageClientImpl_getGatewayURL verifies gateway URL resolution:
// the configured URL wins; otherwise the localhost default applies.
//
// Each subtest now builds its own StorageClientImpl; previously a single
// instance was mutated across subtests, shared state that would race if
// the subtests were ever run in parallel.
func TestStorageClientImpl_getGatewayURL(t *testing.T) {
	t.Run("from_config", func(t *testing.T) {
		storage := &StorageClientImpl{client: &Client{config: &ClientConfig{GatewayURL: "http://custom:6001"}}}
		url := storage.getGatewayURL()
		if url != "http://custom:6001" {
			t.Errorf("Expected 'http://custom:6001', got %s", url)
		}
	})
	t.Run("default", func(t *testing.T) {
		storage := &StorageClientImpl{client: &Client{config: &ClientConfig{}}}
		url := storage.getGatewayURL()
		if url != "http://localhost:6001" {
			t.Errorf("Expected 'http://localhost:6001', got %s", url)
		}
	})
	t.Run("nil_config", func(t *testing.T) {
		storage := &StorageClientImpl{client: &Client{config: nil}}
		url := storage.getGatewayURL()
		if url != "http://localhost:6001" {
			t.Errorf("Expected 'http://localhost:6001', got %s", url)
		}
	})
}
// TestStorageClientImpl_addAuthHeaders verifies credential selection:
// JWT is preferred, API key is the fallback (set in both Authorization and
// X-API-Key), and no headers are added without credentials or config.
func TestStorageClientImpl_addAuthHeaders(t *testing.T) {
	t.Run("jwt_preferred", func(t *testing.T) {
		// Both credentials present: the JWT must win.
		cfg := &ClientConfig{
			JWT:    "test-jwt-token",
			APIKey: "test-api-key",
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		req := httptest.NewRequest("POST", "/test", nil)
		storage.addAuthHeaders(req)
		auth := req.Header.Get("Authorization")
		if auth != "Bearer test-jwt-token" {
			t.Errorf("Expected JWT in Authorization header, got %s", auth)
		}
	})
	t.Run("apikey_fallback", func(t *testing.T) {
		// API key only: it is sent both as a bearer token and as X-API-Key.
		cfg := &ClientConfig{
			APIKey: "test-api-key",
		}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		req := httptest.NewRequest("POST", "/test", nil)
		storage.addAuthHeaders(req)
		auth := req.Header.Get("Authorization")
		if auth != "Bearer test-api-key" {
			t.Errorf("Expected API key in Authorization header, got %s", auth)
		}
		apiKey := req.Header.Get("X-API-Key")
		if apiKey != "test-api-key" {
			t.Errorf("Expected API key in X-API-Key header, got %s", apiKey)
		}
	})
	t.Run("no_auth", func(t *testing.T) {
		cfg := &ClientConfig{}
		client := &Client{config: cfg}
		storage := &StorageClientImpl{client: client}
		req := httptest.NewRequest("POST", "/test", nil)
		storage.addAuthHeaders(req)
		auth := req.Header.Get("Authorization")
		if auth != "" {
			t.Errorf("Expected no Authorization header, got %s", auth)
		}
	})
	t.Run("nil_config", func(t *testing.T) {
		client := &Client{config: nil}
		storage := &StorageClientImpl{client: client}
		req := httptest.NewRequest("POST", "/test", nil)
		storage.addAuthHeaders(req)
		auth := req.Header.Get("Authorization")
		if auth != "" {
			t.Errorf("Expected no Authorization header, got %s", auth)
		}
	})
}

View File

@ -36,7 +36,7 @@ type DatabaseConfig struct {
RQLitePort int `yaml:"rqlite_port"` // RQLite HTTP API port RQLitePort int `yaml:"rqlite_port"` // RQLite HTTP API port
RQLiteRaftPort int `yaml:"rqlite_raft_port"` // RQLite Raft consensus port RQLiteRaftPort int `yaml:"rqlite_raft_port"` // RQLite Raft consensus port
RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster RQLiteJoinAddress string `yaml:"rqlite_join_address"` // Address to join RQLite cluster
// Dynamic discovery configuration (always enabled) // Dynamic discovery configuration (always enabled)
ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s ClusterSyncInterval time.Duration `yaml:"cluster_sync_interval"` // default: 30s
PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h PeerInactivityLimit time.Duration `yaml:"peer_inactivity_limit"` // default: 24h
@ -45,6 +45,32 @@ type DatabaseConfig struct {
// Olric cache configuration // Olric cache configuration
OlricHTTPPort int `yaml:"olric_http_port"` // Olric HTTP API port (default: 3320) OlricHTTPPort int `yaml:"olric_http_port"` // Olric HTTP API port (default: 3320)
OlricMemberlistPort int `yaml:"olric_memberlist_port"` // Olric memberlist port (default: 3322) OlricMemberlistPort int `yaml:"olric_memberlist_port"` // Olric memberlist port (default: 3322)
// IPFS storage configuration
IPFS IPFSConfig `yaml:"ipfs"`
}
// IPFSConfig contains IPFS storage configuration
type IPFSConfig struct {
// ClusterAPIURL is the IPFS Cluster HTTP API URL (e.g., "http://localhost:9094")
// If empty, IPFS storage is disabled for this node
ClusterAPIURL string `yaml:"cluster_api_url"`
// APIURL is the IPFS HTTP API URL for content retrieval (e.g., "http://localhost:5001")
// If empty, defaults to "http://localhost:5001"
APIURL string `yaml:"api_url"`
// Timeout for IPFS operations
// If zero, defaults to 60 seconds
Timeout time.Duration `yaml:"timeout"`
// ReplicationFactor is the replication factor for pinned content
// If zero, defaults to 3
ReplicationFactor int `yaml:"replication_factor"`
// EnableEncryption enables client-side encryption before upload
// Defaults to true
EnableEncryption bool `yaml:"enable_encryption"`
} }
// DiscoveryConfig contains peer discovery configuration // DiscoveryConfig contains peer discovery configuration
@ -115,7 +141,7 @@ func DefaultConfig() *Config {
RQLitePort: 5001, RQLitePort: 5001,
RQLiteRaftPort: 7001, RQLiteRaftPort: 7001,
RQLiteJoinAddress: "", // Empty for bootstrap node RQLiteJoinAddress: "", // Empty for bootstrap node
// Dynamic discovery (always enabled) // Dynamic discovery (always enabled)
ClusterSyncInterval: 30 * time.Second, ClusterSyncInterval: 30 * time.Second,
PeerInactivityLimit: 24 * time.Hour, PeerInactivityLimit: 24 * time.Hour,
@ -124,6 +150,15 @@ func DefaultConfig() *Config {
// Olric cache configuration // Olric cache configuration
OlricHTTPPort: 3320, OlricHTTPPort: 3320,
OlricMemberlistPort: 3322, OlricMemberlistPort: 3322,
// IPFS storage configuration
IPFS: IPFSConfig{
ClusterAPIURL: "", // Empty = disabled
APIURL: "http://localhost:5001",
Timeout: 60 * time.Second,
ReplicationFactor: 3,
EnableEncryption: true,
},
}, },
Discovery: DiscoveryConfig{ Discovery: DiscoveryConfig{
BootstrapPeers: []string{}, BootstrapPeers: []string{},

View File

@ -30,8 +30,8 @@ func validConfigForType(nodeType string) *Config {
BootstrapPeers: []string{validPeer}, BootstrapPeers: []string{validPeer},
DiscoveryInterval: 15 * time.Second, DiscoveryInterval: 15 * time.Second,
BootstrapPort: 4001, BootstrapPort: 4001,
HttpAdvAddress: "127.0.0.1:5001", HttpAdvAddress: "localhost:5001",
RaftAdvAddress: "127.0.0.1:7001", RaftAdvAddress: "localhost:7001",
NodeNamespace: "default", NodeNamespace: "default",
}, },
Logging: LoggingConfig{ Logging: LoggingConfig{
@ -392,7 +392,7 @@ func TestValidateCompleteConfig(t *testing.T) {
BackupInterval: 24 * time.Hour, BackupInterval: 24 * time.Hour,
RQLitePort: 5002, RQLitePort: 5002,
RQLiteRaftPort: 7002, RQLiteRaftPort: 7002,
RQLiteJoinAddress: "127.0.0.1:7001", RQLiteJoinAddress: "localhost:7001",
MinClusterSize: 1, MinClusterSize: 1,
}, },
Discovery: DiscoveryConfig{ Discovery: DiscoveryConfig{
@ -401,8 +401,8 @@ func TestValidateCompleteConfig(t *testing.T) {
}, },
DiscoveryInterval: 15 * time.Second, DiscoveryInterval: 15 * time.Second,
BootstrapPort: 4001, BootstrapPort: 4001,
HttpAdvAddress: "127.0.0.1:5001", HttpAdvAddress: "localhost:5001",
RaftAdvAddress: "127.0.0.1:7001", RaftAdvAddress: "localhost:7001",
NodeNamespace: "default", NodeNamespace: "default",
}, },
Security: SecurityConfig{ Security: SecurityConfig{

View File

@ -234,7 +234,7 @@ func isPrivateOrLocalHost(host string) bool {
} }
// Check for localhost variants // Check for localhost variants
if host == "localhost" || host == "127.0.0.1" || host == "::1" { if host == "localhost" || host == "::1" {
return true return true
} }

View File

@ -92,7 +92,7 @@ func TestAnonProxyHandler_PrivateAddressBlocking(t *testing.T) {
url string url string
}{ }{
{"localhost", "http://localhost/test"}, {"localhost", "http://localhost/test"},
{"127.0.0.1", "http://127.0.0.1/test"}, {"localhost", "http://localhost/test"},
{"private 10.x", "http://10.0.0.1/test"}, {"private 10.x", "http://10.0.0.1/test"},
{"private 192.168.x", "http://192.168.1.1/test"}, {"private 192.168.x", "http://192.168.1.1/test"},
{"private 172.16.x", "http://172.16.0.1/test"}, {"private 172.16.x", "http://172.16.0.1/test"},
@ -166,7 +166,7 @@ func TestIsPrivateOrLocalHost(t *testing.T) {
expected bool expected bool
}{ }{
{"localhost", true}, {"localhost", true},
{"127.0.0.1", true}, {"localhost", true},
{"::1", true}, {"::1", true},
{"10.0.0.1", true}, {"10.0.0.1", true},
{"192.168.1.1", true}, {"192.168.1.1", true},

View File

@ -6,11 +6,16 @@ import (
"crypto/rsa" "crypto/rsa"
"database/sql" "database/sql"
"net" "net"
"os"
"path/filepath"
"strconv" "strconv"
"strings"
"sync" "sync"
"time" "time"
"github.com/DeBrosOfficial/network/pkg/client" "github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging" "github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/olric" "github.com/DeBrosOfficial/network/pkg/olric"
"github.com/DeBrosOfficial/network/pkg/rqlite" "github.com/DeBrosOfficial/network/pkg/rqlite"
@ -38,6 +43,13 @@ type Config struct {
// Olric cache configuration // Olric cache configuration
OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"] OlricServers []string // List of Olric server addresses (e.g., ["localhost:3320"]). If empty, defaults to ["localhost:3320"]
OlricTimeout time.Duration // Timeout for Olric operations (default: 10s) OlricTimeout time.Duration // Timeout for Olric operations (default: 10s)
// IPFS Cluster configuration
IPFSClusterAPIURL string // IPFS Cluster HTTP API URL (e.g., "http://localhost:9094"). If empty, gateway will discover from node configs
IPFSAPIURL string // IPFS HTTP API URL for content retrieval (e.g., "http://localhost:5001"). If empty, gateway will discover from node configs
IPFSTimeout time.Duration // Timeout for IPFS operations (default: 60s)
IPFSReplicationFactor int // Replication factor for pins (default: 3)
IPFSEnableEncryption bool // Enable client-side encryption before upload (default: true, discovered from node configs)
} }
type Gateway struct { type Gateway struct {
@ -56,6 +68,9 @@ type Gateway struct {
// Olric cache client // Olric cache client
olricClient *olric.Client olricClient *olric.Client
// IPFS storage client
ipfsClient ipfs.IPFSClient
// Local pub/sub bypass for same-gateway subscribers // Local pub/sub bypass for same-gateway subscribers
localSubscribers map[string][]*localSubscriber // topic+namespace -> subscribers localSubscribers map[string][]*localSubscriber // topic+namespace -> subscribers
mu sync.RWMutex mu sync.RWMutex
@ -178,6 +193,99 @@ func New(logger *logging.ColoredLogger, cfg *Config) (*Gateway, error) {
) )
} }
logger.ComponentInfo(logging.ComponentGeneral, "Initializing IPFS Cluster client...")
// Discover IPFS endpoints from node configs if not explicitly configured
ipfsClusterURL := cfg.IPFSClusterAPIURL
ipfsAPIURL := cfg.IPFSAPIURL
ipfsTimeout := cfg.IPFSTimeout
ipfsReplicationFactor := cfg.IPFSReplicationFactor
ipfsEnableEncryption := cfg.IPFSEnableEncryption
if ipfsClusterURL == "" {
logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster URL not configured, discovering from node configs...")
discovered := discoverIPFSFromNodeConfigs(logger.Logger)
if discovered.clusterURL != "" {
ipfsClusterURL = discovered.clusterURL
ipfsAPIURL = discovered.apiURL
if discovered.timeout > 0 {
ipfsTimeout = discovered.timeout
}
if discovered.replicationFactor > 0 {
ipfsReplicationFactor = discovered.replicationFactor
}
ipfsEnableEncryption = discovered.enableEncryption
logger.ComponentInfo(logging.ComponentGeneral, "Discovered IPFS endpoints from node configs",
zap.String("cluster_url", ipfsClusterURL),
zap.String("api_url", ipfsAPIURL),
zap.Bool("encryption_enabled", ipfsEnableEncryption))
} else {
// Fallback to localhost defaults
ipfsClusterURL = "http://localhost:9094"
ipfsAPIURL = "http://localhost:5001"
ipfsEnableEncryption = true // Default to true
logger.ComponentInfo(logging.ComponentGeneral, "No IPFS config found in node configs, using localhost defaults")
}
}
if ipfsAPIURL == "" {
ipfsAPIURL = "http://localhost:5001"
}
if ipfsTimeout == 0 {
ipfsTimeout = 60 * time.Second
}
if ipfsReplicationFactor == 0 {
ipfsReplicationFactor = 3
}
if !cfg.IPFSEnableEncryption && !ipfsEnableEncryption {
// Only disable if explicitly set to false in both places
ipfsEnableEncryption = false
} else {
// Default to true if not explicitly disabled
ipfsEnableEncryption = true
}
ipfsCfg := ipfs.Config{
ClusterAPIURL: ipfsClusterURL,
Timeout: ipfsTimeout,
}
ipfsClient, ipfsErr := ipfs.NewClient(ipfsCfg, logger.Logger)
if ipfsErr != nil {
logger.ComponentWarn(logging.ComponentGeneral, "failed to initialize IPFS Cluster client; storage endpoints disabled", zap.Error(ipfsErr))
} else {
gw.ipfsClient = ipfsClient
// Check peer count and warn if insufficient (use background context to avoid blocking)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if peerCount, err := ipfsClient.GetPeerCount(ctx); err == nil {
if peerCount < ipfsReplicationFactor {
logger.ComponentWarn(logging.ComponentGeneral, "insufficient cluster peers for replication factor",
zap.Int("peer_count", peerCount),
zap.Int("replication_factor", ipfsReplicationFactor),
zap.String("message", "Some pin operations may fail until more peers join the cluster"))
} else {
logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster peer count sufficient",
zap.Int("peer_count", peerCount),
zap.Int("replication_factor", ipfsReplicationFactor))
}
} else {
logger.ComponentWarn(logging.ComponentGeneral, "failed to get cluster peer count", zap.Error(err))
}
logger.ComponentInfo(logging.ComponentGeneral, "IPFS Cluster client ready",
zap.String("cluster_api_url", ipfsCfg.ClusterAPIURL),
zap.String("ipfs_api_url", ipfsAPIURL),
zap.Duration("timeout", ipfsCfg.Timeout),
zap.Int("replication_factor", ipfsReplicationFactor),
zap.Bool("encryption_enabled", ipfsEnableEncryption),
)
}
// Store IPFS settings in gateway for use by handlers
gw.cfg.IPFSAPIURL = ipfsAPIURL
gw.cfg.IPFSReplicationFactor = ipfsReplicationFactor
gw.cfg.IPFSEnableEncryption = ipfsEnableEncryption
logger.ComponentInfo(logging.ComponentGeneral, "Gateway creation completed, returning...") logger.ComponentInfo(logging.ComponentGeneral, "Gateway creation completed, returning...")
return gw, nil return gw, nil
} }
@ -204,6 +312,13 @@ func (g *Gateway) Close() {
g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err)) g.logger.ComponentWarn(logging.ComponentGeneral, "error during Olric client close", zap.Error(err))
} }
} }
if g.ipfsClient != nil {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := g.ipfsClient.Close(ctx); err != nil {
g.logger.ComponentWarn(logging.ComponentGeneral, "error during IPFS client close", zap.Error(err))
}
}
} }
// getLocalSubscribers returns all local subscribers for a given topic and namespace // getLocalSubscribers returns all local subscribers for a given topic and namespace
@ -256,7 +371,7 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
} }
// Skip localhost loopback addresses (we'll use localhost:3320 as fallback) // Skip localhost loopback addresses (we'll use localhost:3320 as fallback)
if ip == "127.0.0.1" || ip == "::1" || ip == "localhost" { if ip == "localhost" || ip == "::1" {
continue continue
} }
@ -287,7 +402,7 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
} }
// Skip localhost // Skip localhost
if ip == "127.0.0.1" || ip == "::1" || ip == "localhost" { if ip == "localhost" || ip == "::1" {
continue continue
} }
@ -307,3 +422,77 @@ func discoverOlricServers(networkClient client.NetworkClient, logger *zap.Logger
return olricServers return olricServers
} }
// ipfsDiscoveryResult holds discovered IPFS configuration read from a node's
// YAML config file; the zero value means "nothing discovered".
type ipfsDiscoveryResult struct {
	clusterURL        string        // IPFS Cluster HTTP API URL (empty = not found)
	apiURL            string        // IPFS HTTP API URL used for content retrieval
	timeout           time.Duration // timeout for IPFS operations
	replicationFactor int           // replication factor for pins
	enableEncryption  bool          // client-side encryption flag from the config
}
// discoverIPFSFromNodeConfigs discovers IPFS configuration from node.yaml files
// under ~/.debros. Checks bootstrap.yaml first, then node.yaml, node2.yaml, etc.,
// and returns the first config that sets a non-empty cluster API URL, with
// defaults applied for unset fields. Returns the zero value when no usable
// config is found.
func discoverIPFSFromNodeConfigs(logger *zap.Logger) ipfsDiscoveryResult {
	homeDir, err := os.UserHomeDir()
	if err != nil {
		logger.Debug("Failed to get home directory for IPFS discovery", zap.Error(err))
		return ipfsDiscoveryResult{}
	}
	configDir := filepath.Join(homeDir, ".debros")
	// Try bootstrap.yaml first, then node.yaml, node2.yaml, etc.
	configFiles := []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"}
	for _, filename := range configFiles {
		configPath := filepath.Join(configDir, filename)
		data, err := os.ReadFile(configPath)
		if err != nil {
			// Missing or unreadable candidate: move on to the next file.
			continue
		}
		var nodeCfg config.Config
		if err := config.DecodeStrict(strings.NewReader(string(data)), &nodeCfg); err != nil {
			logger.Debug("Failed to parse node config for IPFS discovery",
				zap.String("file", filename), zap.Error(err))
			continue
		}
		// Check if IPFS is configured
		if nodeCfg.Database.IPFS.ClusterAPIURL != "" {
			result := ipfsDiscoveryResult{
				clusterURL:        nodeCfg.Database.IPFS.ClusterAPIURL,
				apiURL:            nodeCfg.Database.IPFS.APIURL,
				timeout:           nodeCfg.Database.IPFS.Timeout,
				replicationFactor: nodeCfg.Database.IPFS.ReplicationFactor,
				enableEncryption:  nodeCfg.Database.IPFS.EnableEncryption,
			}
			if result.apiURL == "" {
				result.apiURL = "http://localhost:5001"
			}
			if result.timeout == 0 {
				result.timeout = 60 * time.Second
			}
			if result.replicationFactor == 0 {
				result.replicationFactor = 3
			}
			// Default encryption to true if not set
			// NOTE(review): a plain bool cannot distinguish "unset" from
			// "explicitly false", so this forces encryption on and an
			// `enable_encryption: false` YAML setting is silently ignored.
			// Consider making IPFSConfig.EnableEncryption a *bool if opting
			// out must be supported — TODO confirm intended behavior.
			if !result.enableEncryption {
				result.enableEncryption = true
			}
			logger.Info("Discovered IPFS config from node config",
				zap.String("file", filename),
				zap.String("cluster_url", result.clusterURL),
				zap.String("api_url", result.apiURL),
				zap.Bool("encryption_enabled", result.enableEncryption))
			return result
		}
	}
	return ipfsDiscoveryResult{}
}

View File

@ -26,12 +26,3 @@ func TestExtractAPIKey(t *testing.T) {
t.Fatalf("got %q", got) t.Fatalf("got %q", got)
} }
} }
func TestValidateNamespaceParam(t *testing.T) {
g := &Gateway{}
r := httptest.NewRequest(http.MethodGet, "/v1/storage/get?namespace=ns1&key=k", nil)
// no context namespace: should be false
if g.validateNamespaceParam(r) {
t.Fatalf("expected false without context ns")
}
}

View File

@ -54,5 +54,12 @@ func (g *Gateway) Routes() http.Handler {
mux.HandleFunc("/v1/cache/delete", g.cacheDeleteHandler) mux.HandleFunc("/v1/cache/delete", g.cacheDeleteHandler)
mux.HandleFunc("/v1/cache/scan", g.cacheScanHandler) mux.HandleFunc("/v1/cache/scan", g.cacheScanHandler)
// storage endpoints (IPFS)
mux.HandleFunc("/v1/storage/upload", g.storageUploadHandler)
mux.HandleFunc("/v1/storage/pin", g.storagePinHandler)
mux.HandleFunc("/v1/storage/status/", g.storageStatusHandler)
mux.HandleFunc("/v1/storage/get/", g.storageGetHandler)
mux.HandleFunc("/v1/storage/unpin/", g.storageUnpinHandler)
return g.withMiddleware(mux) return g.withMiddleware(mux)
} }

View File

@ -1,13 +1,343 @@
package gateway package gateway
import ( import (
"bytes"
"context"
"encoding/base64"
"encoding/json" "encoding/json"
"fmt"
"io"
"net/http" "net/http"
"strings"
"github.com/DeBrosOfficial/network/pkg/client" "github.com/DeBrosOfficial/network/pkg/client"
"github.com/DeBrosOfficial/network/pkg/logging"
"go.uber.org/zap"
) )
// Database HTTP handlers // StorageUploadRequest represents a request to upload content to IPFS
type StorageUploadRequest struct {
	Name string `json:"name,omitempty"` // optional display name for the content
	Data string `json:"data,omitempty"` // Base64 encoded data (alternative to multipart)
}

// StorageUploadResponse represents the response from uploading content
type StorageUploadResponse struct {
	Cid  string `json:"cid"`  // content identifier returned by IPFS
	Name string `json:"name"` // name echoed back from the add operation
	Size int64  `json:"size"` // size in bytes as reported by IPFS
}

// StoragePinRequest represents a request to pin a CID
type StoragePinRequest struct {
	Cid  string `json:"cid"`            // content identifier to pin (required)
	Name string `json:"name,omitempty"` // optional display name for the pin
}

// StoragePinResponse represents the response from pinning a CID
type StoragePinResponse struct {
	Cid  string `json:"cid"`
	Name string `json:"name"`
}

// StorageStatusResponse represents the status of a pinned CID
type StorageStatusResponse struct {
	Cid               string   `json:"cid"`
	Name              string   `json:"name"`
	Status            string   `json:"status"`            // cluster pin state (as reported by the IPFS client)
	ReplicationMin    int      `json:"replication_min"`   // minimum replication reported by the cluster
	ReplicationMax    int      `json:"replication_max"`   // maximum replication reported by the cluster
	ReplicationFactor int      `json:"replication_factor"`
	Peers             []string `json:"peers"`             // peers holding the pin
	Error             string   `json:"error,omitempty"`   // error text, if the cluster reported one
}
// storageUploadHandler handles POST /v1/storage/upload
// Accepts either multipart/form-data (field "file") or a JSON body carrying
// base64-encoded content, adds the content to IPFS, then pins it with the
// configured replication factor. Requires a namespace in the request context.
func (g *Gateway) storageUploadHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Get namespace from context
	namespace := g.getNamespaceFromContext(r.Context())
	if namespace == "" {
		writeError(w, http.StatusUnauthorized, "namespace required")
		return
	}
	// Get replication factor from config (default: 3)
	replicationFactor := g.cfg.IPFSReplicationFactor
	if replicationFactor == 0 {
		replicationFactor = 3
	}
	// Check if it's multipart/form-data or JSON
	contentType := r.Header.Get("Content-Type")
	var reader io.Reader
	var name string
	if strings.HasPrefix(contentType, "multipart/form-data") {
		// Handle multipart upload
		if err := r.ParseMultipartForm(32 << 20); err != nil { // 32MB max
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to parse multipart form: %v", err))
			return
		}
		file, header, err := r.FormFile("file")
		if err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to get file: %v", err))
			return
		}
		defer file.Close()
		reader = file
		name = header.Filename
	} else {
		// Handle JSON request with base64 data.
		// Cap the body so a huge base64 payload cannot buffer unbounded data
		// in memory; mirrors the 32MB cap used for multipart parsing above.
		r.Body = http.MaxBytesReader(w, r.Body, 32<<20)
		var req StorageUploadRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode request: %v", err))
			return
		}
		if req.Data == "" {
			writeError(w, http.StatusBadRequest, "data field required")
			return
		}
		// Decode base64 data
		data, err := base64Decode(req.Data)
		if err != nil {
			writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode base64 data: %v", err))
			return
		}
		reader = bytes.NewReader(data)
		name = req.Name
	}
	// Add to IPFS
	ctx := r.Context()
	addResp, err := g.ipfsClient.Add(ctx, reader, name)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to add content to IPFS", zap.Error(err))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to add content: %v", err))
		return
	}
	// Pin with replication factor. A pin failure is logged but does not fail
	// the upload: the content is already stored in IPFS at this point.
	_, err = g.ipfsClient.Pin(ctx, addResp.Cid, name, replicationFactor)
	if err != nil {
		g.logger.ComponentWarn(logging.ComponentGeneral, "failed to pin content", zap.Error(err), zap.String("cid", addResp.Cid))
		// Still return success, but log the pin failure
	}
	response := StorageUploadResponse{
		Cid:  addResp.Cid,
		Name: addResp.Name,
		Size: addResp.Size,
	}
	writeJSON(w, http.StatusOK, response)
}
// storagePinHandler handles POST /v1/storage/pin
// Pins an existing CID on the cluster using the gateway's configured
// replication factor.
func (g *Gateway) storagePinHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	var body StoragePinRequest
	if decodeErr := json.NewDecoder(r.Body).Decode(&body); decodeErr != nil {
		writeError(w, http.StatusBadRequest, fmt.Sprintf("failed to decode request: %v", decodeErr))
		return
	}
	if body.Cid == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	// Replication factor comes from gateway config; zero means "use default 3".
	rf := g.cfg.IPFSReplicationFactor
	if rf == 0 {
		rf = 3
	}
	pinResp, err := g.ipfsClient.Pin(r.Context(), body.Cid, body.Name, rf)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to pin CID", zap.Error(err), zap.String("cid", body.Cid))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to pin: %v", err))
		return
	}
	// Prefer the name reported by the cluster; fall back to the request's.
	pinnedName := pinResp.Name
	if pinnedName == "" {
		pinnedName = body.Name
	}
	writeJSON(w, http.StatusOK, StoragePinResponse{
		Cid:  pinResp.Cid,
		Name: pinnedName,
	})
}
// storageStatusHandler handles GET /v1/storage/status/:cid
// Reports the cluster pin status for the CID taken from the URL path.
func (g *Gateway) storageStatusHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// The CID is everything after the route prefix.
	cid := strings.TrimPrefix(r.URL.Path, "/v1/storage/status/")
	if cid == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	status, err := g.ipfsClient.PinStatus(r.Context(), cid)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to get pin status", zap.Error(err), zap.String("cid", cid))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get status: %v", err))
		return
	}
	writeJSON(w, http.StatusOK, StorageStatusResponse{
		Cid:               status.Cid,
		Name:              status.Name,
		Status:            status.Status,
		ReplicationMin:    status.ReplicationMin,
		ReplicationMax:    status.ReplicationMax,
		ReplicationFactor: status.ReplicationFactor,
		Peers:             status.Peers,
		Error:             status.Error,
	})
}
// storageGetHandler handles GET /v1/storage/get/:cid
// Streams the content for the CID in the URL path from the IPFS HTTP API back
// to the caller as an attachment. Requires a namespace in the request context.
func (g *Gateway) storageGetHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// Extract CID from path
	path := strings.TrimPrefix(r.URL.Path, "/v1/storage/get/")
	if path == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	// Get namespace from context
	namespace := g.getNamespaceFromContext(r.Context())
	if namespace == "" {
		writeError(w, http.StatusUnauthorized, "namespace required")
		return
	}
	// Get IPFS API URL from config
	ipfsAPIURL := g.cfg.IPFSAPIURL
	if ipfsAPIURL == "" {
		ipfsAPIURL = "http://localhost:5001"
	}
	ctx := r.Context()
	reader, err := g.ipfsClient.Get(ctx, path, ipfsAPIURL)
	if err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to get content from IPFS", zap.Error(err), zap.String("cid", path))
		// Check if error indicates content not found (404).
		// NOTE(review): this matches on error text — brittle; a typed sentinel
		// error from the ipfs package would be more robust. TODO confirm.
		if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "status 404") {
			writeError(w, http.StatusNotFound, fmt.Sprintf("content not found: %s", path))
		} else {
			writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to get content: %v", err))
		}
		return
	}
	defer reader.Close()
	w.Header().Set("Content-Type", "application/octet-stream")
	// Quote the filename: the path segment is caller-controlled, and an
	// unquoted value containing spaces, quotes or semicolons would corrupt
	// the Content-Disposition header.
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", path))
	if _, err := io.Copy(w, reader); err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to write content", zap.Error(err))
	}
}
// storageUnpinHandler handles DELETE /v1/storage/unpin/:cid
// Removes the cluster pin for the CID taken from the URL path.
func (g *Gateway) storageUnpinHandler(w http.ResponseWriter, r *http.Request) {
	if g.ipfsClient == nil {
		writeError(w, http.StatusServiceUnavailable, "IPFS storage not available")
		return
	}
	if r.Method != http.MethodDelete {
		writeError(w, http.StatusMethodNotAllowed, "method not allowed")
		return
	}
	// The CID is everything after the route prefix.
	cid := strings.TrimPrefix(r.URL.Path, "/v1/storage/unpin/")
	if cid == "" {
		writeError(w, http.StatusBadRequest, "cid required")
		return
	}
	if err := g.ipfsClient.Unpin(r.Context(), cid); err != nil {
		g.logger.ComponentError(logging.ComponentGeneral, "failed to unpin CID", zap.Error(err), zap.String("cid", cid))
		writeError(w, http.StatusInternalServerError, fmt.Sprintf("failed to unpin: %v", err))
		return
	}
	writeJSON(w, http.StatusOK, map[string]any{"status": "ok", "cid": cid})
}
// base64Decode decodes a standard-alphabet base64 string into raw bytes.
func base64Decode(s string) ([]byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// getNamespaceFromContext extracts the namespace override from the request
// context, returning "" when it is absent, not a string, or empty.
func (g *Gateway) getNamespaceFromContext(ctx context.Context) string {
	ns, ok := ctx.Value(ctxKeyNamespaceOverride).(string)
	if !ok || ns == "" {
		return ""
	}
	return ns
}
// Network HTTP handlers
func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) { func (g *Gateway) networkStatusHandler(w http.ResponseWriter, r *http.Request) {
if g.client == nil { if g.client == nil {
@ -84,17 +414,3 @@ func (g *Gateway) networkDisconnectHandler(w http.ResponseWriter, r *http.Reques
} }
writeJSON(w, http.StatusOK, map[string]any{"status": "ok"}) writeJSON(w, http.StatusOK, map[string]any{"status": "ok"})
} }
func (g *Gateway) validateNamespaceParam(r *http.Request) bool {
qns := r.URL.Query().Get("namespace")
if qns == "" {
return true
}
if v := r.Context().Value(ctxKeyNamespaceOverride); v != nil {
if s, ok := v.(string); ok && s != "" {
return s == qns
}
}
// If no namespace in context, disallow explicit namespace param
return false
}

View File

@ -0,0 +1,562 @@
package gateway
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging"
)
// mockIPFSClient is a mock implementation of ipfs.IPFSClient for testing.
// Each func field, when non-nil, overrides the corresponding method; nil
// fields fall back to canned success responses.
type mockIPFSClient struct {
	addFunc          func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error)
	pinFunc          func(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error)
	pinStatusFunc    func(ctx context.Context, cid string) (*ipfs.PinStatus, error)
	getFunc          func(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error)
	unpinFunc        func(ctx context.Context, cid string) error
	getPeerCountFunc func(ctx context.Context) (int, error)
}
// Add delegates to addFunc when set; otherwise returns a canned response.
func (m *mockIPFSClient) Add(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
	if m.addFunc == nil {
		return &ipfs.AddResponse{Cid: "QmTest123", Name: name, Size: 100}, nil
	}
	return m.addFunc(ctx, reader, name)
}
// Pin delegates to pinFunc when set; otherwise echoes the request back.
func (m *mockIPFSClient) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*ipfs.PinResponse, error) {
	if m.pinFunc == nil {
		return &ipfs.PinResponse{Cid: cid, Name: name}, nil
	}
	return m.pinFunc(ctx, cid, name, replicationFactor)
}
// PinStatus delegates to pinStatusFunc when set; otherwise reports a fully
// replicated "pinned" status on three peers.
func (m *mockIPFSClient) PinStatus(ctx context.Context, cid string) (*ipfs.PinStatus, error) {
	if m.pinStatusFunc == nil {
		return &ipfs.PinStatus{
			Cid:               cid,
			Name:              "test",
			Status:            "pinned",
			ReplicationMin:    3,
			ReplicationMax:    3,
			ReplicationFactor: 3,
			Peers:             []string{"peer1", "peer2", "peer3"},
		}, nil
	}
	return m.pinStatusFunc(ctx, cid)
}
// Get delegates to getFunc when set; otherwise streams fixed test content.
func (m *mockIPFSClient) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) {
	if m.getFunc == nil {
		return io.NopCloser(strings.NewReader("test content")), nil
	}
	return m.getFunc(ctx, cid, ipfsAPIURL)
}
// Unpin delegates to unpinFunc when set; otherwise succeeds.
func (m *mockIPFSClient) Unpin(ctx context.Context, cid string) error {
	if m.unpinFunc == nil {
		return nil
	}
	return m.unpinFunc(ctx, cid)
}
// Health always reports the mock client as healthy.
func (m *mockIPFSClient) Health(ctx context.Context) error {
	return nil
}
// GetPeerCount delegates to getPeerCountFunc when set; otherwise reports 3 peers.
func (m *mockIPFSClient) GetPeerCount(ctx context.Context) (int, error) {
	if m.getPeerCountFunc == nil {
		return 3, nil
	}
	return m.getPeerCountFunc(ctx)
}
// Close is a no-op for the mock client.
func (m *mockIPFSClient) Close(ctx context.Context) error {
	return nil
}
// newTestGatewayWithIPFS builds a minimal Gateway for handler tests, wired
// with the given (possibly nil) IPFS client and default IPFS settings.
func newTestGatewayWithIPFS(t *testing.T, ipfsClient ipfs.IPFSClient) *Gateway {
	logger, err := logging.NewColoredLogger(logging.ComponentGeneral, true)
	if err != nil {
		t.Fatalf("Failed to create logger: %v", err)
	}
	gw := &Gateway{
		logger: logger,
		cfg: &Config{
			ListenAddr:            ":6001",
			ClientNamespace:       "test",
			IPFSReplicationFactor: 3,
			IPFSEnableEncryption:  true,
			IPFSAPIURL:            "http://localhost:5001",
		},
	}
	if ipfsClient != nil {
		gw.ipfsClient = ipfsClient
	}
	return gw
}
// TestStorageUploadHandler_MissingIPFSClient verifies the handler returns
// 503 when the gateway has no IPFS client configured.
func TestStorageUploadHandler_MissingIPFSClient(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, nil)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", nil)
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusServiceUnavailable {
		t.Errorf("Expected status %d, got %d", http.StatusServiceUnavailable, w.Code)
	}
}
// TestStorageUploadHandler_MethodNotAllowed verifies the handler rejects
// non-POST requests with 405.
func TestStorageUploadHandler_MethodNotAllowed(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodGet, "/v1/storage/upload", nil)
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusMethodNotAllowed {
		t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, w.Code)
	}
}
// TestStorageUploadHandler_MissingNamespace verifies the handler returns 401
// when the request context carries no namespace override.
func TestStorageUploadHandler_MissingNamespace(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", nil)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusUnauthorized {
		t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, w.Code)
	}
}
// TestStorageUploadHandler_MultipartUpload verifies the multipart path: the
// uploaded file reaches Add with non-empty content, and the mock's CID, name
// and size are echoed back in the JSON response.
func TestStorageUploadHandler_MultipartUpload(t *testing.T) {
	expectedCID := "QmTest456"
	expectedName := "test.txt"
	expectedSize := int64(200)
	mockClient := &mockIPFSClient{
		addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
			// Read and verify content
			data, _ := io.ReadAll(reader)
			if len(data) == 0 {
				return nil, io.ErrUnexpectedEOF
			}
			return &ipfs.AddResponse{
				Cid:  expectedCID,
				Name: name,
				Size: expectedSize,
			}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	// Build a multipart body with a single "file" part.
	var buf bytes.Buffer
	writer := multipart.NewWriter(&buf)
	part, _ := writer.CreateFormFile("file", expectedName)
	part.Write([]byte("test file content"))
	writer.Close()
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", &buf)
	req.Header.Set("Content-Type", writer.FormDataContentType())
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp StorageUploadResponse
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != expectedCID {
		t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
	}
	if resp.Name != expectedName {
		t.Errorf("Expected name %s, got %s", expectedName, resp.Name)
	}
	if resp.Size != expectedSize {
		t.Errorf("Expected size %d, got %d", expectedSize, resp.Size)
	}
}
// TestStorageUploadHandler_JSONUpload verifies the JSON path: the base64
// payload is decoded before reaching Add, and the mock's CID is echoed back.
func TestStorageUploadHandler_JSONUpload(t *testing.T) {
	expectedCID := "QmTest789"
	expectedName := "test.json"
	testData := []byte("test json data")
	base64Data := base64.StdEncoding.EncodeToString(testData)
	mockClient := &mockIPFSClient{
		addFunc: func(ctx context.Context, reader io.Reader, name string) (*ipfs.AddResponse, error) {
			// Fail if the handler did not decode the base64 body correctly.
			data, _ := io.ReadAll(reader)
			if string(data) != string(testData) {
				return nil, io.ErrUnexpectedEOF
			}
			return &ipfs.AddResponse{
				Cid:  expectedCID,
				Name: name,
				Size: int64(len(testData)),
			}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mockClient)
	reqBody := StorageUploadRequest{
		Name: expectedName,
		Data: base64Data,
	}
	bodyBytes, _ := json.Marshal(reqBody)
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", bytes.NewReader(bodyBytes))
	req.Header.Set("Content-Type", "application/json")
	ctx := context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns")
	req = req.WithContext(ctx)
	w := httptest.NewRecorder()
	gw.storageUploadHandler(w, req)
	if w.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code)
	}
	var resp StorageUploadResponse
	if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != expectedCID {
		t.Errorf("Expected CID %s, got %s", expectedCID, resp.Cid)
	}
}
// TestStorageUploadHandler_InvalidBase64 verifies that a JSON upload whose
// data field is not valid base64 is rejected with 400 Bad Request.
func TestStorageUploadHandler_InvalidBase64(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})

	body, _ := json.Marshal(StorageUploadRequest{Name: "test.txt", Data: "invalid base64!!!"})
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req = req.WithContext(context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns"))

	rec := httptest.NewRecorder()
	gw.storageUploadHandler(rec, req)

	if rec.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
}
// TestStorageUploadHandler_IPFSError verifies that a failure in the IPFS add
// call surfaces as 500 Internal Server Error.
func TestStorageUploadHandler_IPFSError(t *testing.T) {
	mock := &mockIPFSClient{
		addFunc: func(ctx context.Context, r io.Reader, name string) (*ipfs.AddResponse, error) {
			return nil, io.ErrUnexpectedEOF
		},
	}
	gw := newTestGatewayWithIPFS(t, mock)

	var body bytes.Buffer
	mw := multipart.NewWriter(&body)
	part, _ := mw.CreateFormFile("file", "test.txt")
	part.Write([]byte("test"))
	mw.Close()

	req := httptest.NewRequest(http.MethodPost, "/v1/storage/upload", &body)
	req.Header.Set("Content-Type", mw.FormDataContentType())
	req = req.WithContext(context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns"))

	rec := httptest.NewRecorder()
	gw.storageUploadHandler(rec, req)

	if rec.Code != http.StatusInternalServerError {
		t.Errorf("Expected status %d, got %d", http.StatusInternalServerError, rec.Code)
	}
}
// TestStoragePinHandler_Success verifies that a pin request is forwarded to
// IPFS with the default replication factor of 3, and that the CID and name
// are echoed back in the response.
func TestStoragePinHandler_Success(t *testing.T) {
	const (
		wantCID  = "QmPin123"
		wantName = "pinned-file"
	)
	mock := &mockIPFSClient{
		pinFunc: func(ctx context.Context, cid, name string, replication int) (*ipfs.PinResponse, error) {
			if cid != wantCID || replication != 3 {
				return nil, io.ErrUnexpectedEOF
			}
			return &ipfs.PinResponse{Cid: cid, Name: name}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mock)

	body, _ := json.Marshal(StoragePinRequest{Cid: wantCID, Name: wantName})
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/pin", bytes.NewReader(body))
	rec := httptest.NewRecorder()
	gw.storagePinHandler(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, rec.Code)
	}
	var resp StoragePinResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != wantCID {
		t.Errorf("Expected CID %s, got %s", wantCID, resp.Cid)
	}
	if resp.Name != wantName {
		t.Errorf("Expected name %s, got %s", wantName, resp.Name)
	}
}
// TestStoragePinHandler_MissingCID verifies that a pin request without a CID
// is rejected with 400 Bad Request.
func TestStoragePinHandler_MissingCID(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})

	body, _ := json.Marshal(StoragePinRequest{})
	req := httptest.NewRequest(http.MethodPost, "/v1/storage/pin", bytes.NewReader(body))
	rec := httptest.NewRecorder()
	gw.storagePinHandler(rec, req)

	if rec.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
}
// TestStorageStatusHandler_Success verifies that pin status details (status
// string and replication factor) are returned for a known CID.
func TestStorageStatusHandler_Success(t *testing.T) {
	const wantCID = "QmStatus123"
	mock := &mockIPFSClient{
		pinStatusFunc: func(ctx context.Context, cid string) (*ipfs.PinStatus, error) {
			return &ipfs.PinStatus{
				Cid:               cid,
				Name:              "test-file",
				Status:            "pinned",
				ReplicationMin:    3,
				ReplicationMax:    3,
				ReplicationFactor: 3,
				Peers:             []string{"peer1", "peer2", "peer3"},
			}, nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mock)

	req := httptest.NewRequest(http.MethodGet, "/v1/storage/status/"+wantCID, nil)
	rec := httptest.NewRecorder()
	gw.storageStatusHandler(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, rec.Code)
	}
	var resp StorageStatusResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp.Cid != wantCID {
		t.Errorf("Expected CID %s, got %s", wantCID, resp.Cid)
	}
	if resp.Status != "pinned" {
		t.Errorf("Expected status 'pinned', got %s", resp.Status)
	}
	if resp.ReplicationFactor != 3 {
		t.Errorf("Expected replication factor 3, got %d", resp.ReplicationFactor)
	}
}
// TestStorageStatusHandler_MissingCID verifies that a status request with an
// empty CID path segment is rejected with 400 Bad Request.
func TestStorageStatusHandler_MissingCID(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})

	rec := httptest.NewRecorder()
	gw.storageStatusHandler(rec, httptest.NewRequest(http.MethodGet, "/v1/storage/status/", nil))

	if rec.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
}
// TestStorageGetHandler_Success verifies that content fetched from IPFS is
// streamed back verbatim with an application/octet-stream content type.
func TestStorageGetHandler_Success(t *testing.T) {
	const (
		wantCID  = "QmGet123"
		wantBody = "test content from IPFS"
	)
	mock := &mockIPFSClient{
		getFunc: func(ctx context.Context, cid, ipfsAPIURL string) (io.ReadCloser, error) {
			if cid != wantCID {
				return nil, io.ErrUnexpectedEOF
			}
			return io.NopCloser(strings.NewReader(wantBody)), nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mock)

	req := httptest.NewRequest(http.MethodGet, "/v1/storage/get/"+wantCID, nil)
	req = req.WithContext(context.WithValue(req.Context(), ctxKeyNamespaceOverride, "test-ns"))
	rec := httptest.NewRecorder()
	gw.storageGetHandler(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, rec.Code)
	}
	if got := rec.Body.String(); got != wantBody {
		t.Errorf("Expected content %s, got %s", wantBody, got)
	}
	if ct := rec.Header().Get("Content-Type"); ct != "application/octet-stream" {
		t.Errorf("Expected Content-Type 'application/octet-stream', got %s", ct)
	}
}
// TestStorageGetHandler_MissingNamespace verifies that a get request without
// a namespace in the context is rejected with 401 Unauthorized.
func TestStorageGetHandler_MissingNamespace(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})

	rec := httptest.NewRecorder()
	gw.storageGetHandler(rec, httptest.NewRequest(http.MethodGet, "/v1/storage/get/QmTest123", nil))

	if rec.Code != http.StatusUnauthorized {
		t.Errorf("Expected status %d, got %d", http.StatusUnauthorized, rec.Code)
	}
}
// TestStorageUnpinHandler_Success verifies that an unpin request for a known
// CID succeeds and echoes the CID back in the JSON response.
func TestStorageUnpinHandler_Success(t *testing.T) {
	const wantCID = "QmUnpin123"
	mock := &mockIPFSClient{
		unpinFunc: func(ctx context.Context, cid string) error {
			if cid != wantCID {
				return io.ErrUnexpectedEOF
			}
			return nil
		},
	}
	gw := newTestGatewayWithIPFS(t, mock)

	rec := httptest.NewRecorder()
	gw.storageUnpinHandler(rec, httptest.NewRequest(http.MethodDelete, "/v1/storage/unpin/"+wantCID, nil))

	if rec.Code != http.StatusOK {
		t.Errorf("Expected status %d, got %d", http.StatusOK, rec.Code)
	}
	var resp map[string]any
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("Failed to decode response: %v", err)
	}
	if resp["cid"] != wantCID {
		t.Errorf("Expected CID %s, got %v", wantCID, resp["cid"])
	}
}
// TestStorageUnpinHandler_MissingCID verifies that an unpin request with an
// empty CID path segment is rejected with 400 Bad Request.
func TestStorageUnpinHandler_MissingCID(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, &mockIPFSClient{})

	rec := httptest.NewRecorder()
	gw.storageUnpinHandler(rec, httptest.NewRequest(http.MethodDelete, "/v1/storage/unpin/", nil))

	if rec.Code != http.StatusBadRequest {
		t.Errorf("Expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
}
// Test helper functions
// TestBase64Decode verifies that base64Decode round-trips standard base64 and
// rejects malformed input.
func TestBase64Decode(t *testing.T) {
	want := []byte("test data")

	got, err := base64Decode(base64.StdEncoding.EncodeToString(want))
	if err != nil {
		t.Fatalf("Failed to decode: %v", err)
	}
	if !bytes.Equal(got, want) {
		t.Errorf("Expected %s, got %s", string(want), string(got))
	}

	if _, err := base64Decode("invalid!!!"); err == nil {
		t.Error("Expected error for invalid base64")
	}
}
// TestGetNamespaceFromContext verifies namespace extraction from the request
// context, including the empty-string default when no override is present.
func TestGetNamespaceFromContext(t *testing.T) {
	gw := newTestGatewayWithIPFS(t, nil)

	withNS := context.WithValue(context.Background(), ctxKeyNamespaceOverride, "test-ns")
	if got := gw.getNamespaceFromContext(withNS); got != "test-ns" {
		t.Errorf("Expected 'test-ns', got %s", got)
	}

	if got := gw.getNamespaceFromContext(context.Background()); got != "" {
		t.Errorf("Expected empty namespace, got %s", got)
	}
}

378
pkg/ipfs/client.go Normal file
View File

@ -0,0 +1,378 @@
package ipfs
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"time"
"go.uber.org/zap"
)
// IPFSClient defines the interface for IPFS operations
type IPFSClient interface {
	// Add uploads the content read from reader under the given name and
	// returns the resulting CID, name, and size.
	Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error)
	// Pin pins a CID with the given human-readable name and replication factor.
	Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error)
	// PinStatus reports the pin state and peer placement of a CID.
	PinStatus(ctx context.Context, cid string) (*PinStatus, error)
	// Get retrieves content by CID from the IPFS HTTP API at ipfsAPIURL
	// (not the Cluster API); the caller must close the returned reader.
	Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error)
	// Unpin removes a pin from a CID.
	Unpin(ctx context.Context, cid string) error
	// Health reports whether the cluster API is reachable and responsive.
	Health(ctx context.Context) error
	// GetPeerCount returns the number of cluster peers.
	GetPeerCount(ctx context.Context) (int, error)
	// Close releases any resources held by the client.
	Close(ctx context.Context) error
}
// Client wraps an IPFS Cluster HTTP API client for storage operations
type Client struct {
	apiURL     string       // base URL of the IPFS Cluster REST API (no trailing slash)
	httpClient *http.Client // shared HTTP client carrying the configured timeout
	logger     *zap.Logger  // structured logger supplied by the caller
}
// Config holds configuration for the IPFS client.
// The zero value is usable: NewClient substitutes defaults for empty fields.
type Config struct {
	// ClusterAPIURL is the base URL for IPFS Cluster HTTP API (e.g., "http://localhost:9094")
	// If empty, defaults to "http://localhost:9094"
	ClusterAPIURL string
	// Timeout is the timeout for client operations
	// If zero, defaults to 60 seconds
	Timeout time.Duration
}
// PinStatus represents the status of a pinned CID as aggregated from the
// cluster's per-peer tracking information.
type PinStatus struct {
	Cid               string   `json:"cid"`
	Name              string   `json:"name"`
	Status            string   `json:"status"` // "pinned", "pinning", "queued", "unpinned", "error"
	ReplicationMin    int      `json:"replication_min"`
	ReplicationMax    int      `json:"replication_max"`
	ReplicationFactor int      `json:"replication_factor"` // set to the number of peers tracking the pin (see PinStatus method)
	Peers             []string `json:"peers"`              // IDs of peers that appear in the cluster's peer map
	Error             string   `json:"error,omitempty"`    // last per-peer error observed, if any
}
// AddResponse represents the response from adding content to IPFS
type AddResponse struct {
	Name string `json:"name"` // name the content was uploaded under
	Cid  string `json:"cid"`  // content identifier assigned by IPFS
	Size int64  `json:"size"` // size in bytes as reported by the cluster
}
// PinResponse represents the response from pinning a CID
type PinResponse struct {
	Cid  string `json:"cid"`
	Name string `json:"name"`
}
// NewClient creates a new IPFS Cluster client wrapper.
// Empty Config fields fall back to "http://localhost:9094" and a 60 second
// timeout. The constructor never fails today; the error return is kept for
// interface stability.
func NewClient(cfg Config, logger *zap.Logger) (*Client, error) {
	const (
		defaultAPIURL  = "http://localhost:9094"
		defaultTimeout = 60 * time.Second
	)

	apiURL := cfg.ClusterAPIURL
	if apiURL == "" {
		apiURL = defaultAPIURL
	}
	timeout := cfg.Timeout
	if timeout == 0 {
		timeout = defaultTimeout
	}

	return &Client{
		apiURL:     apiURL,
		httpClient: &http.Client{Timeout: timeout},
		logger:     logger,
	}, nil
}
// Health checks if the IPFS Cluster API is healthy by querying its /id
// endpoint and requiring a 200 response.
func (c *Client) Health(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/id", nil)
	if err != nil {
		return fmt.Errorf("failed to create health check request: %w", err)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("health check request failed: %w", err)
	}
	defer resp.Body.Close()

	if code := resp.StatusCode; code != http.StatusOK {
		return fmt.Errorf("health check failed with status: %d", code)
	}
	return nil
}
// GetPeerCount returns the number of cluster peers by decoding the /peers
// endpoint response and counting its entries.
func (c *Client) GetPeerCount(ctx context.Context) (int, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/peers", nil)
	if err != nil {
		return 0, fmt.Errorf("failed to create peers request: %w", err)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return 0, fmt.Errorf("peers request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("peers request failed with status: %d", resp.StatusCode)
	}

	// Only the peer IDs are needed; every other field is ignored.
	type peerEntry struct {
		ID string `json:"id"`
	}
	var peers []peerEntry
	if err := json.NewDecoder(resp.Body).Decode(&peers); err != nil {
		return 0, fmt.Errorf("failed to decode peers response: %w", err)
	}
	return len(peers), nil
}
// Add adds content to IPFS and returns the CID.
//
// The multipart request body is streamed through an io.Pipe rather than being
// buffered in memory, so arbitrarily large uploads do not inflate the
// process's heap. The writer goroutine propagates any form/copy error through
// the pipe, where it surfaces from httpClient.Do.
func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) {
	pr, pw := io.Pipe()
	writer := multipart.NewWriter(pw)

	// Produce the multipart body concurrently with the HTTP request consuming
	// it; CloseWithError makes the request fail with the underlying cause.
	go func() {
		part, err := writer.CreateFormFile("file", name)
		if err != nil {
			pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
		}
		if _, err := io.Copy(part, reader); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to copy data: %w", err))
			return
		}
		if err := writer.Close(); err != nil {
			pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
			return
		}
		pw.Close()
	}()

	req, err := http.NewRequestWithContext(ctx, "POST", c.apiURL+"/add", pr)
	if err != nil {
		return nil, fmt.Errorf("failed to create add request: %w", err)
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("add request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("add failed with status %d: %s", resp.StatusCode, string(body))
	}

	var result AddResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode add response: %w", err)
	}
	return &result, nil
}
// Pin pins a CID with specified replication factor.
// IPFS Cluster expects pin options (including name) as query parameters, not
// in a JSON body, so everything is encoded onto the URL.
func (c *Client) Pin(ctx context.Context, cid string, name string, replicationFactor int) (*PinResponse, error) {
	q := url.Values{}
	q.Set("replication-min", fmt.Sprintf("%d", replicationFactor))
	q.Set("replication-max", fmt.Sprintf("%d", replicationFactor))
	if name != "" {
		q.Set("name", name)
	}
	reqURL := c.apiURL + "/pins/" + cid + "?" + q.Encode()

	req, err := http.NewRequestWithContext(ctx, "POST", reqURL, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create pin request: %w", err)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("pin request failed: %w", err)
	}
	defer resp.Body.Close()

	// The cluster may answer 202 Accepted for asynchronous pins.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("pin failed with status %d: %s", resp.StatusCode, string(body))
	}

	var result PinResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode pin response: %w", err)
	}

	// Backfill fields the cluster may omit from its response.
	if result.Name == "" && name != "" {
		result.Name = name
	}
	if result.Cid == "" {
		result.Cid = cid
	}
	return &result, nil
}
// PinStatus retrieves the status of a pinned CID.
//
// The Cluster API returns a GlobalPinInfo object with one tracker entry per
// peer; this method collapses it into a single PinStatus. ReplicationMin/Max
// are not present in GlobalPinInfo and are reported as 0;
// ReplicationFactor is set to the number of peers seen in the peer map.
func (c *Client) PinStatus(ctx context.Context, cid string) (*PinStatus, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", c.apiURL+"/pins/"+cid, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create pin status request: %w", err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("pin status request failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, fmt.Errorf("pin not found: %s", cid)
	}
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("pin status failed with status %d: %s", resp.StatusCode, string(body))
	}
	// IPFS Cluster returns GlobalPinInfo, we need to map it to our PinStatus
	var gpi struct {
		Cid     string `json:"cid"`
		Name    string `json:"name"`
		PeerMap map[string]struct {
			Status interface{} `json:"status"` // TrackerStatus can be string or int
			Error  string      `json:"error,omitempty"`
		} `json:"peer_map"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&gpi); err != nil {
		return nil, fmt.Errorf("failed to decode pin status response: %w", err)
	}
	// Use name from GlobalPinInfo
	name := gpi.Name
	// Extract status from peer map (use first peer's status, or aggregate)
	status := "unknown"
	peers := make([]string, 0, len(gpi.PeerMap))
	var errorMsg string
	for peerID, pinInfo := range gpi.PeerMap {
		peers = append(peers, peerID)
		if pinInfo.Status != nil {
			// Convert status to string
			if s, ok := pinInfo.Status.(string); ok {
				// NOTE(review): map iteration order is random, so when peers
				// disagree the reported status is whichever non-empty string
				// happened to be visited last — confirm this is acceptable.
				if status == "unknown" || s != "" {
					status = s
				}
			} else if status == "unknown" {
				// If status is not a string, try to convert it
				status = fmt.Sprintf("%v", pinInfo.Status)
			}
		}
		// Likewise, only the last non-empty per-peer error survives.
		if pinInfo.Error != "" {
			errorMsg = pinInfo.Error
		}
	}
	// Normalize status string (common IPFS Cluster statuses)
	if status == "" || status == "unknown" {
		status = "pinned" // Default to pinned if we have peers
		if len(peers) == 0 {
			status = "unknown"
		}
	}
	result := &PinStatus{
		Cid:               gpi.Cid,
		Name:              name,
		Status:            status,
		ReplicationMin:    0, // Not available in GlobalPinInfo
		ReplicationMax:    0, // Not available in GlobalPinInfo
		ReplicationFactor: len(peers),
		Peers:             peers,
		Error:             errorMsg,
	}
	// Ensure CID is set
	if result.Cid == "" {
		result.Cid = cid
	}
	return result, nil
}
// Unpin removes a pin from a CID via DELETE /pins/{cid}.
// Both 200 OK and 202 Accepted count as success, since the cluster may
// process the removal asynchronously.
func (c *Client) Unpin(ctx context.Context, cid string) error {
	req, err := http.NewRequestWithContext(ctx, "DELETE", c.apiURL+"/pins/"+cid, nil)
	if err != nil {
		return fmt.Errorf("failed to create unpin request: %w", err)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("unpin request failed: %w", err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK, http.StatusAccepted:
		return nil
	default:
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unpin failed with status %d: %s", resp.StatusCode, string(body))
	}
}
// Get retrieves content from IPFS by CID.
// Note: This uses the IPFS HTTP API (typically on port 5001), not the Cluster API.
// The caller owns the returned ReadCloser and must close it.
func (c *Client) Get(ctx context.Context, cid string, ipfsAPIURL string) (io.ReadCloser, error) {
	if ipfsAPIURL == "" {
		ipfsAPIURL = "http://localhost:5001"
	}
	// Escape the CID so it cannot inject extra query parameters; this also
	// avoids shadowing the imported net/url package with a local named "url".
	reqURL := fmt.Sprintf("%s/api/v0/cat?arg=%s", ipfsAPIURL, url.QueryEscape(cid))
	req, err := http.NewRequestWithContext(ctx, "POST", reqURL, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create get request: %w", err)
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("get request failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		// The body is not handed to the caller on error, so close it here.
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		if resp.StatusCode == http.StatusNotFound {
			return nil, fmt.Errorf("content not found (CID: %s). The content may not be available on the IPFS node, or the IPFS API may not be accessible at %s", cid, ipfsAPIURL)
		}
		return nil, fmt.Errorf("get failed with status %d: %s", resp.StatusCode, string(body))
	}
	return resp.Body, nil
}
// Close closes the IPFS client connection
func (c *Client) Close(ctx context.Context) error {
	// HTTP client doesn't need explicit closing; this method exists only to
	// satisfy the IPFSClient interface.
	return nil
}

489
pkg/ipfs/client_test.go Normal file
View File

@ -0,0 +1,489 @@
package ipfs
import (
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
"go.uber.org/zap"
)
// TestNewClient covers the default and custom configuration paths of the
// client constructor.
func TestNewClient(t *testing.T) {
	logger := zap.NewNop()

	t.Run("default_config", func(t *testing.T) {
		client, err := NewClient(Config{}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if client.apiURL != "http://localhost:9094" {
			t.Errorf("Expected default API URL 'http://localhost:9094', got %s", client.apiURL)
		}
		if client.httpClient.Timeout != 60*time.Second {
			t.Errorf("Expected default timeout 60s, got %v", client.httpClient.Timeout)
		}
	})

	t.Run("custom_config", func(t *testing.T) {
		client, err := NewClient(Config{
			ClusterAPIURL: "http://custom:9094",
			Timeout:       30 * time.Second,
		}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if client.apiURL != "http://custom:9094" {
			t.Errorf("Expected API URL 'http://custom:9094', got %s", client.apiURL)
		}
		if client.httpClient.Timeout != 30*time.Second {
			t.Errorf("Expected timeout 30s, got %v", client.httpClient.Timeout)
		}
	})
}
// TestClient_Add exercises the multipart /add endpoint against a stub
// cluster server, covering the success path and a server-side failure.
func TestClient_Add(t *testing.T) {
	logger := zap.NewNop()

	t.Run("success", func(t *testing.T) {
		const (
			wantCID  = "QmTest123"
			wantName = "test.txt"
		)
		wantSize := int64(100)

		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/add" {
				t.Errorf("Expected path '/add', got %s", r.URL.Path)
			}
			if r.Method != "POST" {
				t.Errorf("Expected method POST, got %s", r.Method)
			}
			// Verify the multipart form and filename.
			if err := r.ParseMultipartForm(32 << 20); err != nil {
				t.Errorf("Failed to parse multipart form: %v", err)
				return
			}
			file, header, err := r.FormFile("file")
			if err != nil {
				t.Errorf("Failed to get file: %v", err)
				return
			}
			defer file.Close()
			if header.Filename != wantName {
				t.Errorf("Expected filename %s, got %s", wantName, header.Filename)
			}
			_, _ = io.ReadAll(file) // drain the uploaded content

			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(AddResponse{Cid: wantCID, Name: wantName, Size: wantSize})
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		resp, err := client.Add(context.Background(), strings.NewReader("test content"), wantName)
		if err != nil {
			t.Fatalf("Failed to add content: %v", err)
		}
		if resp.Cid != wantCID {
			t.Errorf("Expected CID %s, got %s", wantCID, resp.Cid)
		}
		if resp.Name != wantName {
			t.Errorf("Expected name %s, got %s", wantName, resp.Name)
		}
		if resp.Size != wantSize {
			t.Errorf("Expected size %d, got %d", wantSize, resp.Size)
		}
	})

	t.Run("server_error", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("internal error"))
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if _, err := client.Add(context.Background(), strings.NewReader("test"), "test.txt"); err == nil {
			t.Error("Expected error for server error")
		}
	})
}
// TestClient_Pin verifies that pin options are sent as query parameters on
// POST /pins/{cid}, and that both 200 and 202 responses are accepted.
func TestClient_Pin(t *testing.T) {
	logger := zap.NewNop()

	t.Run("success", func(t *testing.T) {
		const (
			wantCID         = "QmPin123"
			wantName        = "pinned-file"
			wantReplication = 3
		)
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/pins/") {
				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
			}
			if r.Method != "POST" {
				t.Errorf("Expected method POST, got %s", r.Method)
			}
			if cid := strings.TrimPrefix(r.URL.Path, "/pins/"); cid != wantCID {
				t.Errorf("Expected CID %s in path, got %s", wantCID, cid)
			}
			// Replication and name must all arrive as query parameters.
			q := r.URL.Query()
			if got := q.Get("replication-min"); got != strconv.Itoa(wantReplication) {
				t.Errorf("Expected replication-min %d, got %s", wantReplication, got)
			}
			if got := q.Get("replication-max"); got != strconv.Itoa(wantReplication) {
				t.Errorf("Expected replication-max %d, got %s", wantReplication, got)
			}
			if got := q.Get("name"); got != wantName {
				t.Errorf("Expected name %s, got %s", wantName, got)
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(PinResponse{Cid: wantCID, Name: wantName})
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		resp, err := client.Pin(context.Background(), wantCID, wantName, wantReplication)
		if err != nil {
			t.Fatalf("Failed to pin: %v", err)
		}
		if resp.Cid != wantCID {
			t.Errorf("Expected CID %s, got %s", wantCID, resp.Cid)
		}
		if resp.Name != wantName {
			t.Errorf("Expected name %s, got %s", wantName, resp.Name)
		}
	})

	t.Run("accepted_status", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusAccepted)
			json.NewEncoder(w).Encode(PinResponse{Cid: "QmTest", Name: "test"})
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if _, err := client.Pin(context.Background(), "QmTest", "test", 3); err != nil {
			t.Errorf("Expected success for Accepted status, got error: %v", err)
		}
	})
}
// TestClient_PinStatus verifies mapping of the cluster's GlobalPinInfo
// (per-peer statuses) into a single PinStatus, and 404 handling.
func TestClient_PinStatus(t *testing.T) {
	logger := zap.NewNop()

	t.Run("success", func(t *testing.T) {
		const wantCID = "QmStatus123"
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/pins/") {
				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
			}
			if r.Method != "GET" {
				t.Errorf("Expected method GET, got %s", r.Method)
			}
			// Minimal GlobalPinInfo with three agreeing peers.
			body := map[string]interface{}{
				"cid":  wantCID,
				"name": "test-file",
				"peer_map": map[string]interface{}{
					"peer1": map[string]interface{}{"status": "pinned"},
					"peer2": map[string]interface{}{"status": "pinned"},
					"peer3": map[string]interface{}{"status": "pinned"},
				},
			}
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(body)
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		status, err := client.PinStatus(context.Background(), wantCID)
		if err != nil {
			t.Fatalf("Failed to get pin status: %v", err)
		}
		if status.Cid != wantCID {
			t.Errorf("Expected CID %s, got %s", wantCID, status.Cid)
		}
		if status.Status != "pinned" {
			t.Errorf("Expected status 'pinned', got %s", status.Status)
		}
		if len(status.Peers) != 3 {
			t.Errorf("Expected 3 peers, got %d", len(status.Peers))
		}
	})

	t.Run("not_found", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if _, err := client.PinStatus(context.Background(), "QmNotFound"); err == nil {
			t.Error("Expected error for not found")
		}
	})
}
// TestClient_Unpin verifies that unpinning issues DELETE /pins/{cid} and
// treats both 200 and 202 responses as success.
func TestClient_Unpin(t *testing.T) {
	logger := zap.NewNop()

	t.Run("success", func(t *testing.T) {
		const wantCID = "QmUnpin123"
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.HasPrefix(r.URL.Path, "/pins/") {
				t.Errorf("Expected path '/pins/', got %s", r.URL.Path)
			}
			if r.Method != "DELETE" {
				t.Errorf("Expected method DELETE, got %s", r.Method)
			}
			w.WriteHeader(http.StatusOK)
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := client.Unpin(context.Background(), wantCID); err != nil {
			t.Fatalf("Failed to unpin: %v", err)
		}
	})

	t.Run("accepted_status", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusAccepted)
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := client.Unpin(context.Background(), "QmTest"); err != nil {
			t.Errorf("Expected success for Accepted status, got error: %v", err)
		}
	})
}
// TestClient_Get verifies that content is fetched via POST /api/v0/cat on the
// IPFS node API, including 404 handling and the default API URL fallback.
func TestClient_Get(t *testing.T) {
	logger := zap.NewNop()

	t.Run("success", func(t *testing.T) {
		const (
			wantCID  = "QmGet123"
			wantBody = "test content from IPFS"
		)
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !strings.Contains(r.URL.Path, "/api/v0/cat") {
				t.Errorf("Expected path containing '/api/v0/cat', got %s", r.URL.Path)
			}
			if r.Method != "POST" {
				t.Errorf("Expected method POST, got %s", r.Method)
			}
			// The requested CID must appear in the query string.
			if !strings.Contains(r.URL.RawQuery, wantCID) {
				t.Errorf("Expected CID %s in query, got %s", wantCID, r.URL.RawQuery)
			}
			w.Write([]byte(wantBody))
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: "http://localhost:9094"}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		rc, err := client.Get(context.Background(), wantCID, srv.URL)
		if err != nil {
			t.Fatalf("Failed to get content: %v", err)
		}
		defer rc.Close()
		data, err := io.ReadAll(rc)
		if err != nil {
			t.Fatalf("Failed to read content: %v", err)
		}
		if string(data) != wantBody {
			t.Errorf("Expected content %s, got %s", wantBody, string(data))
		}
	})

	t.Run("not_found", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: "http://localhost:9094"}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if _, err := client.Get(context.Background(), "QmNotFound", srv.URL); err == nil {
			t.Error("Expected error for not found")
		}
	})

	t.Run("default_ipfs_api_url", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("content"))
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: "http://localhost:9094"}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		// An empty IPFS API URL should fall back to localhost:5001, which is
		// not expected to be reachable here, so the call should fail.
		// NOTE(review): this would spuriously fail on a host that actually
		// runs an IPFS daemon on the default port.
		if _, err := client.Get(context.Background(), "QmDefault", ""); err == nil {
			t.Error("Expected error when using default localhost:5001")
		}
	})
}
// TestClient_Health verifies that the health check hits /id and maps the HTTP
// status onto success or failure.
func TestClient_Health(t *testing.T) {
	logger := zap.NewNop()

	t.Run("success", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/id" {
				t.Errorf("Expected path '/id', got %s", r.URL.Path)
			}
			w.WriteHeader(http.StatusOK)
			w.Write([]byte(`{"id": "test"}`))
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := client.Health(context.Background()); err != nil {
			t.Fatalf("Failed health check: %v", err)
		}
	})

	t.Run("unhealthy", func(t *testing.T) {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusInternalServerError)
		}))
		defer srv.Close()

		client, err := NewClient(Config{ClusterAPIURL: srv.URL}, logger)
		if err != nil {
			t.Fatalf("Failed to create client: %v", err)
		}
		if err := client.Health(context.Background()); err == nil {
			t.Error("Expected error for unhealthy status")
		}
	})
}
func TestClient_Close(t *testing.T) {
logger := zap.NewNop()
cfg := Config{ClusterAPIURL: "http://localhost:9094"}
client, err := NewClient(cfg, logger)
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
// Close should not error
err = client.Close(context.Background())
if err != nil {
t.Errorf("Close should not error, got: %v", err)
}
}

717
pkg/ipfs/cluster.go Normal file
View File

@ -0,0 +1,717 @@
package ipfs
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"go.uber.org/zap"
"github.com/DeBrosOfficial/network/pkg/config"
)
// ClusterConfigManager manages IPFS Cluster configuration files
type ClusterConfigManager struct {
	cfg         *config.Config // node configuration the cluster layout is derived from
	logger      *zap.Logger
	clusterPath string // directory holding the cluster's configuration (e.g. service.json)
	secret      string // shared cluster secret; presumably hex-encoded (crypto/rand + hex are imported) — confirm where it is generated
}
// ClusterServiceConfig represents the structure of service.json.
// Only the fields this package needs to read or rewrite are modeled; the
// remainder of the file is intended to be preserved as-is (see Raw).
type ClusterServiceConfig struct {
	Cluster struct {
		Peername           string   `json:"peername"`
		Secret             string   `json:"secret"`
		LeaveOnShutdown    bool     `json:"leave_on_shutdown"`
		ListenMultiaddress []string `json:"listen_multiaddress"`
		PeerAddresses      []string `json:"peer_addresses"`
		// ... other fields kept from template
	} `json:"cluster"`
	Consensus struct {
		CRDT struct {
			ClusterName  string   `json:"cluster_name"`
			TrustedPeers []string `json:"trusted_peers"`
			Batching     struct {
				MaxBatchSize int    `json:"max_batch_size"`
				MaxBatchAge  string `json:"max_batch_age"`
			} `json:"batching"`
			RepairInterval string `json:"repair_interval"`
		} `json:"crdt"`
	} `json:"consensus"`
	API struct {
		IPFSProxy struct {
			ListenMultiaddress string `json:"listen_multiaddress"`
			NodeMultiaddress   string `json:"node_multiaddress"`
		} `json:"ipfsproxy"`
		PinSvcAPI struct {
			HTTPListenMultiaddress string `json:"http_listen_multiaddress"`
		} `json:"pinsvcapi"`
		RestAPI struct {
			HTTPListenMultiaddress string `json:"http_listen_multiaddress"`
		} `json:"restapi"`
	} `json:"api"`
	IPFSConnector struct {
		IPFSHTTP struct {
			NodeMultiaddress string `json:"node_multiaddress"`
		} `json:"ipfshttp"`
	} `json:"ipfs_connector"`
	// Keep rest of fields as raw JSON to preserve structure.
	// NOTE(review): tagged `json:"-"`, so encoding/json will neither populate
	// nor emit this field — it must be filled and merged by custom code;
	// confirm that such code exists.
	Raw map[string]interface{} `json:"-"`
}
// NewClusterConfigManager creates a new IPFS Cluster config manager.
//
// It expands a leading "~" in the node's data directory, derives the
// on-disk location of this node's ipfs-cluster directory, and loads (or
// generates and persists) the shared cluster secret. All nodes whose
// data directory lives under ~/.debros share one cluster-secret file so
// they can join the same cluster.
func NewClusterConfigManager(cfg *config.Config, logger *zap.Logger) (*ClusterConfigManager, error) {
	// Expand data directory path
	dataDir := cfg.Node.DataDir
	if strings.HasPrefix(dataDir, "~") {
		home, err := os.UserHomeDir()
		if err != nil {
			return nil, fmt.Errorf("failed to determine home directory: %w", err)
		}
		dataDir = filepath.Join(home, dataDir[1:])
	}

	clusterPath := resolveClusterPath(dataDir)

	// Load or generate cluster secret
	secretPath := filepath.Join(dataDir, "..", "cluster-secret")
	if strings.Contains(dataDir, ".debros") {
		// Prefer the canonical secret location under ~/.debros
		if home, err := os.UserHomeDir(); err == nil {
			secretPath = filepath.Join(home, ".debros", "cluster-secret")
		}
	}
	secret, err := loadOrGenerateClusterSecret(secretPath)
	if err != nil {
		return nil, fmt.Errorf("failed to load/generate cluster secret: %w", err)
	}
	return &ClusterConfigManager{
		cfg:         cfg,
		logger:      logger,
		clusterPath: clusterPath,
		secret:      secret,
	}, nil
}

// resolveClusterPath derives the ipfs-cluster directory for a node data
// directory. For each known node name (bootstrap, node2, node3) that
// appears somewhere in the path: if the name is already the leaf
// directory (or its parent), ipfs-cluster sits directly under dataDir;
// otherwise the node name is inserted as an extra path component.
// Directories matching no known name use dataDir/ipfs-cluster.
func resolveClusterPath(dataDir string) string {
	for _, name := range []string{"bootstrap", "node2", "node3"} {
		if !strings.Contains(dataDir, name) {
			continue
		}
		if filepath.Base(filepath.Dir(dataDir)) == name || filepath.Base(dataDir) == name {
			return filepath.Join(dataDir, "ipfs-cluster")
		}
		return filepath.Join(dataDir, name, "ipfs-cluster")
	}
	return filepath.Join(dataDir, "ipfs-cluster")
}
// EnsureConfig ensures the IPFS Cluster service.json exists and is properly configured.
//
// It is a no-op when no ClusterAPIURL is configured. Otherwise it derives
// all listen ports from the configured URLs, runs `ipfs-cluster-service
// init` to create a complete config when none exists, then overwrites the
// fields this node controls (peer name, secret, listen addresses, CRDT
// cluster name, API endpoints) and saves the result.
func (cm *ClusterConfigManager) EnsureConfig() error {
	if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
		cm.logger.Debug("IPFS Cluster API URL not configured, skipping cluster config")
		return nil
	}
	serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
	// Parse ports from URLs
	clusterPort, restAPIPort, err := parseClusterPorts(cm.cfg.Database.IPFS.ClusterAPIURL)
	if err != nil {
		return fmt.Errorf("failed to parse cluster API URL: %w", err)
	}
	ipfsPort, err := parseIPFSPort(cm.cfg.Database.IPFS.APIURL)
	if err != nil {
		return fmt.Errorf("failed to parse IPFS API URL: %w", err)
	}
	// Determine node name: generic "node" is narrowed to node2/node3 when the
	// data dir or node ID mentions them, so the peername matches the layout.
	nodeName := cm.cfg.Node.Type
	if nodeName == "node" {
		// Try to extract from data dir or ID
		if strings.Contains(cm.cfg.Node.DataDir, "node2") || strings.Contains(cm.cfg.Node.ID, "node2") {
			nodeName = "node2"
		} else if strings.Contains(cm.cfg.Node.DataDir, "node3") || strings.Contains(cm.cfg.Node.ID, "node3") {
			nodeName = "node3"
		} else {
			nodeName = "node"
		}
	}
	// Calculate ports based on pattern: proxy = REST+1 below cluster port,
	// pinsvc = cluster+1, libp2p cluster listen = cluster+2.
	proxyPort := clusterPort - 1
	pinSvcPort := clusterPort + 1
	clusterListenPort := clusterPort + 2
	// If config doesn't exist, initialize it with ipfs-cluster-service init
	// This ensures we have all required sections (datastore, informer, etc.)
	if _, err := os.Stat(serviceJSONPath); os.IsNotExist(err) {
		cm.logger.Info("Initializing cluster config with ipfs-cluster-service init")
		initCmd := exec.Command("ipfs-cluster-service", "init", "--force")
		initCmd.Env = append(os.Environ(), "IPFS_CLUSTER_PATH="+cm.clusterPath)
		if err := initCmd.Run(); err != nil {
			// Non-fatal: loadOrCreateConfig falls back to a minimal template.
			cm.logger.Warn("Failed to initialize cluster config with ipfs-cluster-service init, will create minimal template", zap.Error(err))
		}
	}
	// Load existing config or create new
	cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
	if err != nil {
		return fmt.Errorf("failed to load/create config: %w", err)
	}
	// Update configuration
	cfg.Cluster.Peername = nodeName
	cfg.Cluster.Secret = cm.secret
	cfg.Cluster.ListenMultiaddress = []string{fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", clusterListenPort)}
	cfg.Consensus.CRDT.ClusterName = "debros-cluster"
	cfg.Consensus.CRDT.TrustedPeers = []string{"*"}
	// API endpoints: only the REST API binds all interfaces; proxy and
	// pinsvc stay loopback-only.
	cfg.API.RestAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", restAPIPort)
	cfg.API.IPFSProxy.ListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", proxyPort)
	cfg.API.IPFSProxy.NodeMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort) // proxy forwards to the local IPFS daemon API
	cfg.API.PinSvcAPI.HTTPListenMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", pinSvcPort)
	// IPFS connector (also needs to be set)
	cfg.IPFSConnector.IPFSHTTP.NodeMultiaddress = fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsPort)
	// Save configuration
	if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
		return fmt.Errorf("failed to save config: %w", err)
	}
	cm.logger.Info("IPFS Cluster configuration ensured",
		zap.String("path", serviceJSONPath),
		zap.String("node_name", nodeName),
		zap.Int("ipfs_port", ipfsPort),
		zap.Int("cluster_port", clusterPort),
		zap.Int("rest_api_port", restAPIPort))
	return nil
}
// UpdateBootstrapPeers updates peer_addresses and peerstore with bootstrap peer information.
//
// It queries the bootstrap node's cluster REST API for its peer ID,
// computes the bootstrap's libp2p cluster address, and writes that
// address into both service.json (peer_addresses) and the peerstore
// file. No-op on the bootstrap node itself or when IPFS is disabled.
func (cm *ClusterConfigManager) UpdateBootstrapPeers(bootstrapAPIURL string) error {
	if cm.cfg.Database.IPFS.ClusterAPIURL == "" {
		return nil // IPFS not configured
	}
	// Skip if this is the bootstrap node itself
	if cm.cfg.Node.Type == "bootstrap" {
		return nil
	}
	// Query bootstrap cluster API to get peer ID
	peerID, err := getBootstrapPeerID(bootstrapAPIURL)
	if err != nil {
		return fmt.Errorf("failed to get bootstrap peer ID: %w", err)
	}
	if peerID == "" {
		// Bootstrap may not be up yet; caller retries later.
		cm.logger.Warn("Bootstrap peer ID not available yet")
		return nil
	}
	// Extract bootstrap cluster port from URL.
	// NOTE: the second return value of parseClusterPorts is the REST API
	// port; the local name "clusterPort" is historical. Adding 2 below
	// yields the libp2p cluster listen port (same value as the discarded
	// first return).
	_, clusterPort, err := parseClusterPorts(bootstrapAPIURL)
	if err != nil {
		return fmt.Errorf("failed to parse bootstrap cluster API URL: %w", err)
	}
	// Bootstrap listens on clusterPort + 2 (same pattern)
	bootstrapClusterPort := clusterPort + 2
	bootstrapPeerAddr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", bootstrapClusterPort, peerID)
	// Load current config
	serviceJSONPath := filepath.Join(cm.clusterPath, "service.json")
	cfg, err := cm.loadOrCreateConfig(serviceJSONPath)
	if err != nil {
		return fmt.Errorf("failed to load config: %w", err)
	}
	// Update peer_addresses
	cfg.Cluster.PeerAddresses = []string{bootstrapPeerAddr}
	// Save config
	if err := cm.saveConfig(serviceJSONPath, cfg); err != nil {
		return fmt.Errorf("failed to save config: %w", err)
	}
	// Write to peerstore file so the cluster daemon can dial the bootstrap
	// even before reading service.json.
	peerstorePath := filepath.Join(cm.clusterPath, "peerstore")
	if err := os.WriteFile(peerstorePath, []byte(bootstrapPeerAddr+"\n"), 0644); err != nil {
		return fmt.Errorf("failed to write peerstore: %w", err)
	}
	cm.logger.Info("Updated bootstrap peer configuration",
		zap.String("bootstrap_peer_addr", bootstrapPeerAddr),
		zap.String("peerstore_path", peerstorePath))
	return nil
}
// loadOrCreateConfig returns the parsed service.json at path, or a fresh
// template configuration when the file is absent or not valid JSON. When
// the file parses, the whole document is also retained in cfg.Raw so
// fields outside the typed struct survive a later save.
func (cm *ClusterConfigManager) loadOrCreateConfig(path string) (*ClusterServiceConfig, error) {
	data, readErr := os.ReadFile(path)
	if readErr != nil {
		return cm.createTemplateConfig(), nil
	}
	parsed := ClusterServiceConfig{}
	if json.Unmarshal(data, &parsed) != nil {
		return cm.createTemplateConfig(), nil
	}
	rawDoc := map[string]interface{}{}
	if json.Unmarshal(data, &rawDoc) == nil {
		parsed.Raw = rawDoc
	}
	return &parsed, nil
}
// createTemplateConfig builds a minimal default configuration, used when
// no service.json exists or the existing file cannot be parsed.
func (cm *ClusterConfigManager) createTemplateConfig() *ClusterServiceConfig {
	tpl := new(ClusterServiceConfig)
	tpl.Cluster.LeaveOnShutdown = false
	tpl.Cluster.PeerAddresses = []string{}
	crdt := &tpl.Consensus.CRDT
	crdt.TrustedPeers = []string{"*"}
	crdt.Batching.MaxBatchSize = 0
	crdt.Batching.MaxBatchAge = "0s"
	crdt.RepairInterval = "1h0m0s"
	tpl.Raw = map[string]interface{}{}
	return tpl
}
// saveConfig saves the configuration to path, preserving all existing fields.
//
// The file is re-read as a generic JSON map and the typed values from cfg
// are deep-merged into it, so sections owned by ipfs-cluster-service
// itself (datastore tuning, informers, monitors, ...) survive the
// rewrite. Any required section that is still missing is backfilled with
// defaults before writing.
func (cm *ClusterConfigManager) saveConfig(path string, cfg *ClusterServiceConfig) error {
	// Create directory if needed
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return fmt.Errorf("failed to create cluster directory: %w", err)
	}
	// Load existing config if it exists to preserve all fields
	var final map[string]interface{}
	if data, err := os.ReadFile(path); err == nil {
		if err := json.Unmarshal(data, &final); err != nil {
			// If parsing fails, start fresh
			final = make(map[string]interface{})
		}
	} else {
		final = make(map[string]interface{})
	}
	// Deep merge: update nested structures while preserving other fields
	updateNestedMap(final, "cluster", map[string]interface{}{
		"peername":            cfg.Cluster.Peername,
		"secret":              cfg.Cluster.Secret,
		"leave_on_shutdown":   cfg.Cluster.LeaveOnShutdown,
		"listen_multiaddress": cfg.Cluster.ListenMultiaddress,
		"peer_addresses":      cfg.Cluster.PeerAddresses,
	})
	updateNestedMap(final, "consensus", map[string]interface{}{
		"crdt": map[string]interface{}{
			"cluster_name":  cfg.Consensus.CRDT.ClusterName,
			"trusted_peers": cfg.Consensus.CRDT.TrustedPeers,
			"batching": map[string]interface{}{
				"max_batch_size": cfg.Consensus.CRDT.Batching.MaxBatchSize,
				"max_batch_age":  cfg.Consensus.CRDT.Batching.MaxBatchAge,
			},
			"repair_interval": cfg.Consensus.CRDT.RepairInterval,
		},
	})
	// Update API section, preserving other fields
	updateNestedMap(final, "api", map[string]interface{}{
		"ipfsproxy": map[string]interface{}{
			"listen_multiaddress": cfg.API.IPFSProxy.ListenMultiaddress,
			"node_multiaddress":   cfg.API.IPFSProxy.NodeMultiaddress, // proxy target: the local IPFS daemon API
		},
		"pinsvcapi": map[string]interface{}{
			"http_listen_multiaddress": cfg.API.PinSvcAPI.HTTPListenMultiaddress,
		},
		"restapi": map[string]interface{}{
			"http_listen_multiaddress": cfg.API.RestAPI.HTTPListenMultiaddress,
		},
	})
	// Update IPFS connector section
	updateNestedMap(final, "ipfs_connector", map[string]interface{}{
		"ipfshttp": map[string]interface{}{
			"node_multiaddress":         cfg.IPFSConnector.IPFSHTTP.NodeMultiaddress,
			"connect_swarms_delay":      "30s",
			"ipfs_request_timeout":      "5m0s",
			"pin_timeout":               "2m0s",
			"unpin_timeout":             "3h0m0s",
			"repogc_timeout":            "24h0m0s",
			"informer_trigger_interval": 0,
		},
	})
	// Ensure all required sections exist with defaults if missing
	ensureRequiredSection(final, "datastore", map[string]interface{}{
		"pebble": map[string]interface{}{
			"pebble_options": map[string]interface{}{
				"cache_size_bytes": 1073741824,
				"bytes_per_sync":   1048576,
				"disable_wal":      false,
			},
		},
	})
	ensureRequiredSection(final, "informer", map[string]interface{}{
		"disk": map[string]interface{}{
			"metric_ttl":  "30s",
			"metric_type": "freespace",
		},
		"pinqueue": map[string]interface{}{
			"metric_ttl":         "30s",
			"weight_bucket_size": 100000,
		},
		"tags": map[string]interface{}{
			"metric_ttl": "30s",
			"tags": map[string]interface{}{
				"group": "default",
			},
		},
	})
	ensureRequiredSection(final, "monitor", map[string]interface{}{
		"pubsubmon": map[string]interface{}{
			"check_interval": "15s",
		},
	})
	ensureRequiredSection(final, "pin_tracker", map[string]interface{}{
		"stateless": map[string]interface{}{
			"concurrent_pins":          10,
			"priority_pin_max_age":     "24h0m0s",
			"priority_pin_max_retries": 5,
		},
	})
	ensureRequiredSection(final, "allocator", map[string]interface{}{
		"balanced": map[string]interface{}{
			"allocate_by": []interface{}{"tag:group", "freespace"},
		},
	})
	// Write JSON
	data, err := json.MarshalIndent(final, "", "  ")
	if err != nil {
		return fmt.Errorf("failed to marshal config: %w", err)
	}
	if err := os.WriteFile(path, data, 0644); err != nil {
		return fmt.Errorf("failed to write config: %w", err)
	}
	return nil
}
// updateNestedMap merges updates into parent[key]. When parent[key] is
// not a map, it is replaced wholesale by updates. Map-valued entries are
// merged recursively; scalar entries in updates overwrite any existing
// value stored under the same name.
func updateNestedMap(parent map[string]interface{}, key string, updates map[string]interface{}) {
	target, isMap := parent[key].(map[string]interface{})
	if !isMap {
		parent[key] = updates
		return
	}
	for name, value := range updates {
		nested, valueIsMap := value.(map[string]interface{})
		if !valueIsMap {
			target[name] = value
			continue
		}
		if _, hasMap := target[name].(map[string]interface{}); hasMap {
			// Both sides are maps: recurse instead of replacing.
			updateNestedMap(target, name, nested)
		} else {
			target[name] = nested
		}
	}
	parent[key] = target
}
// ensureRequiredSection ensures a section exists in the config, creating
// it with defaults if missing.
//
// Unlike the previous implementation (which delegated to updateNestedMap
// and therefore OVERWROTE existing scalar values with the defaults,
// clobbering user-tuned settings such as check intervals), this version
// only fills in keys that are absent: existing values always win. When
// the section exists but is not a map, it is left untouched, matching
// the original behavior.
func ensureRequiredSection(parent map[string]interface{}, key string, defaults map[string]interface{}) {
	if _, exists := parent[key]; !exists {
		parent[key] = defaults
		return
	}
	if existing, ok := parent[key].(map[string]interface{}); ok {
		fillMissingDefaults(existing, defaults)
	}
}

// fillMissingDefaults recursively copies entries from defaults into dst
// only where dst has no value; map-valued entries present on both sides
// are descended into, never replaced.
func fillMissingDefaults(dst, defaults map[string]interface{}) {
	for k, dv := range defaults {
		cur, exists := dst[k]
		if !exists {
			dst[k] = dv
			continue
		}
		dm, defaultIsMap := dv.(map[string]interface{})
		cm, currentIsMap := cur.(map[string]interface{})
		if defaultIsMap && currentIsMap {
			fillMissingDefaults(cm, dm)
		}
		// Existing non-map values are preserved as-is.
	}
}
// parseClusterPorts extracts the cluster listen port and the REST API
// port from a cluster API URL such as "http://localhost:9094".
//
// When the URL carries no explicit port, 9094 is assumed for http and
// 443 for https; other schemes without a port are an error. By this
// package's convention the cluster's libp2p listen port is the REST API
// port plus two.
func parseClusterPorts(clusterAPIURL string) (clusterPort, restAPIPort int, err error) {
	u, err := url.Parse(clusterAPIURL)
	if err != nil {
		return 0, 0, err
	}
	portStr := u.Port()
	if portStr == "" {
		// Default port based on scheme
		switch u.Scheme {
		case "http":
			portStr = "9094"
		case "https":
			portStr = "443"
		default:
			return 0, 0, fmt.Errorf("unknown scheme: %s", u.Scheme)
		}
	}
	// strconv.Atoi rejects trailing garbage (unlike the previous
	// fmt.Sscanf "%d"), and the range check catches out-of-range values
	// like 99999 that url.Parse happily accepts.
	restAPIPort, err = strconv.Atoi(portStr)
	if err != nil || restAPIPort < 1 || restAPIPort > 65535 {
		return 0, 0, fmt.Errorf("invalid port: %s", portStr)
	}
	// Cluster listen port is typically REST API port + 2
	clusterPort = restAPIPort + 2
	return clusterPort, restAPIPort, nil
}
// parseIPFSPort extracts the IPFS API port from apiURL.
//
// An empty URL, or an http URL with no explicit port, yields the IPFS
// daemon's default API port 5001. Any other scheme without a port is an
// error.
func parseIPFSPort(apiURL string) (int, error) {
	if apiURL == "" {
		return 5001, nil // Default
	}
	u, err := url.Parse(apiURL)
	if err != nil {
		return 0, err
	}
	portStr := u.Port()
	if portStr == "" {
		if u.Scheme == "http" {
			return 5001, nil // Default HTTP port
		}
		return 0, fmt.Errorf("unknown scheme: %s", u.Scheme)
	}
	// strconv.Atoi rejects trailing garbage (unlike fmt.Sscanf "%d");
	// the range check rejects out-of-range values url.Parse allows.
	port, err := strconv.Atoi(portStr)
	if err != nil || port < 1 || port > 65535 {
		return 0, fmt.Errorf("invalid port: %s", portStr)
	}
	return port, nil
}
// getBootstrapPeerID queries the bootstrap cluster REST API /peers
// endpoint and returns its reported peer ID (empty when the response
// carries none).
func getBootstrapPeerID(apiURL string) (string, error) {
	// Simple HTTP client to query /peers endpoint
	httpClient := &standardHTTPClient{}
	body, err := httpClient.Get(fmt.Sprintf("%s/peers", apiURL))
	if err != nil {
		return "", err
	}
	parsed := struct {
		ID string `json:"id"`
	}{}
	if err := json.Unmarshal(body, &parsed); err != nil {
		return "", err
	}
	return parsed.ID, nil
}
// loadOrGenerateClusterSecret returns the cluster secret stored at path,
// generating and persisting a new one when the file is missing or
// contains only whitespace.
//
// Previously an existing-but-empty file returned an empty string, which
// would silently configure an unusable cluster secret; that case now
// regenerates. The file is written with 0600 permissions since the
// secret gates cluster membership.
func loadOrGenerateClusterSecret(path string) (string, error) {
	// Try to load existing secret
	if data, err := os.ReadFile(path); err == nil {
		if secret := strings.TrimSpace(string(data)); secret != "" {
			return secret, nil
		}
		// Fall through: regenerate rather than return an empty secret.
	}
	// Generate new secret (32 bytes hex = 64 hex chars)
	secret := generateRandomSecret(64)
	// Save secret
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return "", err
	}
	if err := os.WriteFile(path, []byte(secret), 0600); err != nil {
		return "", err
	}
	return secret, nil
}
// generateRandomSecret returns a random hex string of the given length
// (length/2 random bytes, hex-encoded).
//
// crypto/rand is the only randomness source: the previous fallback
// derived bytes from the process PID, producing a predictable — and
// therefore insecure — cluster secret. A crypto/rand failure indicates
// a broken platform RNG, so it is treated as fatal rather than silently
// degraded to guessable output.
func generateRandomSecret(length int) string {
	buf := make([]byte, length/2)
	if _, err := rand.Read(buf); err != nil {
		panic(fmt.Sprintf("crypto/rand unavailable: %v", err))
	}
	return hex.EncodeToString(buf)
}
// standardHTTPClient implements a minimal HTTP GET client on top of
// net/http, used for one-off queries against local cluster APIs.
type standardHTTPClient struct{}

// Get fetches url and returns the response body; any non-200 status is
// reported as an error.
//
// A 10-second timeout bounds the entire request. The previous
// implementation used http.Get (the zero-timeout default client), which
// could block indefinitely when the peer was unreachable.
func (c *standardHTTPClient) Get(url string) ([]byte, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("HTTP %d: %s", resp.StatusCode, resp.Status)
	}
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return data, nil
}
// FixIPFSConfigAddresses fixes localhost addresses in IPFS config to use 127.0.0.1.
// This is necessary because IPFS doesn't accept "localhost" as a valid IP address in multiaddrs.
// This function always ensures the config is correct, regardless of current state:
// it locates the node's IPFS repo, then runs `ipfs config --json` to set
// Addresses.API and Addresses.Gateway unconditionally. Missing repo is
// not an error (the repo may not be initialized yet).
func (cm *ClusterConfigManager) FixIPFSConfigAddresses() error {
	if cm.cfg.Database.IPFS.APIURL == "" {
		return nil // IPFS not configured
	}
	// Determine IPFS repo path from config, expanding a leading "~"
	dataDir := cm.cfg.Node.DataDir
	if strings.HasPrefix(dataDir, "~") {
		home, err := os.UserHomeDir()
		if err != nil {
			return fmt.Errorf("failed to determine home directory: %w", err)
		}
		dataDir = filepath.Join(home, dataDir[1:])
	}
	// Try to find IPFS repo path
	// Check common locations: dataDir/ipfs/repo, or dataDir/bootstrap/ipfs/repo, etc.
	possiblePaths := []string{
		filepath.Join(dataDir, "ipfs", "repo"),
		filepath.Join(dataDir, "bootstrap", "ipfs", "repo"),
		filepath.Join(dataDir, "node2", "ipfs", "repo"),
		filepath.Join(dataDir, "node3", "ipfs", "repo"),
		filepath.Join(filepath.Dir(dataDir), "bootstrap", "ipfs", "repo"),
		filepath.Join(filepath.Dir(dataDir), "node2", "ipfs", "repo"),
		filepath.Join(filepath.Dir(dataDir), "node3", "ipfs", "repo"),
	}
	var ipfsRepoPath string
	for _, path := range possiblePaths {
		// A repo is identified by the presence of its "config" file.
		if _, err := os.Stat(filepath.Join(path, "config")); err == nil {
			ipfsRepoPath = path
			break
		}
	}
	if ipfsRepoPath == "" {
		cm.logger.Debug("IPFS repo not found, skipping config fix")
		return nil // Not an error if repo doesn't exist yet
	}
	// Parse IPFS API port from config
	ipfsPort, err := parseIPFSPort(cm.cfg.Database.IPFS.APIURL)
	if err != nil {
		return fmt.Errorf("failed to parse IPFS API URL: %w", err)
	}
	// Determine gateway port (typically API port + 3079, or 8080 for bootstrap, 8081 for node2, etc.)
	gatewayPort := 8080
	if strings.Contains(dataDir, "node2") {
		gatewayPort = 8081
	} else if strings.Contains(dataDir, "node3") {
		gatewayPort = 8082
	} else if ipfsPort == 5002 {
		gatewayPort = 8081
	} else if ipfsPort == 5003 {
		gatewayPort = 8082
	}
	// Always ensure API address is correct (don't just check, always set it)
	correctAPIAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, ipfsPort)
	cm.logger.Info("Ensuring IPFS API address is correct",
		zap.String("repo", ipfsRepoPath),
		zap.Int("port", ipfsPort),
		zap.String("correct_address", correctAPIAddr))
	fixCmd := exec.Command("ipfs", "config", "--json", "Addresses.API", correctAPIAddr)
	fixCmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
	if err := fixCmd.Run(); err != nil {
		cm.logger.Warn("Failed to fix IPFS API address", zap.Error(err))
		return fmt.Errorf("failed to set IPFS API address: %w", err)
	}
	// Always ensure Gateway address is correct
	correctGatewayAddr := fmt.Sprintf(`["/ip4/127.0.0.1/tcp/%d"]`, gatewayPort)
	cm.logger.Info("Ensuring IPFS Gateway address is correct",
		zap.String("repo", ipfsRepoPath),
		zap.Int("port", gatewayPort),
		zap.String("correct_address", correctGatewayAddr))
	fixCmd = exec.Command("ipfs", "config", "--json", "Addresses.Gateway", correctGatewayAddr)
	fixCmd.Env = append(os.Environ(), "IPFS_PATH="+ipfsRepoPath)
	if err := fixCmd.Run(); err != nil {
		cm.logger.Warn("Failed to fix IPFS Gateway address", zap.Error(err))
		return fmt.Errorf("failed to set IPFS Gateway address: %w", err)
	}
	// Check if IPFS daemon is running - if so, it may need to be restarted for changes to take effect
	// We can't restart it from here (it's managed by Makefile/systemd), but we can warn
	if cm.isIPFSRunning(ipfsPort) {
		cm.logger.Warn("IPFS daemon appears to be running - it may need to be restarted for config changes to take effect",
			zap.Int("port", ipfsPort),
			zap.String("repo", ipfsRepoPath))
	}
	return nil
}
// isIPFSRunning reports whether an IPFS daemon answers on the local API
// port by probing its /api/v0/id endpoint with a one-second timeout.
func (cm *ClusterConfigManager) isIPFSRunning(port int) bool {
	probe := &http.Client{Timeout: 1 * time.Second}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/api/v0/id", port)
	resp, err := probe.Get(endpoint)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

View File

@ -22,6 +22,7 @@ import (
"github.com/DeBrosOfficial/network/pkg/config" "github.com/DeBrosOfficial/network/pkg/config"
"github.com/DeBrosOfficial/network/pkg/discovery" "github.com/DeBrosOfficial/network/pkg/discovery"
"github.com/DeBrosOfficial/network/pkg/encryption" "github.com/DeBrosOfficial/network/pkg/encryption"
"github.com/DeBrosOfficial/network/pkg/ipfs"
"github.com/DeBrosOfficial/network/pkg/logging" "github.com/DeBrosOfficial/network/pkg/logging"
"github.com/DeBrosOfficial/network/pkg/pubsub" "github.com/DeBrosOfficial/network/pkg/pubsub"
database "github.com/DeBrosOfficial/network/pkg/rqlite" database "github.com/DeBrosOfficial/network/pkg/rqlite"
@ -45,6 +46,9 @@ type Node struct {
// Discovery // Discovery
discoveryManager *discovery.Manager discoveryManager *discovery.Manager
// IPFS Cluster config manager
clusterConfigManager *ipfs.ClusterConfigManager
} }
// NewNode creates a new network node // NewNode creates a new network node
@ -321,7 +325,7 @@ func (n *Node) startLibP2P() error {
// For localhost/development, disable NAT services // For localhost/development, disable NAT services
// For production, these would be enabled // For production, these would be enabled
isLocalhost := len(n.config.Node.ListenAddresses) > 0 && isLocalhost := len(n.config.Node.ListenAddresses) > 0 &&
(strings.Contains(n.config.Node.ListenAddresses[0], "127.0.0.1") || (strings.Contains(n.config.Node.ListenAddresses[0], "localhost") ||
strings.Contains(n.config.Node.ListenAddresses[0], "localhost")) strings.Contains(n.config.Node.ListenAddresses[0], "localhost"))
if isLocalhost { if isLocalhost {
@ -631,6 +635,14 @@ func (n *Node) Start(ctx context.Context) error {
return fmt.Errorf("failed to start LibP2P: %w", err) return fmt.Errorf("failed to start LibP2P: %w", err)
} }
// Initialize IPFS Cluster configuration if enabled
if n.config.Database.IPFS.ClusterAPIURL != "" {
if err := n.startIPFSClusterConfig(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to initialize IPFS Cluster config", zap.Error(err))
// Don't fail node startup if cluster config fails
}
}
// Start RQLite with cluster discovery // Start RQLite with cluster discovery
if err := n.startRQLite(ctx); err != nil { if err := n.startRQLite(ctx); err != nil {
return fmt.Errorf("failed to start RQLite: %w", err) return fmt.Errorf("failed to start RQLite: %w", err)
@ -651,3 +663,41 @@ func (n *Node) Start(ctx context.Context) error {
return nil return nil
} }
// startIPFSClusterConfig initializes and ensures IPFS Cluster configuration
func (n *Node) startIPFSClusterConfig() error {
n.logger.ComponentInfo(logging.ComponentNode, "Initializing IPFS Cluster configuration")
// Create config manager
cm, err := ipfs.NewClusterConfigManager(n.config, n.logger.Logger)
if err != nil {
return fmt.Errorf("failed to create cluster config manager: %w", err)
}
n.clusterConfigManager = cm
// Fix IPFS config addresses (localhost -> 127.0.0.1) before ensuring cluster config
if err := cm.FixIPFSConfigAddresses(); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to fix IPFS config addresses", zap.Error(err))
// Don't fail startup if config fix fails - cluster config will handle it
}
// Ensure configuration exists and is correct
if err := cm.EnsureConfig(); err != nil {
return fmt.Errorf("failed to ensure cluster config: %w", err)
}
// If this is not the bootstrap node, try to update bootstrap peer info
if n.config.Node.Type != "bootstrap" && len(n.config.Discovery.BootstrapPeers) > 0 {
// Try to find bootstrap cluster API URL from config
// For now, we'll discover it from the first bootstrap peer
// In a real scenario, you might want to configure this explicitly
bootstrapClusterAPI := "http://localhost:9094" // Default bootstrap cluster API
if err := cm.UpdateBootstrapPeers(bootstrapClusterAPI); err != nil {
n.logger.ComponentWarn(logging.ComponentNode, "Failed to update bootstrap peers, will retry later", zap.Error(err))
// Don't fail - peers can connect later via mDNS or manual config
}
}
n.logger.ComponentInfo(logging.ComponentNode, "IPFS Cluster configuration initialized")
return nil
}

View File

@ -67,6 +67,15 @@ if ! command -v curl > /dev/null 2>&1; then
exit 1 exit 1
fi fi
# Check for skip flag
# To skip changelog generation, set SKIP_CHANGELOG=1 before committing:
# SKIP_CHANGELOG=1 git commit -m "your message"
# SKIP_CHANGELOG=1 git commit
if [ "$SKIP_CHANGELOG" = "1" ] || [ "$SKIP_CHANGELOG" = "true" ]; then
log "Skipping changelog update (SKIP_CHANGELOG is set)"
exit 0
fi
# Check if we're in a git repo # Check if we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then if ! git rev-parse --git-dir > /dev/null 2>&1; then
error "Not in a git repository" error "Not in a git repository"