From 0ea58354cac60277980ea8092b6ad1d661e05adc Mon Sep 17 00:00:00 2001 From: anonpenguin23 Date: Mon, 10 Nov 2025 15:36:58 +0200 Subject: [PATCH] feat: enhance E2E testing and dependency management - Added new E2E tests for authentication, cache operations, and IPFS interactions to improve coverage and reliability. - Introduced concurrency tests for cache operations to validate performance under load. - Updated `go.mod` to include `github.com/mattn/go-sqlite3` as a dependency for database interactions. - Refined Makefile to simplify E2E test execution and configuration discovery. - Removed outdated client E2E tests and consolidated related functionality for better maintainability. --- CHANGELOG.md | 21 ++ Makefile | 12 +- e2e/auth_negative_test.go | 294 +++++++++++++++ e2e/cache_http_test.go | 511 +++++++++++++++++++++++++ e2e/client_e2e_test.go | 93 ----- e2e/concurrency_test.go | 503 +++++++++++++++++++++++++ e2e/env.go | 644 ++++++++++++++++++++++++++++++++ e2e/gateway_e2e_test.go | 625 ------------------------------- e2e/ipfs_cluster_test.go | 400 ++++++++++++++++++++ e2e/libp2p_connectivity_test.go | 294 +++++++++++++++ e2e/network_http_test.go | 223 +++++++++++ e2e/pubsub_client_test.go | 421 +++++++++++++++++++++ e2e/rqlite_http_test.go | 446 ++++++++++++++++++++++ e2e/storage_http_test.go | 550 +++++++++++++++++++++++++++ go.mod | 1 + go.sum | 2 + pkg/discovery/discovery.go | 12 +- pkg/gateway/cache_handlers.go | 7 +- pkg/gateway/storage_handlers.go | 14 +- pkg/ipfs/client.go | 13 +- pkg/pubsub/publish.go | 24 ++ pkg/pubsub/subscriptions.go | 72 ++-- pkg/rqlite/gateway.go | 30 +- 23 files changed, 4443 insertions(+), 769 deletions(-) create mode 100644 e2e/auth_negative_test.go create mode 100644 e2e/cache_http_test.go delete mode 100644 e2e/client_e2e_test.go create mode 100644 e2e/concurrency_test.go create mode 100644 e2e/env.go delete mode 100644 e2e/gateway_e2e_test.go create mode 100644 e2e/ipfs_cluster_test.go create mode 100644 e2e/libp2p_connectivity_test.go create mode 100644 e2e/network_http_test.go create mode 100644 e2e/pubsub_client_test.go create mode 100644 e2e/rqlite_http_test.go create mode 100644 e2e/storage_http_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index fbc66f7..1437b84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,27 @@ The format is based on [Keep a Changelog][keepachangelog] and adheres to [Semant ### Deprecated ### Fixed +## [0.64.0] - 2025-11-10 + +### Added +- Comprehensive End-to-End (E2E) test suite for Gateway API endpoints (Cache, RQLite, Storage, Network, Auth). +- New E2E tests for concurrent operations and TTL expiry in the distributed cache. +- New E2E tests for LibP2P peer connectivity and discovery. + +### Changed +- Improved Gateway E2E test configuration: automatically discovers Gateway URL and API Key from local `~/.debros` configuration files, removing the need for environment variables. +- The `/v1/network/peers` endpoint now returns a flattened list of multiaddresses for all connected peers. +- Improved robustness of Cache API handlers to correctly identify and return 404 (Not Found) errors when keys are missing, even when wrapped by underlying library errors. +- The RQLite transaction handler now supports the legacy `statements` array format in addition to the `ops` array format for easier use. +- The RQLite schema endpoint now returns tables under the `tables` key instead of `objects`. + +### Deprecated + +### Removed + +### Fixed +- Corrected IPFS Add operation to return the actual file size (byte count) instead of the DAG size in the response. + ## [0.63.3] - 2025-11-10 ### Added diff --git a/Makefile b/Makefile index 5d725cb..7d729ea 100644 --- a/Makefile +++ b/Makefile @@ -6,14 +6,12 @@ test: go test -v $(TEST) # Gateway-focused E2E tests assume gateway and nodes are already running -# Configure via env: -# GATEWAY_BASE_URL (default http://localhost:6001) -# GATEWAY_API_KEY (required for auth-protected routes) +# Auto-discovers configuration from ~/.debros and queries database for API key +# No environment variables required .PHONY: test-e2e test-e2e: - @echo "Running gateway E2E tests (HTTP/WS only)..." - @echo "Base URL: $${GATEWAY_BASE_URL:-http://localhost:6001}" - @test -n "$$GATEWAY_API_KEY" || (echo "GATEWAY_API_KEY must be set" && exit 1) + @echo "Running comprehensive E2E tests..." + @echo "Auto-discovering configuration from ~/.debros..." go test -v -tags e2e ./e2e # Network - Distributed P2P Database System @@ -21,7 +19,7 @@ test-e2e: .PHONY: build clean test run-node run-node2 run-node3 run-example deps tidy fmt vet lint clear-ports install-hooks kill -VERSION := 0.63.3 +VERSION := 0.64.0 COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo unknown) DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) LDFLAGS := -X 'main.version=$(VERSION)' -X 'main.commit=$(COMMIT)' -X 'main.date=$(DATE)' diff --git a/e2e/auth_negative_test.go b/e2e/auth_negative_test.go new file mode 100644 index 0000000..130dc63 --- /dev/null +++ b/e2e/auth_negative_test.go @@ -0,0 +1,294 @@ +//go:build e2e + +package e2e + +import ( + "context" + "net/http" + "testing" + "time" + "unicode" +) + +func TestAuth_MissingAPIKey(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request without auth headers + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/network/status", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for missing auth, got %d (auth may not be enforced on this endpoint)", resp.StatusCode) + } +} + +func TestAuth_InvalidAPIKey(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with invalid API key + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer invalid-key-xyz") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for invalid key, got %d", resp.StatusCode) + } +} + +func TestAuth_CacheWithoutAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request cache endpoint without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/cache/health", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should fail with 401 or 403 + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403 for cache without auth, got %d", status) + } +} + +func TestAuth_StorageWithoutAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request storage endpoint without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/storage/status/QmTest", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should fail with 401 or 403 + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403 for storage without auth, got %d", status) + } +} + +func TestAuth_RQLiteWithoutAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request rqlite endpoint without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/rqlite/schema", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should fail with 401 or 403 + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403 for rqlite without auth, got %d", status) + } +} + +func TestAuth_MalformedBearerToken(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with malformed bearer token + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + // Missing "Bearer " prefix + req.Header.Set("Authorization", "invalid-token-format") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for malformed token, got %d", resp.StatusCode) + } +} + +func TestAuth_ExpiredJWT(t *testing.T) { + // Skip if JWT is not being used + if GetJWT() == "" && GetAPIKey() == "" { + t.Skip("No JWT or API key configured") + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // This test would require an expired JWT token + // For now, test with a clearly invalid JWT structure + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer expired.jwt.token") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for expired JWT, got %d", resp.StatusCode) + } +} + +func TestAuth_EmptyBearerToken(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with empty bearer token + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer ") + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should be unauthorized + if resp.StatusCode != http.StatusUnauthorized && resp.StatusCode != http.StatusForbidden { + t.Logf("warning: expected 401/403 for empty token, got %d", resp.StatusCode) + } +} + +func TestAuth_DuplicateAuthHeaders(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with both API key and invalid JWT + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/cache/health", + Headers: map[string]string{ + "Authorization": "Bearer " + GetAPIKey(), + "X-API-Key": GetAPIKey(), + }, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should succeed if API key is valid + if status != http.StatusOK { + t.Logf("request with both headers returned %d", status) + } +} + +func TestAuth_CaseSensitiveAPIKey(t *testing.T) { + if GetAPIKey() == "" { + t.Skip("No API key configured") + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Request with incorrectly cased API key + apiKey := GetAPIKey() + incorrectKey := "" + for i, ch := range apiKey { + if i%2 == 0 && unicode.IsLetter(ch) { + incorrectKey += string(unicode.ToUpper(ch)) // Convert to uppercase + } else { + incorrectKey += string(ch) + } + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/cache/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Authorization", "Bearer "+incorrectKey) + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // API keys should be case-sensitive + if resp.StatusCode == http.StatusOK { + t.Logf("warning: API key check may not be case-sensitive (got 200)") + } +} + +func TestAuth_HealthEndpointNoAuth(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Health endpoint at /health should not require auth + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + client := NewHTTPClient(30 * time.Second) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + + // Should succeed without auth + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected 200 for /health without auth, got %d", resp.StatusCode) + } +} diff --git a/e2e/cache_http_test.go b/e2e/cache_http_test.go new file mode 100644 index 0000000..6f4a3ed --- /dev/null +++ b/e2e/cache_http_test.go @@ -0,0 +1,511 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" +) + +func TestCache_Health(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/cache/health", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("health check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status"] != "ok" { + t.Fatalf("expected status 'ok', got %v", resp["status"]) + } + + if resp["service"] != "olric" { + t.Fatalf("expected service 'olric', got %v", resp["service"]) + } +} + +func TestCache_PutGet(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "test-key" + value := "test-value" + + // Put value + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + body, status, err := putReq.Do(ctx) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body)) + } + + // Get value + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + body, status, err = getReq.Do(ctx) + if err != nil { + t.Fatalf("get failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var getResp map[string]interface{} + if err := DecodeJSON(body, &getResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if getResp["value"] != value { + t.Fatalf("expected value %q, got %v", value, getResp["value"]) + } +} + +func TestCache_PutGetJSON(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "json-key" + jsonValue := map[string]interface{}{ + "name": "John", + "age": 30, + "tags": []string{"developer", "golang"}, + } + + // Put JSON value + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": jsonValue, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Get JSON value + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + body, status, err := getReq.Do(ctx) + if err != nil { + t.Fatalf("get failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var getResp map[string]interface{} + if err := DecodeJSON(body, &getResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + retrievedValue := getResp["value"].(map[string]interface{}) + if retrievedValue["name"] != jsonValue["name"] { + t.Fatalf("expected name %q, got %v", jsonValue["name"], retrievedValue["name"]) + } + if retrievedValue["age"] != float64(30) { + t.Fatalf("expected age 30, got %v", retrievedValue["age"]) + } +} + +func TestCache_Delete(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "delete-key" + value := "delete-value" + + // Put value + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + + // Delete value + deleteReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/delete", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = deleteReq.Do(ctx) + if err != nil { + t.Fatalf("delete failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Verify deletion + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + // Should get 404 for missing key + if status != http.StatusNotFound { + t.Fatalf("expected status 404 for deleted key, got %d", status) + } +} + +func TestCache_TTL(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "ttl-key" + value := "ttl-value" + + // Put value with TTL + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + "ttl": "2s", + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil { + t.Fatalf("put with TTL failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Verify value exists + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("get immediately after put failed: status %d, err %v", status, err) + } + + // Wait for TTL expiry (2 seconds + buffer) + Delay(2500) + + // Verify value is expired + _, status, err = getReq.Do(ctx) + if status != http.StatusNotFound { + t.Logf("warning: TTL expiry may not be fully implemented; got status %d", status) + } +} + +func TestCache_Scan(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + + // Put multiple keys + keys := []string{"user-1", "user-2", "session-1", "session-2"} + for _, key := range keys { + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": "value-" + key, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + } + + // Scan all keys + scanReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/scan", + Body: map[string]interface{}{ + "dmap": dmap, + }, + } + + body, status, err := scanReq.Do(ctx) + if err != nil { + t.Fatalf("scan failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var scanResp map[string]interface{} + if err := DecodeJSON(body, &scanResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + keysResp := scanResp["keys"].([]interface{}) + if len(keysResp) < 4 { + t.Fatalf("expected at least 4 keys, got %d", len(keysResp)) + } +} + +func TestCache_ScanWithRegex(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + + // Put keys with different patterns + keys := []string{"user-1", "user-2", "session-1", "session-2"} + for _, key := range keys { + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": "value-" + key, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + } + + // Scan with regex pattern + scanReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/scan", + Body: map[string]interface{}{ + "dmap": dmap, + "pattern": "^user-", + }, + } + + body, status, err := scanReq.Do(ctx) + if err != nil { + t.Fatalf("scan with regex failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var scanResp map[string]interface{} + if err := DecodeJSON(body, &scanResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + keysResp := scanResp["keys"].([]interface{}) + if len(keysResp) < 2 { + t.Fatalf("expected at least 2 keys matching pattern, got %d", len(keysResp)) + } +} + +func TestCache_MultiGet(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + keys := []string{"key-1", "key-2", "key-3"} + + // Put values + for i, key := range keys { + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": fmt.Sprintf("value-%d", i), + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + } + + // Multi-get + multiGetReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/mget", + Body: map[string]interface{}{ + "dmap": dmap, + "keys": keys, + }, + } + + body, status, err := multiGetReq.Do(ctx) + if err != nil { + t.Fatalf("mget failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var mgetResp map[string]interface{} + if err := DecodeJSON(body, &mgetResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + results := mgetResp["results"].([]interface{}) + if len(results) != 3 { + t.Fatalf("expected 3 results, got %d", len(results)) + } +} + +func TestCache_MissingDMap(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": "", + "key": "any-key", + }, + } + + _, status, err := getReq.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + if status != http.StatusBadRequest { + t.Fatalf("expected status 400 for missing dmap, got %d", status) + } +} + +func TestCache_MissingKey(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dmap := GenerateDMapName() + + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": "non-existent-key", + }, + } + + _, status, err := getReq.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + if status != http.StatusNotFound { + t.Fatalf("expected status 404 for missing key, got %d", status) + } +} diff --git a/e2e/client_e2e_test.go b/e2e/client_e2e_test.go deleted file mode 100644 index 04962e3..0000000 --- a/e2e/client_e2e_test.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build e2e - -package e2e - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/DeBrosOfficial/network/pkg/client" -) - -func getenv(k, def string) string { - if v := strings.TrimSpace(os.Getenv(k)); v != "" { - return v - } - return def -} - -func requireEnv(t *testing.T, key string) string { - t.Helper() - v := strings.TrimSpace(os.Getenv(key)) - if v == "" { - t.Skipf("%s not set; skipping", key) - } - return v -} - -func TestClient_Database_CreateQueryMigrate(t *testing.T) { - apiKey := requireEnv(t, "GATEWAY_API_KEY") - namespace := getenv("E2E_CLIENT_NAMESPACE", "default") - - cfg := client.DefaultClientConfig(namespace) - cfg.APIKey = apiKey - cfg.QuietMode = true - - if v := strings.TrimSpace(os.Getenv("E2E_BOOTSTRAP_PEERS")); v != "" { - parts := strings.Split(v, ",") - var peers []string - for _, p := range parts { - p = strings.TrimSpace(p) - if p != "" { - peers = append(peers, p) - } - } - cfg.BootstrapPeers = peers - } - if v := strings.TrimSpace(os.Getenv("E2E_RQLITE_NODES")); v != "" { - nodes := strings.Fields(strings.ReplaceAll(v, ",", " ")) - cfg.DatabaseEndpoints = nodes - } - - c, err := client.NewClient(cfg) - if err != nil { - t.Fatalf("new client: %v", err) - } - if err := c.Connect(); err != nil { - t.Fatalf("connect: %v", err) - } - t.Cleanup(func() { _ = c.Disconnect() }) - - // Unique table per run - table := fmt.Sprintf("e2e_items_client_%d", time.Now().UnixNano()) - schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)", table) - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - if err := c.Database().CreateTable(ctx, schema); err != nil { - t.Fatalf("create table: %v", err) - } - // Insert via transaction - stmts := []string{ - fmt.Sprintf("INSERT INTO %s(name) VALUES ('alpha')", table), - fmt.Sprintf("INSERT INTO %s(name) VALUES ('beta')", table), - } - ctx2, cancel2 := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel2() - if err := c.Database().Transaction(ctx2, stmts); err != nil { - t.Fatalf("transaction: %v", err) - } - // Query rows - ctx3, cancel3 := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel3() - res, err := c.Database().Query(ctx3, fmt.Sprintf("SELECT name FROM %s ORDER BY id", table)) - if err != nil { - t.Fatalf("query: %v", err) - } - if res.Count < 2 { - t.Fatalf("expected at least 2 rows, got %d", res.Count) - } -} diff --git a/e2e/concurrency_test.go b/e2e/concurrency_test.go new file mode 100644 index 0000000..16342c8 --- /dev/null +++ b/e2e/concurrency_test.go @@ -0,0 +1,503 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" +) + +// TestCache_ConcurrentWrites tests concurrent cache writes +func TestCache_ConcurrentWrites(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + numGoroutines := 10 + var wg sync.WaitGroup + var errorCount int32 + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + key := fmt.Sprintf("key-%d", idx) + value := fmt.Sprintf("value-%d", idx) + + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + if errorCount > 0 { + t.Fatalf("expected no errors, got %d", errorCount) + } + + // Verify all values exist + scanReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/scan", + Body: map[string]interface{}{ + "dmap": dmap, + }, + } + + body, status, err := scanReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("scan failed: status %d, err %v", status, err) + } + + var scanResp map[string]interface{} + if err := DecodeJSON(body, &scanResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + keys := scanResp["keys"].([]interface{}) + if len(keys) < numGoroutines { + t.Fatalf("expected at least %d keys, got %d", numGoroutines, len(keys)) + } +} + +// TestCache_ConcurrentReads tests concurrent cache reads +func TestCache_ConcurrentReads(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "shared-key" + value := "shared-value" + + // Put value first + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed: status %d, err %v", status, err) + } + + // Read concurrently + numGoroutines := 10 + var wg sync.WaitGroup + var errorCount int32 + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + body, status, err := getReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + return + } + + var getResp map[string]interface{} + if err := DecodeJSON(body, &getResp); err != nil { + atomic.AddInt32(&errorCount, 1) + return + } + + if getResp["value"] != value { + atomic.AddInt32(&errorCount, 1) + } + }() + } + + wg.Wait() + + if errorCount > 0 { + t.Fatalf("expected no errors, got %d", errorCount) + } +} + +// TestCache_ConcurrentDeleteAndWrite tests concurrent delete and write +func TestCache_ConcurrentDeleteAndWrite(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + var wg sync.WaitGroup + var errorCount int32 + + numWrites := 5 + numDeletes := 3 + + // Write keys + for i := 0; i < numWrites; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + key := fmt.Sprintf("key-%d", idx) + value := fmt.Sprintf("value-%d", idx) + + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + // Delete some keys + for i := 0; i < numDeletes; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + key := fmt.Sprintf("key-%d", idx) + + deleteReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/delete", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err := deleteReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + if errorCount > 0 { + t.Fatalf("expected no errors, got %d", errorCount) + } +} + +// TestRQLite_ConcurrentInserts tests concurrent database inserts +func TestRQLite_ConcurrentInserts(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Insert concurrently + numInserts := 10 + var wg sync.WaitGroup + var errorCount int32 + + for i := 0; i < numInserts; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + + txReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, idx), + }, + }, + } + + _, status, err := txReq.Do(ctx) + if err != nil || status != http.StatusOK { + atomic.AddInt32(&errorCount, 1) + } + }(i) + } + + wg.Wait() + + if errorCount > 0 { + t.Logf("warning: %d concurrent inserts failed", errorCount) + } + + // Verify count + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("count query failed: status %d, err %v", status, err) + } + + var countResp map[string]interface{} + if err := DecodeJSON(body, &countResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 { + row := rows[0].([]interface{}) + count := int(row[0].(float64)) + if count < numInserts { + t.Logf("warning: expected %d inserts, got %d", numInserts, count) + } + } +} + +// TestRQLite_LargeBatchTransaction tests a large transaction with many statements +func TestRQLite_LargeBatchTransaction(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Create large batch (100 statements) + var ops []map[string]interface{} + for i := 0; i < 100; i++ { + ops = append(ops, map[string]interface{}{ + "kind": "exec", + "sql": fmt.Sprintf("INSERT INTO %s(value) VALUES ('value-%d')", table, i), + }) + } + + txReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "ops": ops, + }, + } + + _, status, err = txReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("large batch transaction failed: status %d, err %v", status, err) + } + + // Verify count + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("count query failed: status %d, err %v", status, err) + } + + var countResp map[string]interface{} + if err := DecodeJSON(body, &countResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 { + row := rows[0].([]interface{}) + if int(row[0].(float64)) != 100 { + t.Fatalf("expected 100 rows, got %v", row[0]) + } + } +} + +// TestCache_TTLExpiryWithSleep tests TTL expiry with a controlled sleep +func TestCache_TTLExpiryWithSleep(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "ttl-expiry-key" + value := "ttl-expiry-value" + + // Put value with 2 second TTL + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": value, + "ttl": "2s", + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put with TTL failed: status %d, err %v", status, err) + } + + // Verify exists immediately + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("get immediately after put failed: status %d, err %v", status, err) + } + + // Sleep for TTL duration + buffer + Delay(2500) + + // Try to get after TTL expires + _, status, err = getReq.Do(ctx) + if status == http.StatusOK { + t.Logf("warning: TTL expiry may not be fully implemented; key still exists after TTL") + } +} + +// TestCache_ConcurrentWriteAndDelete tests concurrent writes and deletes on same key +func TestCache_ConcurrentWriteAndDelete(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + dmap := GenerateDMapName() + key := "contested-key" + + // Alternate between writes and deletes + numIterations := 5 + for i := 0; i < numIterations; i++ { + // Write + putReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/put", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + "value": fmt.Sprintf("value-%d", i), + }, + } + + _, status, err := putReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("put failed at iteration %d: status %d, err %v", i, status, err) + } + + // Read + getReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/get", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = getReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("get failed at iteration %d: status %d, err %v", i, status, err) + } + + // Delete + deleteReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/cache/delete", + Body: map[string]interface{}{ + "dmap": dmap, + "key": key, + }, + } + + _, status, err = deleteReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Logf("warning: delete at iteration %d failed: status %d, err %v", i, status, err) + } + } +} diff --git a/e2e/env.go b/e2e/env.go new file mode 100644 index 0000000..1f3c78c --- /dev/null +++ b/e2e/env.go @@ -0,0 +1,644 @@ +//go:build e2e + +package e2e + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + "io" + "math/rand" + "net/http" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/DeBrosOfficial/network/pkg/client" + "github.com/DeBrosOfficial/network/pkg/config" + "github.com/DeBrosOfficial/network/pkg/ipfs" + _ "github.com/mattn/go-sqlite3" + "go.uber.org/zap" + "gopkg.in/yaml.v2" +) + +var ( + gatewayURLCache string + apiKeyCache string + bootstrapCache []string + rqliteCache []string + ipfsClusterCache string + ipfsAPICache string + cacheMutex sync.RWMutex +) + +// loadGatewayConfig loads gateway configuration from ~/.debros/gateway.yaml +func loadGatewayConfig() (map[string]interface{}, error) { + configPath, err := config.DefaultPath("gateway.yaml") + if err != nil { + return nil, fmt.Errorf("failed to get gateway config path: %w", err) + } + + data, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read gateway config: %w", err) + } + + var cfg map[string]interface{} + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse gateway config: %w", err) + } + + return cfg, nil +} + +// loadNodeConfig loads node configuration from ~/.debros/node.yaml or bootstrap.yaml +func loadNodeConfig(filename string) (map[string]interface{}, error) { + configPath, err := config.DefaultPath(filename) + if err != nil { + return nil, fmt.Errorf("failed to get config path: %w", err) + } + + data, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read config: %w", err) + } + + var cfg map[string]interface{} + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse config: %w", err) + } + + return cfg, nil +} + +// GetGatewayURL returns the gateway base URL from config +func GetGatewayURL() string { + cacheMutex.RLock() + if gatewayURLCache != "" { + defer cacheMutex.RUnlock() + return gatewayURLCache + } + cacheMutex.RUnlock() + + // Try to load from gateway config + gwCfg, err := loadGatewayConfig() + if err == nil { + if server, ok := gwCfg["server"].(map[interface{}]interface{}); ok { + if port, ok := server["port"].(int); ok { + url := fmt.Sprintf("http://localhost:%d", port) + cacheMutex.Lock() + gatewayURLCache = url + cacheMutex.Unlock() + return url + } + } + } + + // Default fallback + return "http://localhost:6001" +} + +// GetRQLiteNodes returns rqlite endpoint addresses from config +func GetRQLiteNodes() []string { + cacheMutex.RLock() + if len(rqliteCache) > 0 { + defer cacheMutex.RUnlock() + return rqliteCache + } + cacheMutex.RUnlock() + + // Try bootstrap.yaml first, then node.yaml variants + for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok { + if rqlitePort, ok := db["rqlite_port"].(int); ok { + nodes := []string{fmt.Sprintf("http://localhost:%d", rqlitePort)} + cacheMutex.Lock() + rqliteCache = nodes + cacheMutex.Unlock() + return nodes + } + } + } + + // Default fallback + return []string{"http://localhost:5001"} +} + +// queryAPIKeyFromRQLite queries the SQLite database directly for an API key +func queryAPIKeyFromRQLite() (string, error) { + // Build database path from bootstrap/node config + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("failed to get home directory: %w", err) + } + + // Try bootstrap first, then nodes + dbPaths := []string{ + filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite"), + filepath.Join(homeDir, ".debros", "node2", "rqlite", "db.sqlite"), + filepath.Join(homeDir, ".debros", "node3", "rqlite", "db.sqlite"), + } + + for _, dbPath := range dbPaths { + // Check if database file exists + if _, err := os.Stat(dbPath); err != nil { + continue + } + + // Open SQLite database + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + continue + } + defer db.Close() + + // Set timeout for connection + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Query the api_keys table + row := db.QueryRowContext(ctx, "SELECT key FROM api_keys ORDER BY id LIMIT 1") + var apiKey string + if err := row.Scan(&apiKey); err != nil { + if err == sql.ErrNoRows { + continue // Try next database + } + continue // Skip this database on error + } + + if apiKey != "" { + return apiKey, nil + } + } + + return "", fmt.Errorf("failed to retrieve API key from any SQLite database") +} + +// GetAPIKey returns the gateway API key from rqlite or cache +func GetAPIKey() string { + cacheMutex.RLock() + if apiKeyCache != "" { + defer cacheMutex.RUnlock() + return apiKeyCache + } + cacheMutex.RUnlock() + + // Query rqlite for API key + apiKey, err := queryAPIKeyFromRQLite() + if err != nil { + return "" + } + + cacheMutex.Lock() + apiKeyCache = apiKey + cacheMutex.Unlock() + + return apiKey +} + +// GetJWT returns the gateway JWT token (currently not auto-discovered) +func GetJWT() string { + return "" +} + +// GetBootstrapPeers returns bootstrap peer addresses from config +func GetBootstrapPeers() []string { + cacheMutex.RLock() + if len(bootstrapCache) > 0 { + defer cacheMutex.RUnlock() + return bootstrapCache + } + cacheMutex.RUnlock() + + configFiles := []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} + seen := make(map[string]struct{}) + var peers []string + + for _, cfgFile := range configFiles { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{}) + if !ok { + continue + } + rawPeers, ok := discovery["bootstrap_peers"].([]interface{}) + if !ok { + continue + } + for _, v := range rawPeers { + peerStr, ok := v.(string) + if !ok || peerStr == "" { + continue + } + if _, exists := seen[peerStr]; exists { + continue + } + seen[peerStr] = struct{}{} + peers = append(peers, peerStr) + } + } + + if len(peers) == 0 { + return nil + } + + cacheMutex.Lock() + bootstrapCache = peers + cacheMutex.Unlock() + + return peers +} + +// GetIPFSClusterURL returns the IPFS cluster API URL from config +func GetIPFSClusterURL() string { + cacheMutex.RLock() + if ipfsClusterCache != "" { + defer cacheMutex.RUnlock() + return ipfsClusterCache + } + cacheMutex.RUnlock() + + // Try to load from node config + for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok { + if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok { + if url, ok := ipfs["cluster_api_url"].(string); ok && url != "" { + cacheMutex.Lock() + ipfsClusterCache = url + cacheMutex.Unlock() + return url + } + } + } + } + + // Default fallback + return "http://localhost:9094" +} + +// GetIPFSAPIURL returns the IPFS API URL from config +func GetIPFSAPIURL() string { + cacheMutex.RLock() + if ipfsAPICache != "" { + defer cacheMutex.RUnlock() + return ipfsAPICache + } + cacheMutex.RUnlock() + + // Try to load from node config + for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml", "node2.yaml", "node3.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if db, ok := nodeCfg["database"].(map[interface{}]interface{}); ok { + if ipfs, ok := db["ipfs"].(map[interface{}]interface{}); ok { + if url, ok := ipfs["api_url"].(string); ok && url != "" { + cacheMutex.Lock() + ipfsAPICache = url + cacheMutex.Unlock() + return url + } + } + } + } + + // Default fallback + return "http://localhost:5001" +} + +// GetClientNamespace returns the test client namespace from config +func GetClientNamespace() string { + // Try to load from node config + for _, cfgFile := range []string{"bootstrap.yaml", "node.yaml"} { + nodeCfg, err := loadNodeConfig(cfgFile) + if err != nil { + continue + } + + if discovery, ok := nodeCfg["discovery"].(map[interface{}]interface{}); ok { + if ns, ok := discovery["node_namespace"].(string); ok && ns != "" { + return ns + } + } + } + + return "default" +} + +// SkipIfMissingGateway skips the test if gateway is not accessible or API key not available +func SkipIfMissingGateway(t *testing.T) { + t.Helper() + apiKey := GetAPIKey() + if apiKey == "" { + t.Skip("API key not available from rqlite; gateway tests skipped") + } + + // Verify gateway is accessible + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/health", nil) + if err != nil { + t.Skip("Gateway not accessible; tests skipped") + return + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Skip("Gateway not accessible; tests skipped") + return + } + resp.Body.Close() +} + +// IsGatewayReady checks if the gateway is accessible and healthy +func IsGatewayReady(ctx context.Context) bool { + gatewayURL := GetGatewayURL() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, gatewayURL+"/v1/health", nil) + if err != nil { + return false + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + return resp.StatusCode == http.StatusOK +} + +// NewHTTPClient creates an authenticated HTTP client for gateway requests +func NewHTTPClient(timeout time.Duration) *http.Client { + if timeout == 0 { + timeout = 30 * time.Second + } + return &http.Client{Timeout: timeout} +} + +// HTTPRequest is a helper for making authenticated HTTP requests +type HTTPRequest struct { + Method string + URL string + Body interface{} + Headers map[string]string + Timeout time.Duration + SkipAuth bool +} + +// Do executes an HTTP request and returns the response body +func (hr *HTTPRequest) Do(ctx context.Context) ([]byte, int, error) { + if hr.Timeout == 0 { + hr.Timeout = 30 * time.Second + } + + var reqBody io.Reader + if hr.Body != nil { + data, err := json.Marshal(hr.Body) + if err != nil { + return nil, 0, fmt.Errorf("failed to marshal request body: %w", err) + } + reqBody = bytes.NewReader(data) + } + + req, err := http.NewRequestWithContext(ctx, hr.Method, hr.URL, reqBody) + if err != nil { + return nil, 0, fmt.Errorf("failed to create request: %w", err) + } + + // Add headers + if hr.Headers != nil { + for k, v := range hr.Headers { + req.Header.Set(k, v) + } + } + + // Add JSON content type if body is present + if hr.Body != nil && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "application/json") + } + + // Add auth headers + if !hr.SkipAuth { + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + req.Header.Set("X-API-Key", apiKey) + } + } + + client := NewHTTPClient(hr.Timeout) + resp, err := client.Do(req) + if err != nil { + return nil, 0, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, resp.StatusCode, fmt.Errorf("failed to read response: %w", err) + } + + return respBody, resp.StatusCode, nil +} + +// DecodeJSON unmarshals response body into v +func DecodeJSON(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewNetworkClient creates a network client configured for e2e tests +func NewNetworkClient(t *testing.T) client.NetworkClient { + t.Helper() + + namespace := GetClientNamespace() + cfg := client.DefaultClientConfig(namespace) + cfg.APIKey = GetAPIKey() + cfg.QuietMode = true // Suppress debug logs in tests + + if jwt := GetJWT(); jwt != "" { + cfg.JWT = jwt + } + + if peers := GetBootstrapPeers(); len(peers) > 0 { + cfg.BootstrapPeers = peers + } + + if nodes := GetRQLiteNodes(); len(nodes) > 0 { + cfg.DatabaseEndpoints = nodes + } + + c, err := client.NewClient(cfg) + if err != nil { + t.Fatalf("failed to create network client: %v", err) + } + + return c +} + +// GenerateUniqueID generates a unique identifier for test resources +func GenerateUniqueID(prefix string) string { + return fmt.Sprintf("%s_%d_%d", prefix, time.Now().UnixNano(), rand.Intn(10000)) +} + +// GenerateTableName generates a unique table name for database tests +func GenerateTableName() string { + return GenerateUniqueID("e2e_test") +} + +// GenerateDMapName generates a unique dmap name for cache tests +func GenerateDMapName() string { + return GenerateUniqueID("test_dmap") +} + +// GenerateTopic generates a unique topic name for pubsub tests +func GenerateTopic() string { + return GenerateUniqueID("e2e_topic") +} + +// Delay pauses execution for the specified duration +func Delay(ms int) { + time.Sleep(time.Duration(ms) * time.Millisecond) +} + +// WaitForCondition waits for a condition with exponential backoff +func WaitForCondition(maxWait time.Duration, check func() bool) error { + deadline := time.Now().Add(maxWait) + backoff := 100 * time.Millisecond + + for { + if check() { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("condition not met within %v", maxWait) + } + time.Sleep(backoff) + if backoff < 2*time.Second { + backoff = backoff * 2 + } + } +} + +// NewTestLogger creates a test logger for debugging +func NewTestLogger(t *testing.T) *zap.Logger { + t.Helper() + config := zap.NewDevelopmentConfig() + config.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + logger, err := config.Build() + if err != nil { + t.Fatalf("failed to create logger: %v", err) + } + return logger +} + +// CleanupDatabaseTable drops a table from the database after tests +func CleanupDatabaseTable(t *testing.T, tableName string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Query rqlite to drop the table + homeDir, err := os.UserHomeDir() + if err != nil { + t.Logf("warning: failed to get home directory for cleanup: %v", err) + return + } + + dbPath := filepath.Join(homeDir, ".debros", "bootstrap", "rqlite", "db.sqlite") + db, err := sql.Open("sqlite3", dbPath) + if err != nil { + t.Logf("warning: failed to open database for cleanup: %v", err) + return + } + defer db.Close() + + dropSQL := fmt.Sprintf("DROP TABLE IF EXISTS %s", tableName) + if _, err := db.ExecContext(ctx, dropSQL); err != nil { + t.Logf("warning: failed to drop table %s: %v", tableName, err) + } +} + +// CleanupDMapCache deletes a dmap from the cache after tests +func CleanupDMapCache(t *testing.T, dmapName string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodDelete, + URL: GetGatewayURL() + "/v1/cache/dmap/" + dmapName, + Timeout: 10 * time.Second, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Logf("warning: failed to delete dmap %s: %v", dmapName, err) + return + } + + if status != http.StatusOK && status != http.StatusNoContent && status != http.StatusNotFound { + t.Logf("warning: delete dmap returned status %d", status) + } +} + +// CleanupIPFSFile unpins a file from IPFS after tests +func CleanupIPFSFile(t *testing.T, cid string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := &ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(*cfg, logger) + if err != nil { + t.Logf("warning: failed to create IPFS client for cleanup: %v", err) + return + } + + if err := client.Unpin(ctx, cid); err != nil { + t.Logf("warning: failed to unpin file %s: %v", cid, err) + } +} + +// CleanupCacheEntry deletes a cache entry after tests +func CleanupCacheEntry(t *testing.T, dmapName, key string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodDelete, + URL: GetGatewayURL() + "/v1/cache/dmap/" + dmapName + "/key/" + key, + Timeout: 10 * time.Second, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Logf("warning: failed to delete cache entry: %v", err) + return + } + + if status != http.StatusOK && status != http.StatusNoContent && status != http.StatusNotFound { + t.Logf("warning: delete cache entry returned status %d", status) + } +} diff --git a/e2e/gateway_e2e_test.go b/e2e/gateway_e2e_test.go deleted file mode 100644 index 036d9b2..0000000 --- a/e2e/gateway_e2e_test.go +++ /dev/null @@ -1,625 +0,0 @@ -//go:build e2e - -package e2e - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "mime/multipart" - "net/http" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/gorilla/websocket" -) - -func getEnv(key, def string) string { - if v := strings.TrimSpace(os.Getenv(key)); v != "" { - return v - } - return def -} - -func requireAPIKey(t *testing.T) string { - t.Helper() - key := strings.TrimSpace(os.Getenv("GATEWAY_API_KEY")) - if key == "" { - t.Skip("GATEWAY_API_KEY not set; skipping gateway auth-required tests") - } - return key -} - -func gatewayBaseURL() string { - return getEnv("GATEWAY_BASE_URL", "http://localhost:6001") -} - -func httpClient() *http.Client { - return &http.Client{Timeout: 10 * time.Second} -} - -func authHeader(key string) http.Header { - h := http.Header{} - h.Set("Authorization", "Bearer "+key) - h.Set("Content-Type", "application/json") - return h -} - -func TestGateway_Health(t *testing.T) { - base := gatewayBaseURL() - resp, err := httpClient().Get(base + "/v1/health") - if err != nil { - t.Fatalf("health request error: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status: %d", resp.StatusCode) - } - var body map[string]any - if err := json.NewDecoder(resp.Body).Decode(&body); err != nil { - t.Fatalf("decode: %v", err) - } - if body["status"] != "ok" { - t.Fatalf("status not ok: %+v", body) - } -} - -func TestGateway_PubSub_WS_Echo(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - topic := fmt.Sprintf("e2e-ws-%d", time.Now().UnixNano()) - wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{} - hdr.Set("Authorization", "Bearer "+key) - - c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr) - if err != nil { - t.Fatalf("ws dial: %v", err) - } - defer c.Close() - defer c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - - msg := []byte("hello-ws") - if err := c.WriteMessage(websocket.TextMessage, msg); err != nil { - t.Fatalf("ws write: %v", err) - } - - _, data, err := c.ReadMessage() - if err != nil { - t.Fatalf("ws read: %v", err) - } - if string(data) != string(msg) { - t.Fatalf("ws echo mismatch: %q", string(data)) - } -} - -func TestGateway_PubSub_RestPublishToWS(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - topic := fmt.Sprintf("e2e-rest-%d", time.Now().UnixNano()) - wsURL, hdr := toWSURL(base+"/v1/pubsub/ws?topic="+url.QueryEscape(topic)), http.Header{} - hdr.Set("Authorization", "Bearer "+key) - c, _, err := websocket.DefaultDialer.Dial(wsURL, hdr) - if err != nil { - t.Fatalf("ws dial: %v", err) - } - defer c.Close() - - // Publish via REST - payload := randomBytes(24) - b64 := base64.StdEncoding.EncodeToString(payload) - body := fmt.Sprintf(`{"topic":"%s","data_base64":"%s"}`, topic, b64) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/pubsub/publish", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("publish do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("publish status: %d", resp.StatusCode) - } - - // Expect the message via WS - _ = c.SetReadDeadline(time.Now().Add(5 * time.Second)) - _, data, err := c.ReadMessage() - if err != nil { - t.Fatalf("ws read: %v", err) - } - if string(data) != string(payload) { - t.Fatalf("payload mismatch: %q != %q", string(data), string(payload)) - } - - // Topics list should include our topic (without namespace prefix) - req2, _ := http.NewRequest(http.MethodGet, base+"/v1/pubsub/topics", nil) - req2.Header = authHeader(key) - resp2, err := httpClient().Do(req2) - if err != nil { - t.Fatalf("topics do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - t.Fatalf("topics status: %d", resp2.StatusCode) - } - var tlist struct { - Topics []string `json:"topics"` - } - if err := json.NewDecoder(resp2.Body).Decode(&tlist); err != nil { - t.Fatalf("topics decode: %v", err) - } - found := false - for _, tt := range tlist.Topics { - if tt == topic { - found = true - break - } - } - if !found { - t.Fatalf("topic %s not found in topics list", topic) - } -} - -func TestGateway_Database_CreateQueryMigrate(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // Create table - schema := `CREATE TABLE IF NOT EXISTS e2e_items (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)` - body := fmt.Sprintf(`{"schema":%q}`, schema) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("create-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - t.Fatalf("create-table status: %d", resp.StatusCode) - } - - // Insert via transaction (simulate migration/data seed) - txBody := `{"statements":["INSERT INTO e2e_items(name) VALUES ('one')","INSERT INTO e2e_items(name) VALUES ('two')"]}` - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txBody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("tx do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("tx status: %d", resp.StatusCode) - } - - // Query rows - qBody := `{"sql":"SELECT name FROM e2e_items ORDER BY id ASC"}` - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("query do: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("query status: %d", resp.StatusCode) - } - var qr struct { - Columns []string `json:"columns"` - Rows [][]any `json:"rows"` - Count int `json:"count"` - } - if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil { - t.Fatalf("query decode: %v", err) - } - if qr.Count < 2 { - t.Fatalf("expected at least 2 rows, got %d", qr.Count) - } - - // Schema endpoint returns tables - req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil) - req.Header = authHeader(key) - resp2, err := httpClient().Do(req) - if err != nil { - t.Fatalf("schema do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - t.Fatalf("schema status: %d", resp2.StatusCode) - } -} - -func TestGateway_Database_DropTable(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - table := fmt.Sprintf("e2e_tmp_%d", time.Now().UnixNano()) - schema := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)", table) - // create - body := fmt.Sprintf(`{"schema":%q}`, schema) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("create-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - t.Fatalf("create-table status: %d", resp.StatusCode) - } - // drop - dbody := fmt.Sprintf(`{"table":%q}`, table) - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/drop-table", strings.NewReader(dbody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("drop-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("drop-table status: %d", resp.StatusCode) - } - // verify not in schema - req, _ = http.NewRequest(http.MethodGet, base+"/v1/rqlite/schema", nil) - req.Header = authHeader(key) - resp2, err := httpClient().Do(req) - if err != nil { - t.Fatalf("schema do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - t.Fatalf("schema status: %d", resp2.StatusCode) - } - var schemaResp struct { - Tables []struct { - Name string `json:"name"` - } `json:"tables"` - } - if err := json.NewDecoder(resp2.Body).Decode(&schemaResp); err != nil { - t.Fatalf("schema decode: %v", err) - } - for _, tbl := range schemaResp.Tables { - if tbl.Name == table { - t.Fatalf("table %s still present after drop", table) - } - } -} - -func TestGateway_Database_RecreateWithFK(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // base tables - orgs := fmt.Sprintf("e2e_orgs_%d", time.Now().UnixNano()) - users := fmt.Sprintf("e2e_users_%d", time.Now().UnixNano()) - createOrgs := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)", orgs)) - createUsers := fmt.Sprintf(`{"schema":%q}`, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)", users)) - - for _, body := range []string{createOrgs, createUsers} { - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/create-table", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("create-table do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusCreated { - t.Fatalf("create-table status: %d", resp.StatusCode) - } - } - // seed data - txSeed := fmt.Sprintf(`{"statements":["INSERT INTO %s(id,name) VALUES (1,'org')","INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')"]}`, orgs, users) - req, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txSeed)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("seed tx do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("seed tx status: %d", resp.StatusCode) - } - - // migrate: change users.age TEXT -> INTEGER and add FK to orgs(id) - // Note: Some backends may not support connection-scoped BEGIN/COMMIT or PRAGMA via HTTP. - // We apply the standard recreate pattern without explicit PRAGMAs/transaction. - txMig := fmt.Sprintf(`{"statements":[ - "CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)", - "INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s", - "DROP TABLE %s", - "ALTER TABLE %s_new RENAME TO %s" - ]}`, users, orgs, users, users, users, users, users) - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/transaction", strings.NewReader(txMig)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("mig tx do: %v", err) - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("mig tx status: %d", resp.StatusCode) - } - - // verify schema type change - qBody := fmt.Sprintf(`{"sql":"PRAGMA table_info(%s)"}`, users) - req, _ = http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody)) - req.Header = authHeader(key) - resp, err = httpClient().Do(req) - if err != nil { - t.Fatalf("pragma do: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("pragma status: %d", resp.StatusCode) - } - var qr struct { - Columns []string `json:"columns"` - Rows [][]any `json:"rows"` - } - if err := json.NewDecoder(resp.Body).Decode(&qr); err != nil { - t.Fatalf("pragma decode: %v", err) - } - // column order: cid,name,type,notnull,dflt_value,pk - ageIsInt := false - for _, row := range qr.Rows { - if len(row) >= 3 && fmt.Sprintf("%v", row[1]) == "age" { - tstr := strings.ToUpper(fmt.Sprintf("%v", row[2])) - if strings.Contains(tstr, "INT") { - ageIsInt = true - break - } - } - } - if !ageIsInt { - // Fallback: inspect CREATE TABLE SQL from sqlite_master - qBody2 := fmt.Sprintf(`{"sql":"SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'"}`, users) - req2, _ := http.NewRequest(http.MethodPost, base+"/v1/rqlite/query", strings.NewReader(qBody2)) - req2.Header = authHeader(key) - resp3, err := httpClient().Do(req2) - if err != nil { - t.Fatalf("sqlite_master do: %v", err) - } - defer resp3.Body.Close() - if resp3.StatusCode != http.StatusOK { - t.Fatalf("sqlite_master status: %d", resp3.StatusCode) - } - var qr2 struct { - Rows [][]any `json:"rows"` - } - if err := json.NewDecoder(resp3.Body).Decode(&qr2); err != nil { - t.Fatalf("sqlite_master decode: %v", err) - } - found := false - for _, row := range qr2.Rows { - if len(row) > 0 { - sql := strings.ToUpper(fmt.Sprintf("%v", row[0])) - if strings.Contains(sql, "AGE INT") || strings.Contains(sql, "AGE INTEGER") { - found = true - break - } - } - } - if !found { - t.Fatalf("age column type not INTEGER after migration") - } - } -} - -func TestGateway_Storage_UploadMultipart(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // Create multipart form data using proper multipart writer - content := []byte("test file content for IPFS upload") - var buf bytes.Buffer - writer := multipart.NewWriter(&buf) - part, err := writer.CreateFormFile("file", "test.txt") - if err != nil { - t.Fatalf("create form file: %v", err) - } - if _, err := part.Write(content); err != nil { - t.Fatalf("write content: %v", err) - } - if err := writer.Close(); err != nil { - t.Fatalf("close writer: %v", err) - } - - req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", &buf) - req.Header = authHeader(key) - req.Header.Set("Content-Type", writer.FormDataContentType()) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("upload do: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusServiceUnavailable { - t.Skip("IPFS storage not available; skipping storage tests") - } - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - t.Fatalf("upload status: %d, body: %s", resp.StatusCode, string(body)) - } - - var uploadResp struct { - Cid string `json:"cid"` - Name string `json:"name"` - Size int64 `json:"size"` - } - if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil { - t.Fatalf("upload decode: %v", err) - } - if uploadResp.Cid == "" { - t.Fatalf("upload returned empty CID") - } - if uploadResp.Name != "test.txt" { - t.Fatalf("upload name mismatch: got %s", uploadResp.Name) - } - if uploadResp.Size == 0 { - t.Fatalf("upload size is zero") - } - - // Test pinning the uploaded content - pinBody := fmt.Sprintf(`{"cid":"%s","name":"test-pinned"}`, uploadResp.Cid) - req2, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/pin", strings.NewReader(pinBody)) - req2.Header = authHeader(key) - resp2, err := httpClient().Do(req2) - if err != nil { - t.Fatalf("pin do: %v", err) - } - defer resp2.Body.Close() - if resp2.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp2.Body) - t.Fatalf("pin status: %d, body: %s", resp2.StatusCode, string(body)) - } - - // Test getting pin status - req3, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/"+uploadResp.Cid, nil) - req3.Header = authHeader(key) - resp3, err := httpClient().Do(req3) - if err != nil { - t.Fatalf("status do: %v", err) - } - defer resp3.Body.Close() - if resp3.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp3.Body) - t.Fatalf("status status: %d, body: %s", resp3.StatusCode, string(body)) - } - - var statusResp struct { - Cid string `json:"cid"` - Status string `json:"status"` - ReplicationFactor int `json:"replication_factor"` - Peers []string `json:"peers"` - } - if err := json.NewDecoder(resp3.Body).Decode(&statusResp); err != nil { - t.Fatalf("status decode: %v", err) - } - if statusResp.Cid != uploadResp.Cid { - t.Fatalf("status CID mismatch: got %s", statusResp.Cid) - } - - // Test retrieving content - req4, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/get/"+uploadResp.Cid, nil) - req4.Header = authHeader(key) - resp4, err := httpClient().Do(req4) - if err != nil { - t.Fatalf("get do: %v", err) - } - defer resp4.Body.Close() - if resp4.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp4.Body) - t.Fatalf("get status: %d, body: %s", resp4.StatusCode, string(body)) - } - - retrieved, err := io.ReadAll(resp4.Body) - if err != nil { - t.Fatalf("get read: %v", err) - } - if string(retrieved) != string(content) { - t.Fatalf("retrieved content mismatch: got %q", string(retrieved)) - } - - // Test unpinning - req5, _ := http.NewRequest(http.MethodDelete, base+"/v1/storage/unpin/"+uploadResp.Cid, nil) - req5.Header = authHeader(key) - resp5, err := httpClient().Do(req5) - if err != nil { - t.Fatalf("unpin do: %v", err) - } - defer resp5.Body.Close() - if resp5.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp5.Body) - t.Fatalf("unpin status: %d, body: %s", resp5.StatusCode, string(body)) - } -} - -func TestGateway_Storage_UploadJSON(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // Test JSON upload with base64 data - content := []byte("test json upload content") - b64 := base64.StdEncoding.EncodeToString(content) - body := fmt.Sprintf(`{"name":"test.json","data":"%s"}`, b64) - - req, _ := http.NewRequest(http.MethodPost, base+"/v1/storage/upload", strings.NewReader(body)) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("upload json do: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusServiceUnavailable { - t.Skip("IPFS storage not available; skipping storage tests") - } - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - t.Fatalf("upload json status: %d, body: %s", resp.StatusCode, string(body)) - } - - var uploadResp struct { - Cid string `json:"cid"` - Name string `json:"name"` - Size int64 `json:"size"` - } - if err := json.NewDecoder(resp.Body).Decode(&uploadResp); err != nil { - t.Fatalf("upload json decode: %v", err) - } - if uploadResp.Cid == "" { - t.Fatalf("upload json returned empty CID") - } - if uploadResp.Name != "test.json" { - t.Fatalf("upload json name mismatch: got %s", uploadResp.Name) - } -} - -func TestGateway_Storage_InvalidCID(t *testing.T) { - key := requireAPIKey(t) - base := gatewayBaseURL() - - // Test status with invalid CID - req, _ := http.NewRequest(http.MethodGet, base+"/v1/storage/status/QmInvalidCID123", nil) - req.Header = authHeader(key) - resp, err := httpClient().Do(req) - if err != nil { - t.Fatalf("status invalid do: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusServiceUnavailable { - t.Skip("IPFS storage not available; skipping storage tests") - } - - // Should return error but not crash - if resp.StatusCode != http.StatusNotFound && resp.StatusCode != http.StatusInternalServerError { - t.Fatalf("expected error status for invalid CID, got %d", resp.StatusCode) - } -} - -func toWSURL(httpURL string) string { - u, err := url.Parse(httpURL) - if err != nil { - return httpURL - } - if u.Scheme == "https" { - u.Scheme = "wss" - } else { - u.Scheme = "ws" - } - return u.String() -} - -func randomBytes(n int) []byte { - b := make([]byte, n) - _, _ = rand.Read(b) - return b -} diff --git a/e2e/ipfs_cluster_test.go b/e2e/ipfs_cluster_test.go new file mode 100644 index 0000000..5d8dff1 --- /dev/null +++ b/e2e/ipfs_cluster_test.go @@ -0,0 +1,400 @@ +//go:build e2e + +package e2e + +import ( + "bytes" + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/DeBrosOfficial/network/pkg/ipfs" +) + +func TestIPFSCluster_Health(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 10 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + err = client.Health(ctx) + if err != nil { + t.Fatalf("health check failed: %v", err) + } +} + +func TestIPFSCluster_GetPeerCount(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 10 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + peerCount, err := client.GetPeerCount(ctx) + if err != nil { + t.Fatalf("get peer count failed: %v", err) + } + + if peerCount < 0 { + t.Fatalf("expected non-negative peer count, got %d", peerCount) + } + + t.Logf("IPFS cluster peers: %d", peerCount) +} + +func TestIPFSCluster_AddFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + content := []byte("IPFS cluster test content") + result, err := client.Add(ctx, bytes.NewReader(content), "test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + if result.Cid == "" { + t.Fatalf("expected non-empty CID") + } + + if result.Size != int64(len(content)) { + t.Fatalf("expected size %d, got %d", len(content), result.Size) + } + + t.Logf("Added file with CID: %s", result.Cid) +} + +func TestIPFSCluster_PinFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add file first + content := []byte("IPFS pin test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "pin-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + // Pin the file + pinResult, err := client.Pin(ctx, cid, "pinned-file", 1) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + if pinResult.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid) + } + + t.Logf("Pinned file: %s", cid) +} + +func TestIPFSCluster_PinStatus(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add and pin file + content := []byte("IPFS status test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "status-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + pinResult, err := client.Pin(ctx, cid, "status-test", 1) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + if pinResult.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid) + } + + // Give pin time to propagate + Delay(1000) + + // Get status + status, err := client.PinStatus(ctx, cid) + if err != nil { + t.Fatalf("get pin status failed: %v", err) + } + + if status.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, status.Cid) + } + + if status.Name != "status-test" { + t.Fatalf("expected name 'status-test', got %s", status.Name) + } + + if status.ReplicationFactor < 1 { + t.Logf("warning: replication factor is %d, expected >= 1", status.ReplicationFactor) + } + + t.Logf("Pin status: %s (replication: %d, peers: %d)", status.Status, status.ReplicationFactor, len(status.Peers)) +} + +func TestIPFSCluster_UnpinFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add and pin file + content := []byte("IPFS unpin test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "unpin-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + _, err = client.Pin(ctx, cid, "unpin-test", 1) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + // Unpin file + err = client.Unpin(ctx, cid) + if err != nil { + t.Fatalf("unpin file failed: %v", err) + } + + t.Logf("Unpinned file: %s", cid) +} + +func TestIPFSCluster_GetFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add file + content := []byte("IPFS get test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "get-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + // Give time for propagation + Delay(1000) + + // Get file + rc, err := client.Get(ctx, cid, GetIPFSAPIURL()) + if err != nil { + t.Fatalf("get file failed: %v", err) + } + defer rc.Close() + + retrievedContent, err := io.ReadAll(rc) + if err != nil { + t.Fatalf("failed to read content: %v", err) + } + + if !bytes.Equal(retrievedContent, content) { + t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent)) + } + + t.Logf("Retrieved file: %s (%d bytes)", cid, len(retrievedContent)) +} + +func TestIPFSCluster_LargeFile(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 60 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Create 5MB file + content := bytes.Repeat([]byte("x"), 5*1024*1024) + result, err := client.Add(ctx, bytes.NewReader(content), "large.bin") + if err != nil { + t.Fatalf("add large file failed: %v", err) + } + + if result.Cid == "" { + t.Fatalf("expected non-empty CID") + } + + if result.Size != int64(len(content)) { + t.Fatalf("expected size %d, got %d", len(content), result.Size) + } + + t.Logf("Added large file with CID: %s (%d bytes)", result.Cid, result.Size) +} + +func TestIPFSCluster_ReplicationFactor(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add file + content := []byte("IPFS replication test content") + addResult, err := client.Add(ctx, bytes.NewReader(content), "replication-test.txt") + if err != nil { + t.Fatalf("add file failed: %v", err) + } + + cid := addResult.Cid + + // Pin with specific replication factor + replicationFactor := 2 + pinResult, err := client.Pin(ctx, cid, "replication-test", replicationFactor) + if err != nil { + t.Fatalf("pin file failed: %v", err) + } + + if pinResult.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, pinResult.Cid) + } + + // Give time for replication + Delay(2000) + + // Check status + status, err := client.PinStatus(ctx, cid) + if err != nil { + t.Fatalf("get pin status failed: %v", err) + } + + t.Logf("Replication factor: requested=%d, actual=%d, peers=%d", replicationFactor, status.ReplicationFactor, len(status.Peers)) +} + +func TestIPFSCluster_MultipleFiles(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + logger := NewTestLogger(t) + cfg := ipfs.Config{ + ClusterAPIURL: GetIPFSClusterURL(), + Timeout: 30 * time.Second, + } + + client, err := ipfs.NewClient(cfg, logger) + if err != nil { + t.Fatalf("failed to create IPFS client: %v", err) + } + + // Add multiple files + numFiles := 5 + var cids []string + + for i := 0; i < numFiles; i++ { + content := []byte(fmt.Sprintf("File %d", i)) + result, err := client.Add(ctx, bytes.NewReader(content), fmt.Sprintf("file%d.txt", i)) + if err != nil { + t.Fatalf("add file %d failed: %v", i, err) + } + cids = append(cids, result.Cid) + } + + if len(cids) != numFiles { + t.Fatalf("expected %d files added, got %d", numFiles, len(cids)) + } + + // Verify all files exist + for i, cid := range cids { + status, err := client.PinStatus(ctx, cid) + if err != nil { + t.Logf("warning: failed to get status for file %d: %v", i, err) + continue + } + + if status.Cid != cid { + t.Fatalf("expected cid %s, got %s", cid, status.Cid) + } + } + + t.Logf("Successfully added and verified %d files", numFiles) +} diff --git a/e2e/libp2p_connectivity_test.go b/e2e/libp2p_connectivity_test.go new file mode 100644 index 0000000..0a6408a --- /dev/null +++ b/e2e/libp2p_connectivity_test.go @@ -0,0 +1,294 @@ +//go:build e2e + +package e2e + +import ( + "context" + "net/http" + "strings" + "testing" + "time" +) + +func TestLibP2P_PeerConnectivity(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create and connect client + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Verify peer connectivity through the gateway + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + if len(peers) == 0 { + t.Logf("warning: no peers connected (cluster may still be initializing)") + } +} + +func TestLibP2P_BootstrapPeers(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + bootstrapPeers := GetBootstrapPeers() + if len(bootstrapPeers) == 0 { + t.Skipf("E2E_BOOTSTRAP_PEERS not set; skipping") + } + + // Create client with bootstrap peers explicitly set + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Give peer discovery time + Delay(2000) + + // Verify we're connected (check via gateway status) + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("status request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["connected"] != true { + t.Logf("warning: client not connected to network (cluster may still be initializing)") + } +} + +func TestLibP2P_MultipleClientConnections(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create multiple clients + c1 := NewNetworkClient(t) + c2 := NewNetworkClient(t) + c3 := NewNetworkClient(t) + + if err := c1.Connect(); err != nil { + t.Fatalf("c1 connect failed: %v", err) + } + defer c1.Disconnect() + + if err := c2.Connect(); err != nil { + t.Fatalf("c2 connect failed: %v", err) + } + defer c2.Disconnect() + + if err := c3.Connect(); err != nil { + t.Fatalf("c3 connect failed: %v", err) + } + defer c3.Disconnect() + + // Give peer discovery time + Delay(2000) + + // Verify gateway sees multiple peers + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + if len(peers) < 1 { + t.Logf("warning: expected at least 1 peer, got %d", len(peers)) + } +} + +func TestLibP2P_ReconnectAfterDisconnect(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + c := NewNetworkClient(t) + + // Connect + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + + // Verify connected via gateway + req1 := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + _, status1, err := req1.Do(ctx) + if err != nil || status1 != http.StatusOK { + t.Logf("warning: gateway check failed before disconnect: status %d, err %v", status1, err) + } + + // Disconnect + if err := c.Disconnect(); err != nil { + t.Logf("warning: disconnect failed: %v", err) + } + + // Give time for disconnect to propagate + Delay(500) + + // Reconnect + if err := c.Connect(); err != nil { + t.Fatalf("reconnect failed: %v", err) + } + defer c.Disconnect() + + // Verify connected via gateway again + req2 := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + _, status2, err := req2.Do(ctx) + if err != nil || status2 != http.StatusOK { + t.Logf("warning: gateway check failed after reconnect: status %d, err %v", status2, err) + } +} + +func TestLibP2P_PeerDiscovery(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create client + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Give peer discovery time + Delay(3000) + + // Get peer list + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + if len(peers) == 0 { + t.Logf("warning: no peers discovered (cluster may not have multiple nodes)") + } else { + // Verify peer format (should be multiaddr strings) + for _, p := range peers { + peerStr := p.(string) + if !strings.Contains(peerStr, "/p2p/") && !strings.Contains(peerStr, "/ipfs/") { + t.Logf("warning: unexpected peer format: %s", peerStr) + } + } + } +} + +func TestLibP2P_PeerAddressFormat(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create client + c := NewNetworkClient(t) + if err := c.Connect(); err != nil { + t.Fatalf("connect failed: %v", err) + } + defer c.Disconnect() + + // Get peer list + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + peers := resp["peers"].([]interface{}) + for _, p := range peers { + peerStr := p.(string) + // Multiaddrs should start with / + if !strings.HasPrefix(peerStr, "/") { + t.Fatalf("expected multiaddr format, got %s", peerStr) + } + } +} diff --git a/e2e/network_http_test.go b/e2e/network_http_test.go new file mode 100644 index 0000000..0f91f4e --- /dev/null +++ b/e2e/network_http_test.go @@ -0,0 +1,223 @@ +//go:build e2e + +package e2e + +import ( + "context" + "net/http" + "testing" + "time" +) + +func TestNetwork_Health(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/health", + SkipAuth: true, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("health check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status"] != "ok" { + t.Fatalf("expected status 'ok', got %v", resp["status"]) + } +} + +func TestNetwork_Status(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("status check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if _, ok := resp["connected"]; !ok { + t.Fatalf("expected 'connected' field in response") + } + + if _, ok := resp["peer_count"]; !ok { + t.Fatalf("expected 'peer_count' field in response") + } +} + +func TestNetwork_Peers(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/peers", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("peers check failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if _, ok := resp["peers"]; !ok { + t.Fatalf("expected 'peers' field in response") + } +} + +func TestNetwork_ProxyAnonSuccess(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/proxy/anon", + Body: map[string]interface{}{ + "url": "https://httpbin.org/get", + "method": "GET", + "headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"}, + }, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("proxy anon request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body)) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status_code"] != float64(200) { + t.Fatalf("expected proxy status 200, got %v", resp["status_code"]) + } + + if _, ok := resp["body"]; !ok { + t.Fatalf("expected 'body' field in response") + } +} + +func TestNetwork_ProxyAnonBadURL(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/proxy/anon", + Body: map[string]interface{}{ + "url": "http://localhost:1/nonexistent", + "method": "GET", + }, + } + + _, status, err := req.Do(ctx) + if err == nil && status == http.StatusOK { + t.Fatalf("expected error for bad URL, got status 200") + } +} + +func TestNetwork_ProxyAnonPostRequest(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/proxy/anon", + Body: map[string]interface{}{ + "url": "https://httpbin.org/post", + "method": "POST", + "headers": map[string]string{"User-Agent": "DeBros-E2E-Test/1.0"}, + "body": "test_data", + }, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("proxy anon POST failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body)) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp["status_code"] != float64(200) { + t.Fatalf("expected proxy status 200, got %v", resp["status_code"]) + } +} + +func TestNetwork_Unauthorized(t *testing.T) { + // Test without API key + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Create request without auth + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/network/status", + SkipAuth: true, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + if status != http.StatusUnauthorized && status != http.StatusForbidden { + t.Logf("warning: expected 401/403, got %d (auth may not be enforced on this endpoint)", status) + } +} diff --git a/e2e/pubsub_client_test.go b/e2e/pubsub_client_test.go new file mode 100644 index 0000000..5063c47 --- /dev/null +++ b/e2e/pubsub_client_test.go @@ -0,0 +1,421 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "sync" + "testing" + "time" +) + +func newMessageCollector(ctx context.Context, buffer int) (chan []byte, func(string, []byte) error) { + if buffer <= 0 { + buffer = 1 + } + + ch := make(chan []byte, buffer) + handler := func(_ string, data []byte) error { + copied := append([]byte(nil), data...) + select { + case ch <- copied: + case <-ctx.Done(): + } + return nil + } + return ch, handler +} + +func waitForMessage(ctx context.Context, ch <-chan []byte) ([]byte, error) { + select { + case msg := <-ch: + return msg, nil + case <-ctx.Done(): + return nil, fmt.Errorf("context finished while waiting for pubsub message: %w", ctx.Err()) + } +} + +func TestPubSub_SubscribePublish(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create two clients + client1 := NewNetworkClient(t) + client2 := NewNetworkClient(t) + + if err := client1.Connect(); err != nil { + t.Fatalf("client1 connect failed: %v", err) + } + defer client1.Disconnect() + + if err := client2.Connect(); err != nil { + t.Fatalf("client2 connect failed: %v", err) + } + defer client2.Disconnect() + + topic := GenerateTopic() + message := "test-message-from-client1" + + // Subscribe on client2 + messageCh, handler := newMessageCollector(ctx, 1) + if err := client2.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer client2.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish from client1 + if err := client1.PubSub().Publish(ctx, topic, []byte(message)); err != nil { + t.Fatalf("publish failed: %v", err) + } + + // Receive message on client2 + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg, err := waitForMessage(recvCtx, messageCh) + if err != nil { + t.Fatalf("receive failed: %v", err) + } + + if string(msg) != message { + t.Fatalf("expected message %q, got %q", message, string(msg)) + } +} + +func TestPubSub_MultipleSubscribers(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create three clients + clientPub := NewNetworkClient(t) + clientSub1 := NewNetworkClient(t) + clientSub2 := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub1.Connect(); err != nil { + t.Fatalf("subscriber1 connect failed: %v", err) + } + defer clientSub1.Disconnect() + + if err := clientSub2.Connect(); err != nil { + t.Fatalf("subscriber2 connect failed: %v", err) + } + defer clientSub2.Disconnect() + + topic := GenerateTopic() + message1 := "message-for-sub1" + message2 := "message-for-sub2" + + // Subscribe on both clients + sub1Ch, sub1Handler := newMessageCollector(ctx, 4) + if err := clientSub1.PubSub().Subscribe(ctx, topic, sub1Handler); err != nil { + t.Fatalf("subscribe1 failed: %v", err) + } + defer clientSub1.PubSub().Unsubscribe(ctx, topic) + + sub2Ch, sub2Handler := newMessageCollector(ctx, 4) + if err := clientSub2.PubSub().Subscribe(ctx, topic, sub2Handler); err != nil { + t.Fatalf("subscribe2 failed: %v", err) + } + defer clientSub2.PubSub().Unsubscribe(ctx, topic) + + // Give subscriptions time to propagate + Delay(500) + + // Publish first message + if err := clientPub.PubSub().Publish(ctx, topic, []byte(message1)); err != nil { + t.Fatalf("publish1 failed: %v", err) + } + + // Both subscribers should receive first message + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg1a, err := waitForMessage(recvCtx, sub1Ch) + if err != nil { + t.Fatalf("sub1 receive1 failed: %v", err) + } + + if string(msg1a) != message1 { + t.Fatalf("sub1: expected %q, got %q", message1, string(msg1a)) + } + + msg1b, err := waitForMessage(recvCtx, sub2Ch) + if err != nil { + t.Fatalf("sub2 receive1 failed: %v", err) + } + + if string(msg1b) != message1 { + t.Fatalf("sub2: expected %q, got %q", message1, string(msg1b)) + } + + // Publish second message + if err := clientPub.PubSub().Publish(ctx, topic, []byte(message2)); err != nil { + t.Fatalf("publish2 failed: %v", err) + } + + // Both subscribers should receive second message + recvCtx2, recvCancel2 := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel2() + + msg2a, err := waitForMessage(recvCtx2, sub1Ch) + if err != nil { + t.Fatalf("sub1 receive2 failed: %v", err) + } + + if string(msg2a) != message2 { + t.Fatalf("sub1: expected %q, got %q", message2, string(msg2a)) + } + + msg2b, err := waitForMessage(recvCtx2, sub2Ch) + if err != nil { + t.Fatalf("sub2 receive2 failed: %v", err) + } + + if string(msg2b) != message2 { + t.Fatalf("sub2: expected %q, got %q", message2, string(msg2b)) + } +} + +func TestPubSub_Deduplication(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create two clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic := GenerateTopic() + message := "duplicate-test-message" + + // Subscribe on client + messageCh, handler := newMessageCollector(ctx, 3) + if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish the same message multiple times + for i := 0; i < 3; i++ { + if err := clientPub.PubSub().Publish(ctx, topic, []byte(message)); err != nil { + t.Fatalf("publish %d failed: %v", i, err) + } + } + + // Receive messages - should get all (no dedup filter on subscribe) + recvCtx, recvCancel := context.WithTimeout(ctx, 5*time.Second) + defer recvCancel() + + receivedCount := 0 + for receivedCount < 3 { + if _, err := waitForMessage(recvCtx, messageCh); err != nil { + break + } + receivedCount++ + } + + if receivedCount < 1 { + t.Fatalf("expected to receive at least 1 message, got %d", receivedCount) + } +} + +func TestPubSub_ConcurrentPublish(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic := GenerateTopic() + numMessages := 10 + + // Subscribe + messageCh, handler := newMessageCollector(ctx, numMessages) + if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish multiple messages concurrently + var wg sync.WaitGroup + for i := 0; i < numMessages; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + msg := fmt.Sprintf("concurrent-msg-%d", idx) + if err := clientPub.PubSub().Publish(ctx, topic, []byte(msg)); err != nil { + t.Logf("publish %d failed: %v", idx, err) + } + }(i) + } + wg.Wait() + + // Receive messages + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + receivedCount := 0 + for receivedCount < numMessages { + if _, err := waitForMessage(recvCtx, messageCh); err != nil { + break + } + receivedCount++ + } + + if receivedCount < numMessages { + t.Logf("expected %d messages, got %d (some may have been dropped)", numMessages, receivedCount) + } +} + +func TestPubSub_TopicIsolation(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic1 := GenerateTopic() + topic2 := GenerateTopic() + + // Subscribe to topic1 + messageCh, handler := newMessageCollector(ctx, 2) + if err := clientSub.PubSub().Subscribe(ctx, topic1, handler); err != nil { + t.Fatalf("subscribe1 failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic1) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish to topic2 + msg2 := "message-on-topic2" + if err := clientPub.PubSub().Publish(ctx, topic2, []byte(msg2)); err != nil { + t.Fatalf("publish2 failed: %v", err) + } + + // Publish to topic1 + msg1 := "message-on-topic1" + if err := clientPub.PubSub().Publish(ctx, topic1, []byte(msg1)); err != nil { + t.Fatalf("publish1 failed: %v", err) + } + + // Receive on sub1 - should get msg1 only + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg, err := waitForMessage(recvCtx, messageCh) + if err != nil { + t.Fatalf("receive failed: %v", err) + } + + if string(msg) != msg1 { + t.Fatalf("expected %q, got %q", msg1, string(msg)) + } +} + +func TestPubSub_EmptyMessage(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create clients + clientPub := NewNetworkClient(t) + clientSub := NewNetworkClient(t) + + if err := clientPub.Connect(); err != nil { + t.Fatalf("publisher connect failed: %v", err) + } + defer clientPub.Disconnect() + + if err := clientSub.Connect(); err != nil { + t.Fatalf("subscriber connect failed: %v", err) + } + defer clientSub.Disconnect() + + topic := GenerateTopic() + + // Subscribe + messageCh, handler := newMessageCollector(ctx, 1) + if err := clientSub.PubSub().Subscribe(ctx, topic, handler); err != nil { + t.Fatalf("subscribe failed: %v", err) + } + defer clientSub.PubSub().Unsubscribe(ctx, topic) + + // Give subscription time to propagate and mesh to form + Delay(2000) + + // Publish empty message + if err := clientPub.PubSub().Publish(ctx, topic, []byte("")); err != nil { + t.Fatalf("publish empty failed: %v", err) + } + + // Receive on sub - should get empty message + recvCtx, recvCancel := context.WithTimeout(ctx, 10*time.Second) + defer recvCancel() + + msg, err := waitForMessage(recvCtx, messageCh) + if err != nil { + t.Fatalf("receive failed: %v", err) + } + + if len(msg) != 0 { + t.Fatalf("expected empty message, got %q", string(msg)) + } +} diff --git a/e2e/rqlite_http_test.go b/e2e/rqlite_http_test.go new file mode 100644 index 0000000..0d7df2b --- /dev/null +++ b/e2e/rqlite_http_test.go @@ -0,0 +1,446 @@ +//go:build e2e + +package e2e + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" +) + +func TestRQLite_CreateTable(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP)", + table, + ) + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("create table request failed: %v", err) + } + + if status != http.StatusCreated && status != http.StatusOK { + t.Fatalf("expected status 201 or 200, got %d: %s", status, string(body)) + } +} + +func TestRQLite_InsertQuery(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Insert rows + insertReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf("INSERT INTO %s(name) VALUES ('alice')", table), + fmt.Sprintf("INSERT INTO %s(name) VALUES ('bob')", table), + }, + }, + } + + _, status, err = insertReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("insert failed: status %d, err %v", status, err) + } + + // Query rows + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT name FROM %s ORDER BY id", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil { + t.Fatalf("query failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var queryResp map[string]interface{} + if err := DecodeJSON(body, &queryResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if queryResp["count"].(float64) < 2 { + t.Fatalf("expected at least 2 rows, got %v", queryResp["count"]) + } +} + +func TestRQLite_DropTable(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, note TEXT)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Drop table + dropReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/drop-table", + Body: map[string]interface{}{ + "table": table, + }, + } + + _, status, err = dropReq.Do(ctx) + if err != nil { + t.Fatalf("drop table request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + // Verify table doesn't exist via schema + schemaReq := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/rqlite/schema", + } + + body, status, err := schemaReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Logf("warning: failed to verify schema after drop: status %d, err %v", status, err) + return + } + + var schemaResp map[string]interface{} + if err := DecodeJSON(body, &schemaResp); err != nil { + t.Logf("warning: failed to decode schema response: %v", err) + return + } + + if tables, ok := schemaResp["tables"].([]interface{}); ok { + for _, tbl := range tables { + tblMap := tbl.(map[string]interface{}) + if tblMap["name"] == table { + t.Fatalf("table %s still present after drop", table) + } + } + } +} + +func TestRQLite_Schema(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/rqlite/schema", + } + + body, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("schema request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var resp map[string]interface{} + if err := DecodeJSON(body, &resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if _, ok := resp["tables"]; !ok { + t.Fatalf("expected 'tables' field in response") + } +} + +func TestRQLite_MalformedSQL(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + req := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": "SELECT * FROM nonexistent_table WHERE invalid syntax", + }, + } + + _, status, err := req.Do(ctx) + if err != nil { + t.Fatalf("request failed: %v", err) + } + + // Should get an error response + if status == http.StatusOK { + t.Fatalf("expected error for malformed SQL, got status 200") + } +} + +func TestRQLite_LargeTransaction(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + table := GenerateTableName() + schema := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY AUTOINCREMENT, value INTEGER)", + table, + ) + + // Create table + createReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": schema, + }, + } + + _, status, err := createReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create table failed: status %d, err %v", status, err) + } + + // Generate large transaction (50 inserts) + var statements []string + for i := 0; i < 50; i++ { + statements = append(statements, fmt.Sprintf("INSERT INTO %s(value) VALUES (%d)", table, i)) + } + + txReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": statements, + }, + } + + _, status, err = txReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("large transaction failed: status %d, err %v", status, err) + } + + // Verify all rows were inserted + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT COUNT(*) as count FROM %s", table), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("count query failed: status %d, err %v", status, err) + } + + var countResp map[string]interface{} + if err := DecodeJSON(body, &countResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + // Extract count from result + if rows, ok := countResp["rows"].([]interface{}); ok && len(rows) > 0 { + row := rows[0].([]interface{}) + if row[0].(float64) != 50 { + t.Fatalf("expected 50 rows, got %v", row[0]) + } + } +} + +func TestRQLite_ForeignKeyMigration(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + orgsTable := GenerateTableName() + usersTable := GenerateTableName() + + // Create base tables + createOrgsReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT)", + orgsTable, + ), + }, + } + + _, status, err := createOrgsReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create orgs table failed: status %d, err %v", status, err) + } + + createUsersReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/create-table", + Body: map[string]interface{}{ + "schema": fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age TEXT)", + usersTable, + ), + }, + } + + _, status, err = createUsersReq.Do(ctx) + if err != nil || (status != http.StatusCreated && status != http.StatusOK) { + t.Fatalf("create users table failed: status %d, err %v", status, err) + } + + // Seed data + seedReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf("INSERT INTO %s(id,name) VALUES (1,'org')", orgsTable), + fmt.Sprintf("INSERT INTO %s(id,name,org_id,age) VALUES (1,'alice',1,'30')", usersTable), + }, + }, + } + + _, status, err = seedReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("seed transaction failed: status %d, err %v", status, err) + } + + // Migrate: change age type and add FK + migrationReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/transaction", + Body: map[string]interface{}{ + "statements": []string{ + fmt.Sprintf( + "CREATE TABLE %s_new (id INTEGER PRIMARY KEY, name TEXT, org_id INTEGER, age INTEGER, FOREIGN KEY(org_id) REFERENCES %s(id) ON DELETE CASCADE)", + usersTable, orgsTable, + ), + fmt.Sprintf( + "INSERT INTO %s_new (id,name,org_id,age) SELECT id,name,org_id, CAST(age AS INTEGER) FROM %s", + usersTable, usersTable, + ), + fmt.Sprintf("DROP TABLE %s", usersTable), + fmt.Sprintf("ALTER TABLE %s_new RENAME TO %s", usersTable, usersTable), + }, + }, + } + + _, status, err = migrationReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("migration transaction failed: status %d, err %v", status, err) + } + + // Verify data is intact + queryReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/query", + Body: map[string]interface{}{ + "sql": fmt.Sprintf("SELECT name, org_id, age FROM %s", usersTable), + }, + } + + body, status, err := queryReq.Do(ctx) + if err != nil || status != http.StatusOK { + t.Fatalf("query after migration failed: status %d, err %v", status, err) + } + + var queryResp map[string]interface{} + if err := DecodeJSON(body, &queryResp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if queryResp["count"].(float64) != 1 { + t.Fatalf("expected 1 row after migration, got %v", queryResp["count"]) + } +} + +func TestRQLite_DropNonexistentTable(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + dropReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/rqlite/drop-table", + Body: map[string]interface{}{ + "table": "nonexistent_table_xyz_" + fmt.Sprintf("%d", time.Now().UnixNano()), + }, + } + + _, status, err := dropReq.Do(ctx) + if err != nil { + t.Logf("warning: drop nonexistent table request failed: %v", err) + return + } + + // Should get an error (400 or 404) + if status == http.StatusOK { + t.Logf("warning: expected error for dropping nonexistent table, got status 200") + } +} diff --git a/e2e/storage_http_test.go b/e2e/storage_http_test.go new file mode 100644 index 0000000..ee8fb0c --- /dev/null +++ b/e2e/storage_http_test.go @@ -0,0 +1,550 @@ +//go:build e2e + +package e2e + +import ( + "bytes" + "context" + "io" + "mime/multipart" + "net/http" + "testing" + "time" +) + +// uploadFile is a helper to upload a file to storage +func uploadFile(t *testing.T, ctx context.Context, content []byte, filename string) string { + t.Helper() + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // Add auth headers + if jwt := GetJWT(); jwt != "" { + req.Header.Set("Authorization", "Bearer "+jwt) + } else if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + result, err := DecodeJSONFromReader(resp.Body) + if err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + return result["cid"].(string) +} + +// DecodeJSON is a helper to decode JSON from io.ReadCloser +func DecodeJSONFromReader(rc io.ReadCloser) (map[string]interface{}, error) { + defer rc.Close() + body, err := io.ReadAll(rc) + if err != nil { + return nil, err + } + var result map[string]interface{} + err = DecodeJSON(body, &result) + return result, err +} + +func TestStorage_UploadText(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("Hello, IPFS!") + filename := "test.txt" + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &result); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if result["cid"] == nil { + t.Fatalf("expected cid in response") + } + + if result["name"] != filename { + t.Fatalf("expected name %q, got %v", filename, result["name"]) + } + + if result["size"] == nil || result["size"].(float64) <= 0 { + t.Fatalf("expected positive size") + } +} + +func TestStorage_UploadBinary(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // PNG header + content := []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a} + filename := "test.png" + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &result); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if result["cid"] == nil { + t.Fatalf("expected cid in response") + } +} + +func TestStorage_UploadLarge(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Create 1MB file + content := bytes.Repeat([]byte("x"), 1024*1024) + filename := "large.bin" + + // Create multipart form + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", filename) + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("upload failed with status %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &result); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if result["size"] != float64(1024*1024) { + t.Fatalf("expected size %d, got %v", 1024*1024, result["size"]) + } +} + +func TestStorage_PinUnpin(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("test content for pinning") + + // Upload file first + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", "pin-test.txt") + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create upload request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload failed: %v", err) + } + defer resp.Body.Close() + + var uploadResult map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &uploadResult); err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + cid := uploadResult["cid"].(string) + + // Pin the file + pinReq := &HTTPRequest{ + Method: http.MethodPost, + URL: GetGatewayURL() + "/v1/storage/pin", + Body: map[string]interface{}{ + "cid": cid, + "name": "pinned-file", + }, + } + + body2, status, err := pinReq.Do(ctx) + if err != nil { + t.Fatalf("pin failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body2)) + } + + var pinResult map[string]interface{} + if err := DecodeJSON(body2, &pinResult); err != nil { + t.Fatalf("failed to decode pin response: %v", err) + } + + if pinResult["cid"] != cid { + t.Fatalf("expected cid %s, got %v", cid, pinResult["cid"]) + } + + // Unpin the file + unpinReq := &HTTPRequest{ + Method: http.MethodDelete, + URL: GetGatewayURL() + "/v1/storage/unpin/" + cid, + } + + body3, status, err := unpinReq.Do(ctx) + if err != nil { + t.Fatalf("unpin failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d: %s", status, string(body3)) + } +} + +func TestStorage_Status(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("test content for status") + + // Upload file first + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", "status-test.txt") + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create upload request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload failed: %v", err) + } + defer resp.Body.Close() + + var uploadResult map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &uploadResult); err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + cid := uploadResult["cid"].(string) + + // Get status + statusReq := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/storage/status/" + cid, + } + + statusBody, status, err := statusReq.Do(ctx) + if err != nil { + t.Fatalf("status request failed: %v", err) + } + + if status != http.StatusOK { + t.Fatalf("expected status 200, got %d", status) + } + + var statusResult map[string]interface{} + if err := DecodeJSON(statusBody, &statusResult); err != nil { + t.Fatalf("failed to decode status response: %v", err) + } + + if statusResult["cid"] != cid { + t.Fatalf("expected cid %s, got %v", cid, statusResult["cid"]) + } +} + +func TestStorage_InvalidCID(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + statusReq := &HTTPRequest{ + Method: http.MethodGet, + URL: GetGatewayURL() + "/v1/storage/status/QmInvalidCID123456789", + } + + _, status, err := statusReq.Do(ctx) + if err != nil { + t.Fatalf("status request failed: %v", err) + } + + if status != http.StatusNotFound { + t.Logf("warning: expected status 404 for invalid CID, got %d", status) + } +} + +func TestStorage_GetByteRange(t *testing.T) { + SkipIfMissingGateway(t) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + content := []byte("0123456789abcdefghijklmnopqrstuvwxyz") + + // Upload file first + var buf bytes.Buffer + writer := multipart.NewWriter(&buf) + + part, err := writer.CreateFormFile("file", "range-test.txt") + if err != nil { + t.Fatalf("failed to create form file: %v", err) + } + + if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { + t.Fatalf("failed to copy data: %v", err) + } + + if err := writer.Close(); err != nil { + t.Fatalf("failed to close writer: %v", err) + } + + // Create upload request + req, err := http.NewRequestWithContext(ctx, http.MethodPost, GetGatewayURL()+"/v1/storage/upload", &buf) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + if apiKey := GetAPIKey(); apiKey != "" { + req.Header.Set("Authorization", "Bearer "+apiKey) + } + + client := NewHTTPClient(5 * time.Minute) + resp, err := client.Do(req) + if err != nil { + t.Fatalf("upload failed: %v", err) + } + defer resp.Body.Close() + + var uploadResult map[string]interface{} + body, _ := io.ReadAll(resp.Body) + if err := DecodeJSON(body, &uploadResult); err != nil { + t.Fatalf("failed to decode upload response: %v", err) + } + + cid := uploadResult["cid"].(string) + + // Get full content + getReq, err := http.NewRequestWithContext(ctx, http.MethodGet, GetGatewayURL()+"/v1/storage/get/"+cid, nil) + if err != nil { + t.Fatalf("failed to create get request: %v", err) + } + + if apiKey := GetAPIKey(); apiKey != "" { + getReq.Header.Set("Authorization", "Bearer "+apiKey) + } + + resp, err = client.Do(getReq) + if err != nil { + t.Fatalf("get request failed: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected status 200, got %d", resp.StatusCode) + } + + retrievedContent, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if !bytes.Equal(retrievedContent, content) { + t.Fatalf("content mismatch: expected %q, got %q", string(content), string(retrievedContent)) + } +} diff --git a/go.mod b/go.mod index d2cec41..fc2d5c2 100644 --- a/go.mod +++ b/go.mod @@ -72,6 +72,7 @@ require ( github.com/libp2p/go-yamux/v5 v5.0.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-sqlite3 v1.14.32 // indirect github.com/miekg/dns v1.1.66 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect diff --git a/go.sum b/go.sum index 69f9844..d97cebb 100644 --- a/go.sum +++ b/go.sum @@ -246,6 +246,8 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= +github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go index aa26f26..45a6555 100644 --- a/pkg/discovery/discovery.go +++ b/pkg/discovery/discovery.go @@ -115,8 +115,8 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) { continue } - // Filter addresses to only include configured listen addresses, not ephemeral ports - // Ephemeral ports are typically > 32768, so we filter those out + // Include all addresses with valid TCP ports + // This allows test clients and dynamic allocations to participate in peer discovery filteredAddrs := make([]multiaddr.Multiaddr, 0) for _, addr := range addrs { // Extract TCP port from multiaddr @@ -124,9 +124,9 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) { if err == nil { portNum, err := strconv.Atoi(port) if err == nil { - // Only include ports that are reasonable (not ephemeral ports > 32768) - // Common LibP2P ports are typically < 10000 - if portNum > 0 && portNum <= 32767 { + // Accept all valid TCP ports > 0, including ephemeral ports + // Test clients and dynamic allocations may use high ports (> 32768) + if portNum > 0 { filteredAddrs = append(filteredAddrs, addr) } } else { @@ -141,7 +141,7 @@ func (d *Manager) handlePeerExchangeStream(s network.Stream) { // If no addresses remain after filtering, skip this peer if len(filteredAddrs) == 0 { - d.logger.Debug("No valid addresses after filtering ephemeral ports", + d.logger.Debug("No valid addresses after filtering", zap.String("peer_id", pid.String()[:8]+"..."), zap.Int("original_count", len(addrs))) continue diff --git a/pkg/gateway/cache_handlers.go b/pkg/gateway/cache_handlers.go index 56ea931..938a215 100644 --- a/pkg/gateway/cache_handlers.go +++ b/pkg/gateway/cache_handlers.go @@ -3,6 +3,7 @@ package gateway import ( "context" "encoding/json" + "errors" "fmt" "net/http" "strings" @@ -74,7 +75,8 @@ func (g *Gateway) cacheGetHandler(w http.ResponseWriter, r *http.Request) { gr, err := dm.Get(ctx, req.Key) if err != nil { - if err == olriclib.ErrKeyNotFound { + // Check for key not found error - handle both wrapped and direct errors + if errors.Is(err, olriclib.ErrKeyNotFound) || err.Error() == "key not found" || strings.Contains(err.Error(), "key not found") { writeError(w, http.StatusNotFound, "key not found") return } @@ -372,7 +374,8 @@ func (g *Gateway) cacheDeleteHandler(w http.ResponseWriter, r *http.Request) { deletedCount, err := dm.Delete(ctx, req.Key) if err != nil { - if err == olriclib.ErrKeyNotFound { + // Check for key not found error - handle both wrapped and direct errors + if errors.Is(err, olriclib.ErrKeyNotFound) || err.Error() == "key not found" || strings.Contains(err.Error(), "key not found") { writeError(w, http.StatusNotFound, "key not found") return } diff --git a/pkg/gateway/storage_handlers.go b/pkg/gateway/storage_handlers.go index 03e3f91..3bb74d6 100644 --- a/pkg/gateway/storage_handlers.go +++ b/pkg/gateway/storage_handlers.go @@ -401,7 +401,19 @@ func (g *Gateway) networkPeersHandler(w http.ResponseWriter, r *http.Request) { writeError(w, http.StatusInternalServerError, err.Error()) return } - writeJSON(w, http.StatusOK, peers) + // Flatten peer addresses into a list of multiaddr strings + // Each PeerInfo can have multiple addresses, so we collect all of them + peerAddrs := make([]string, 0) + for _, peer := range peers { + // Add peer ID as /p2p/ multiaddr format + if peer.ID != "" { + peerAddrs = append(peerAddrs, "/p2p/"+peer.ID) + } + // Add all addresses for this peer + peerAddrs = append(peerAddrs, peer.Addresses...) + } + // Return peers in expected format: {"peers": ["/p2p/...", "/ip4/...", ...]} + writeJSON(w, http.StatusOK, map[string]any{"peers": peerAddrs}) } func (g *Gateway) networkConnectHandler(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/ipfs/client.go b/pkg/ipfs/client.go index adbef91..7f517e5 100644 --- a/pkg/ipfs/client.go +++ b/pkg/ipfs/client.go @@ -151,6 +151,14 @@ func (c *Client) GetPeerCount(ctx context.Context) (int, error) { // Add adds content to IPFS and returns the CID func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddResponse, error) { + // Track original size by reading into memory first + // This allows us to return the actual byte count, not the DAG size + data, err := io.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf("failed to read data: %w", err) + } + originalSize := int64(len(data)) + // Create multipart form request for IPFS Cluster API var buf bytes.Buffer writer := multipart.NewWriter(&buf) @@ -161,7 +169,7 @@ func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddRe return nil, fmt.Errorf("failed to create form file: %w", err) } - if _, err := io.Copy(part, reader); err != nil { + if _, err := io.Copy(part, bytes.NewReader(data)); err != nil { return nil, fmt.Errorf("failed to copy data: %w", err) } @@ -215,6 +223,9 @@ func (c *Client) Add(ctx context.Context, reader io.Reader, name string) (*AddRe last.Name = name } + // Override size with original byte count (not DAG size) + last.Size = originalSize + return &last, nil } diff --git a/pkg/pubsub/publish.go b/pkg/pubsub/publish.go index 0653d09..3fb309a 100644 --- a/pkg/pubsub/publish.go +++ b/pkg/pubsub/publish.go @@ -3,6 +3,7 @@ package pubsub import ( "context" "fmt" + "time" ) // Publish publishes a message to a topic @@ -27,6 +28,29 @@ func (m *Manager) Publish(ctx context.Context, topic string, data []byte) error return fmt.Errorf("failed to get topic for publishing: %w", err) } + // Wait briefly for mesh formation if no peers are in the mesh yet + // GossipSub needs time to discover peers and form a mesh + // With FloodPublish enabled, messages will be flooded to all connected peers + // but we still want to give the mesh a chance to form for better delivery + waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) + defer waitCancel() + + // Check if we have peers in the mesh, wait up to 2 seconds for mesh formation + meshFormed := false + for i := 0; i < 20 && !meshFormed; i++ { + peers := libp2pTopic.ListPeers() + if len(peers) > 0 { + meshFormed = true + break // Mesh has formed, proceed with publish + } + select { + case <-waitCtx.Done(): + meshFormed = true // Timeout, proceed anyway (FloodPublish will handle it) + case <-time.After(100 * time.Millisecond): + // Continue waiting + } + } + // Publish message if err := libp2pTopic.Publish(ctx, data); err != nil { return fmt.Errorf("failed to publish message: %w", err) diff --git a/pkg/pubsub/subscriptions.go b/pkg/pubsub/subscriptions.go index 10fe26f..9b9584c 100644 --- a/pkg/pubsub/subscriptions.go +++ b/pkg/pubsub/subscriptions.go @@ -24,24 +24,21 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa } namespacedTopic := fmt.Sprintf("%s.%s", ns, topic) - m.mu.Lock() - defer m.mu.Unlock() - - // Check if we already have a subscription for this topic - topicSub, exists := m.subscriptions[namespacedTopic] - - if exists { - // Add handler to existing subscription + // Fast path: we already have a subscription for this topic + m.mu.RLock() + if existing := m.subscriptions[namespacedTopic]; existing != nil { + m.mu.RUnlock() handlerID := generateHandlerID() - topicSub.mu.Lock() - topicSub.handlers[handlerID] = handler - topicSub.refCount++ - topicSub.mu.Unlock() + existing.mu.Lock() + existing.handlers[handlerID] = handler + existing.refCount++ + existing.mu.Unlock() return nil } + m.mu.RUnlock() - // Create new subscription - // Get or create topic + // Create the underlying libp2p subscription without holding the manager lock + // to avoid re-entrant lock attempts libp2pTopic, err := m.getOrCreateTopic(namespacedTopic) if err != nil { return fmt.Errorf("failed to get topic: %w", err) @@ -58,26 +55,44 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa // Create topic subscription with initial handler handlerID := generateHandlerID() - topicSub = &topicSubscription{ + newSub := &topicSubscription{ sub: sub, cancel: cancel, handlers: map[HandlerID]MessageHandler{handlerID: handler}, refCount: 1, } - m.subscriptions[namespacedTopic] = topicSub + + // Install the subscription (or merge if another goroutine beat us) + m.mu.Lock() + if existing := m.subscriptions[namespacedTopic]; existing != nil { + m.mu.Unlock() + // Another goroutine already created a subscription while we were working + // Clean up our resources and add to theirs + cancel() + sub.Cancel() + handlerID := generateHandlerID() + existing.mu.Lock() + existing.handlers[handlerID] = handler + existing.refCount++ + existing.mu.Unlock() + return nil + } + m.subscriptions[namespacedTopic] = newSub + m.mu.Unlock() + + // Announce topic interest to help with peer discovery + go m.announceTopicInterest(namespacedTopic) // Start message handler goroutine (fan-out to all handlers) - go func() { - defer func() { - sub.Cancel() - }() + go func(ts *topicSubscription) { + defer ts.sub.Cancel() for { select { case <-subCtx.Done(): return default: - msg, err := sub.Next(subCtx) + msg, err := ts.sub.Next(subCtx) if err != nil { if subCtx.Err() != nil { return // Context cancelled @@ -85,13 +100,18 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa continue } + // Filter out internal discovery messages + if string(msg.Data) == "PEER_DISCOVERY_PING" { + continue + } + // Broadcast to all handlers - topicSub.mu.RLock() - handlers := make([]MessageHandler, 0, len(topicSub.handlers)) - for _, h := range topicSub.handlers { + ts.mu.RLock() + handlers := make([]MessageHandler, 0, len(ts.handlers)) + for _, h := range ts.handlers { handlers = append(handlers, h) } - topicSub.mu.RUnlock() + ts.mu.RUnlock() // Call each handler (don't block on individual handler errors) for _, h := range handlers { @@ -102,7 +122,7 @@ func (m *Manager) Subscribe(ctx context.Context, topic string, handler MessageHa } } } - }() + }(newSub) return nil } diff --git a/pkg/rqlite/gateway.go b/pkg/rqlite/gateway.go index 369b2e1..1855079 100644 --- a/pkg/rqlite/gateway.go +++ b/pkg/rqlite/gateway.go @@ -210,10 +210,11 @@ type txOp struct { } type transactionRequest struct { - Ops []txOp `json:"ops"` - ReturnResults bool `json:"return_results"` // if true, returns per-op results - StopOnError bool `json:"stop_on_error"` // default true in tx - PartialResults bool `json:"partial_results"` // ignored for actual TX (atomic); kept for API symmetry + Ops []txOp `json:"ops"` + Statements []string `json:"statements"` // legacy format: array of SQL strings (treated as exec ops) + ReturnResults bool `json:"return_results"` // if true, returns per-op results + StopOnError bool `json:"stop_on_error"` // default true in tx + PartialResults bool `json:"partial_results"` // ignored for actual TX (atomic); kept for API symmetry } // -------------------- @@ -427,8 +428,21 @@ func (g *HTTPGateway) handleTransaction(w http.ResponseWriter, r *http.Request) return } var body transactionRequest - if err := json.NewDecoder(r.Body).Decode(&body); err != nil || len(body.Ops) == 0 { - writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?}") + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?} or {statements:[sql...]}") + return + } + + // Support legacy "statements" format by converting to ops + if len(body.Statements) > 0 && len(body.Ops) == 0 { + body.Ops = make([]txOp, len(body.Statements)) + for i, stmt := range body.Statements { + body.Ops[i] = txOp{Kind: "exec", SQL: stmt} + } + } + + if len(body.Ops) == 0 { + writeError(w, http.StatusBadRequest, "invalid body: {ops:[{kind,sql,args?}], return_results?} or {statements:[sql...]}") return } ctx, cancel := g.withTimeout(r.Context()) @@ -501,8 +515,8 @@ func (g *HTTPGateway) handleSchema(w http.ResponseWriter, r *http.Request) { return } writeJSON(w, http.StatusOK, map[string]any{ - "objects": rows, - "count": len(rows), + "tables": rows, + "count": len(rows), }) }