//go:build e2e

package cluster_test

import (
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/DeBrosOfficial/network/e2e"
	"github.com/stretchr/testify/require"
)

// =============================================================================
// STRICT OLRIC CACHE DISTRIBUTION TESTS
// These tests verify that Olric cache data is properly distributed across nodes.
// Tests FAIL if distribution doesn't work - no skips, no warnings.
// =============================================================================

// getOlricNodeAddresses returns the HTTP addresses of the Olric nodes.
// Note: the Olric HTTP API typically listens on port 3320 for the main cluster.
func getOlricNodeAddresses() []string {
	// In dev mode, we have a single Olric instance.
	// In production, each node runs its own Olric instance.
	return []string{
		"http://localhost:3320",
	}
}
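
// checkOlricHTTPReachable is an illustrative sketch, not part of the original
// suite: it shows one way the addresses returned by getOlricNodeAddresses
// could be probed before running distribution assertions. It assumes each
// Olric node fronts a plain HTTP endpoint that answers GET requests on "/";
// that path is an assumption, not a documented Olric route.
func checkOlricHTTPReachable(addrs []string) map[string]bool {
	client := &http.Client{Timeout: 2 * time.Second}
	reachable := make(map[string]bool, len(addrs))
	for _, addr := range addrs {
		resp, err := client.Get(addr + "/")
		if err == nil {
			resp.Body.Close()
		}
		// A node counts as reachable if the HTTP round trip succeeded,
		// regardless of the status code returned.
		reachable[addr] = err == nil
	}
	return reachable
}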

// TestOlric_BasicDistribution verifies cache operations work across the cluster.
func TestOlric_BasicDistribution(t *testing.T) {
	// Note: Not using SkipIfMissingGateway() since LoadTestEnv() creates its own API key
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "FAIL: Could not load test environment")
	require.NotEmpty(t, env.APIKey, "FAIL: No API key available")

	dmap := fmt.Sprintf("dist_test_%d", time.Now().UnixNano())

	t.Run("Put_and_get_from_same_gateway", func(t *testing.T) {
		key := fmt.Sprintf("key_%d", time.Now().UnixNano())
		value := fmt.Sprintf("value_%d", time.Now().UnixNano())

		// Put
		err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
		require.NoError(t, err, "FAIL: Could not put value to cache")

		// Get
		retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.NoError(t, err, "FAIL: Could not get value from cache")
		require.Equal(t, value, retrieved, "FAIL: Retrieved value doesn't match")

		t.Logf(" ✓ Put/Get works: %s = %s", key, value)
	})

	t.Run("Multiple_keys_distributed", func(t *testing.T) {
		// Put multiple keys (should be distributed across partitions)
		keys := make(map[string]string)
		for i := 0; i < 20; i++ {
			key := fmt.Sprintf("dist_key_%d_%d", i, time.Now().UnixNano())
			value := fmt.Sprintf("dist_value_%d", i)
			keys[key] = value

			err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
			require.NoError(t, err, "FAIL: Could not put key %s", key)
		}

		t.Logf(" Put 20 keys to cache")

		// Verify all keys are retrievable
		for key, expectedValue := range keys {
			retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
			require.NoError(t, err, "FAIL: Could not get key %s", key)
			require.Equal(t, expectedValue, retrieved, "FAIL: Value mismatch for key %s", key)
		}

		t.Logf(" ✓ All 20 keys are retrievable")
	})
}

// TestOlric_ConcurrentAccess verifies cache handles concurrent operations correctly.
func TestOlric_ConcurrentAccess(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "FAIL: Could not load test environment")

	dmap := fmt.Sprintf("concurrent_test_%d", time.Now().UnixNano())

	t.Run("Concurrent_writes_to_same_key", func(t *testing.T) {
		key := fmt.Sprintf("concurrent_key_%d", time.Now().UnixNano())

		// Launch multiple goroutines writing to the same key
		done := make(chan error, 10)
		for i := 0; i < 10; i++ {
			go func(idx int) {
				value := fmt.Sprintf("concurrent_value_%d", idx)
				err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
				done <- err
			}(i)
		}

		// Wait for all writes
		var errors []error
		for i := 0; i < 10; i++ {
			if err := <-done; err != nil {
				errors = append(errors, err)
			}
		}

		require.Empty(t, errors, "FAIL: %d concurrent writes failed: %v", len(errors), errors)

		// The key should have ONE of the values (last write wins)
		retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.NoError(t, err, "FAIL: Could not get key after concurrent writes")
		require.Contains(t, retrieved, "concurrent_value_", "FAIL: Value doesn't match expected pattern")

		t.Logf(" ✓ Concurrent writes succeeded, final value: %s", retrieved)
	})

	t.Run("Concurrent_reads_and_writes", func(t *testing.T) {
		key := fmt.Sprintf("rw_key_%d", time.Now().UnixNano())
		initialValue := "initial_value"

		// Set initial value
		err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, initialValue)
		require.NoError(t, err, "FAIL: Could not set initial value")

		// Launch concurrent readers and writers. Separate result channels are
		// used so read and write failures are attributed correctly regardless
		// of the order in which the goroutines finish.
		readDone := make(chan error, 10)
		writeDone := make(chan error, 10)

		// 10 readers
		for i := 0; i < 10; i++ {
			go func() {
				_, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
				readDone <- err
			}()
		}

		// 10 writers
		for i := 0; i < 10; i++ {
			go func(idx int) {
				value := fmt.Sprintf("updated_value_%d", idx)
				err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
				writeDone <- err
			}(i)
		}

		// Wait for all operations
		var readErrors, writeErrors []error
		for i := 0; i < 10; i++ {
			if err := <-readDone; err != nil {
				readErrors = append(readErrors, err)
			}
			if err := <-writeDone; err != nil {
				writeErrors = append(writeErrors, err)
			}
		}

		require.Empty(t, readErrors, "FAIL: %d reads failed", len(readErrors))
		require.Empty(t, writeErrors, "FAIL: %d writes failed", len(writeErrors))

		t.Logf(" ✓ Concurrent read/write operations succeeded")
	})
}

// TestOlric_NamespaceClusterCache verifies cache works in namespace-specific clusters.
func TestOlric_NamespaceClusterCache(t *testing.T) {
	// Create a new namespace
	namespace := fmt.Sprintf("cache-test-%d", time.Now().UnixNano())

	env, err := e2e.LoadTestEnvWithNamespace(namespace)
	require.NoError(t, err, "FAIL: Could not create namespace for cache test")
	require.NotEmpty(t, env.APIKey, "FAIL: No API key")

	t.Logf("Created namespace %s", namespace)

	dmap := fmt.Sprintf("ns_cache_%d", time.Now().UnixNano())

	t.Run("Cache_operations_work_in_namespace", func(t *testing.T) {
		key := fmt.Sprintf("ns_key_%d", time.Now().UnixNano())
		value := fmt.Sprintf("ns_value_%d", time.Now().UnixNano())

		// Put using namespace API key
		err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
		require.NoError(t, err, "FAIL: Could not put value in namespace cache")

		// Get
		retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.NoError(t, err, "FAIL: Could not get value from namespace cache")
		require.Equal(t, value, retrieved, "FAIL: Value mismatch in namespace cache")

		t.Logf(" ✓ Namespace cache operations work: %s = %s", key, value)
	})

	// Check if namespace Olric instances are running (the memberlist port sits
	// at offset 3 in each namespace node's port block: 10003, 10008, ...)
	var nsOlricPorts []int
	for port := 10003; port <= 10098; port += 5 {
		conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 1*time.Second)
		if err == nil {
			conn.Close()
			nsOlricPorts = append(nsOlricPorts, port)
		}
	}

	if len(nsOlricPorts) > 0 {
		t.Logf("Found %d namespace Olric memberlist ports: %v", len(nsOlricPorts), nsOlricPorts)

		t.Run("Namespace_Olric_nodes_connected", func(t *testing.T) {
			// Verify all namespace Olric nodes can be reached
			for _, port := range nsOlricPorts {
				conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", port), 2*time.Second)
				require.NoError(t, err, "FAIL: Cannot connect to namespace Olric on port %d", port)
				conn.Close()
				t.Logf(" ✓ Namespace Olric memberlist on port %d is reachable", port)
			}
		})
	}
}

// TestOlric_DataConsistency verifies data remains consistent across operations.
func TestOlric_DataConsistency(t *testing.T) {
	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "FAIL: Could not load test environment")

	dmap := fmt.Sprintf("consistency_test_%d", time.Now().UnixNano())

	t.Run("Update_preserves_latest_value", func(t *testing.T) {
		key := fmt.Sprintf("update_key_%d", time.Now().UnixNano())

		// Write multiple times
		for i := 1; i <= 5; i++ {
			value := fmt.Sprintf("version_%d", i)
			err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
			require.NoError(t, err, "FAIL: Could not update key to version %d", i)
		}

		// Final read should return latest version
		retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.NoError(t, err, "FAIL: Could not read final value")
		require.Equal(t, "version_5", retrieved, "FAIL: Latest version not preserved")

		t.Logf(" ✓ Latest value preserved after 5 updates")
	})

	t.Run("Delete_removes_key", func(t *testing.T) {
		key := fmt.Sprintf("delete_key_%d", time.Now().UnixNano())
		value := "to_be_deleted"

		// Put
		err := e2e.PutToOlric(env.GatewayURL, env.APIKey, dmap, key, value)
		require.NoError(t, err, "FAIL: Could not put value")

		// Verify it exists
		retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.NoError(t, err, "FAIL: Could not get value before delete")
		require.Equal(t, value, retrieved)

		// Delete (POST with JSON body)
		deleteBody := map[string]interface{}{
			"dmap": dmap,
			"key":  key,
		}
		deleteBytes, _ := json.Marshal(deleteBody)
		req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/cache/delete", strings.NewReader(string(deleteBytes)))
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", "Bearer "+env.APIKey)
		client := &http.Client{Timeout: 10 * time.Second}
		resp, err := client.Do(req)
		require.NoError(t, err, "FAIL: Delete request failed")
		resp.Body.Close()
		require.True(t, resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNoContent,
			"FAIL: Delete returned unexpected status %d", resp.StatusCode)

		// Verify key is gone
		_, err = e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.Error(t, err, "FAIL: Key should not exist after delete")
		require.Contains(t, err.Error(), "not found", "FAIL: Expected 'not found' error")

		t.Logf(" ✓ Delete properly removes key")
	})
}
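
// deleteFromOlricRaw is a minimal sketch, not one of the original e2e helpers:
// it simply factors out the raw request built inline in the Delete_removes_key
// subtest above - POST /v1/cache/delete with a JSON body of {"dmap", "key"}
// and a Bearer API key, accepting both 200 OK and 204 No Content as success.
func deleteFromOlricRaw(gatewayURL, apiKey, dmap, key string) error {
	body, err := json.Marshal(map[string]interface{}{
		"dmap": dmap,
		"key":  key,
	})
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", gatewayURL+"/v1/cache/delete", strings.NewReader(string(body)))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("delete returned unexpected status %d", resp.StatusCode)
	}
	return nil
}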

// TestOlric_TTLExpiration verifies TTL expiration works.
// NOTE: TTL is currently parsed but not applied by the cache handler (TODO in set_handler.go).
// This test is skipped until TTL support is fully implemented.
func TestOlric_TTLExpiration(t *testing.T) {
	t.Skip("TTL support not yet implemented in cache handler - see set_handler.go lines 88-98")

	env, err := e2e.LoadTestEnv()
	require.NoError(t, err, "FAIL: Could not load test environment")

	dmap := fmt.Sprintf("ttl_test_%d", time.Now().UnixNano())

	t.Run("Key_expires_after_TTL", func(t *testing.T) {
		key := fmt.Sprintf("ttl_key_%d", time.Now().UnixNano())
		value := "expires_soon"
		ttlSeconds := 3

		// Put with TTL (TTL is a duration string like "3s", "1m", etc.)
		reqBody := map[string]interface{}{
			"dmap":  dmap,
			"key":   key,
			"value": value,
			"ttl":   fmt.Sprintf("%ds", ttlSeconds),
		}
		bodyBytes, _ := json.Marshal(reqBody)

		req, _ := http.NewRequest("POST", env.GatewayURL+"/v1/cache/put", strings.NewReader(string(bodyBytes)))
		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", "Bearer "+env.APIKey)

		client := &http.Client{Timeout: 10 * time.Second}
		resp, err := client.Do(req)
		require.NoError(t, err, "FAIL: Put with TTL failed")
		resp.Body.Close()
		require.True(t, resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusCreated,
			"FAIL: Put returned status %d", resp.StatusCode)

		// Verify key exists immediately
		retrieved, err := e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.NoError(t, err, "FAIL: Could not get key immediately after put")
		require.Equal(t, value, retrieved)
		t.Logf(" Key exists immediately after put")

		// Wait for TTL to expire (plus buffer)
		time.Sleep(time.Duration(ttlSeconds+2) * time.Second)

		// Key should be gone
		_, err = e2e.GetFromOlric(env.GatewayURL, env.APIKey, dmap, key)
		require.Error(t, err, "FAIL: Key should have expired after %d seconds", ttlSeconds)
		require.Contains(t, err.Error(), "not found", "FAIL: Expected 'not found' error after TTL")

		t.Logf(" ✓ Key expired after %d seconds as expected", ttlSeconds)
	})
}
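
// putToOlricWithTTL is an illustrative sketch of the request shape exercised
// by the TTL test above, not an official e2e helper: it POSTs to /v1/cache/put
// with a JSON body carrying dmap, key, value and a Go-style duration string as
// "ttl" (e.g. "3s"). Per the TODO referenced above, the handler currently
// parses the TTL but does not apply it, so the entry is stored without expiry.
func putToOlricWithTTL(gatewayURL, apiKey, dmap, key, value string, ttl time.Duration) error {
	body, err := json.Marshal(map[string]interface{}{
		"dmap":  dmap,
		"key":   key,
		"value": value,
		"ttl":   ttl.String(),
	})
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", gatewayURL+"/v1/cache/put", strings.NewReader(string(body)))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("put returned unexpected status %d", resp.StatusCode)
	}
	return nil
}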