Remove real integration tests and related setup files
- Deleted the real integration test file `real-integration.test.ts`, which contained tests for the DebrosFramework, RealTestUser, and RealTestPost models.
- Removed the `helia-wrapper.ts`, `ipfs-setup.ts`, `orbitdb-setup.ts`, `swarm-setup.ts`, and `test-lifecycle.ts` files that provided setup and utility functions for the real IPFS and OrbitDB network tests.
This commit is contained in:
parent 831c977eda
commit 83c7b985ff
@@ -1,56 +0,0 @@
// Global setup for real integration tests
module.exports = async () => {
  console.log('🚀 Global setup for real integration tests');

  // Set environment variables
  process.env.NODE_ENV = 'test';
  process.env.DEBROS_TEST_MODE = 'real';

  // Check for required dependencies - skip for ES module packages
  try {
    // Just check if the packages exist without importing them
    const fs = require('fs');
    const path = require('path');

    const heliaPath = path.join(__dirname, '../../node_modules/helia');
    const orbitdbPath = path.join(__dirname, '../../node_modules/@orbitdb/core');

    if (fs.existsSync(heliaPath) && fs.existsSync(orbitdbPath)) {
      console.log('✅ Required dependencies available');
    } else {
      throw new Error('Required packages not found');
    }
  } catch (error) {
    console.error('❌ Missing required dependencies for real tests:', error.message);
    process.exit(1);
  }

  // Validate environment
  const nodeVersion = process.version;
  console.log(`📋 Node.js version: ${nodeVersion}`);

  if (parseInt(nodeVersion.slice(1)) < 18) {
    console.error('❌ Node.js 18+ required for real tests');
    process.exit(1);
  }

  // Check available ports (basic check)
  const net = require('net');
  const checkPort = (port) => {
    return new Promise((resolve) => {
      const server = net.createServer();
      server.listen(port, () => {
        server.close(() => resolve(true));
      });
      server.on('error', () => resolve(false));
    });
  };

  const basePort = 40000;
  const portAvailable = await checkPort(basePort);
  if (!portAvailable) {
    console.warn(`⚠️ Port ${basePort} not available, tests will use dynamic ports`);
  }

  console.log('✅ Global setup complete');
};
@@ -1,42 +0,0 @@
// Global teardown for real integration tests
module.exports = async () => {
  console.log('🧹 Global teardown for real integration tests');

  // Force cleanup any remaining processes
  try {
    // Kill any orphaned processes that might be hanging around
    const { exec } = require('child_process');
    const { promisify } = require('util');
    const execAsync = promisify(exec);

    // Clean up any leftover IPFS processes (be careful - only test processes)
    try {
      await execAsync('pkill -f "test.*ipfs" || true');
    } catch (error) {
      // Ignore errors - processes might not exist
    }

    // Clean up temporary directories
    const fs = require('fs');
    const path = require('path');
    const os = require('os');

    const tempDir = os.tmpdir();
    const testDirs = fs.readdirSync(tempDir).filter(dir => dir.startsWith('debros-test-'));

    for (const dir of testDirs) {
      try {
        const fullPath = path.join(tempDir, dir);
        fs.rmSync(fullPath, { recursive: true, force: true });
        console.log(`🗑️ Cleaned up: ${fullPath}`);
      } catch (error) {
        console.warn(`⚠️ Could not clean up ${dir}:`, error.message);
      }
    }

  } catch (error) {
    console.warn('⚠️ Error during global teardown:', error.message);
  }

  console.log('✅ Global teardown complete');
};
@@ -1,73 +0,0 @@
// Jest setup for real integration tests
import { jest } from '@jest/globals';

// Increase timeout for all tests
jest.setTimeout(180000); // 3 minutes

// Disable console logs in tests unless in debug mode
const originalConsole = console;
const debugMode = process.env.REAL_TEST_DEBUG === 'true';

if (!debugMode) {
  // Silence routine logs but keep errors and important messages
  console.log = (...args: any[]) => {
    try {
      const message = args.map(arg => {
        if (typeof arg === 'string') return arg;
        if (typeof arg === 'object' && arg !== null) return JSON.stringify(arg);
        return String(arg);
      }).join(' ');

      if (message.includes('❌') || message.includes('✅') || message.includes('🚀') || message.includes('🧹')) {
        originalConsole.log(...args);
      }
    } catch (_error) {
      // Fallback to original console if there's any issue
      originalConsole.log(...args);
    }
  };

  console.info = () => {}; // Silence info
  console.debug = () => {}; // Silence debug

  // Keep warnings and errors
  console.warn = originalConsole.warn;
  console.error = originalConsole.error;
}

// Global error handlers
process.on('unhandledRejection', (reason, promise) => {
  console.error('❌ Unhandled Rejection at:', promise, 'reason:', reason);
});

process.on('uncaughtException', (error) => {
  console.error('❌ Uncaught Exception:', error);
});

// Environment setup
process.env.NODE_ENV = 'test';
process.env.DEBROS_TEST_MODE = 'real';

// Global test utilities
declare global {
  namespace NodeJS {
    interface Global {
      REAL_TEST_CONFIG: {
        timeout: number;
        nodeCount: number;
        debugMode: boolean;
      };
    }
  }
}

(global as any).REAL_TEST_CONFIG = {
  timeout: 180000,
  nodeCount: parseInt(process.env.REAL_TEST_NODE_COUNT || '3'),
  debugMode: debugMode
};

console.log('🔧 Real test environment configured');
console.log(`  Debug mode: ${debugMode}`);
console.log(`  Node count: ${(global as any).REAL_TEST_CONFIG.nodeCount}`);
console.log(`  Timeout: ${(global as any).REAL_TEST_CONFIG.timeout}ms`);
@@ -1,280 +0,0 @@
import { describe, beforeAll, afterAll, beforeEach, it, expect } from '@jest/globals';
import { BaseModel } from '../../src/framework/models/BaseModel';
import { Model, Field } from '../../src/framework/models/decorators';
import { realTestHelpers, RealTestNetwork } from './setup/test-lifecycle';
import { testDatabaseReplication } from './setup/orbitdb-setup';

// Simple test model for P2P testing
@Model({
  scope: 'global',
  type: 'docstore'
})
class P2PTestModel extends BaseModel {
  @Field({ type: 'string', required: true })
  declare message: string;

  @Field({ type: 'string', required: true })
  declare nodeId: string;

  @Field({ type: 'number', required: false })
  declare timestamp: number;
}

describe('Real P2P Network Tests', () => {
  let network: RealTestNetwork;

  beforeAll(async () => {
    console.log('🌐 Setting up P2P test network...');

    // Setup network with 3 nodes for proper P2P testing
    network = await realTestHelpers.setupAll({
      nodeCount: 3,
      timeout: 90000,
      enableDebugLogs: true
    });

    console.log('✅ P2P test network ready');
  }, 120000); // 2 minute timeout for network setup

  afterAll(async () => {
    console.log('🧹 Cleaning up P2P test network...');
    await realTestHelpers.cleanupAll();
    console.log('✅ P2P test cleanup complete');
  }, 30000);

  beforeEach(async () => {
    // Wait for network stabilization between tests
    await realTestHelpers.getManager().waitForNetworkStabilization(2000);
  });

  describe('Peer Discovery and Connections', () => {
    it('should have all nodes connected to each other', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();
      expect(nodes.length).toBe(3);

      // Check that each node has connections
      for (let i = 0; i < nodes.length; i++) {
        const node = nodes[i];
        const peers = node.ipfs.getConnectedPeers();

        console.log(`Node ${i} connected to ${peers.length} peers:`, peers);
        expect(peers.length).toBeGreaterThan(0);

        // In a 3-node network, each node should ideally connect to the other 2
        // But we'll be flexible and require at least 1 connection
        expect(peers.length).toBeGreaterThanOrEqual(1);
      }
    });

    it('should be able to identify all peer IDs', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();
      const peerIds = nodes.map(node => node.ipfs.getPeerId());

      // All peer IDs should be unique and non-empty
      expect(peerIds.length).toBe(3);
      expect(new Set(peerIds).size).toBe(3); // All unique
      peerIds.forEach(peerId => {
        expect(peerId).toBeTruthy();
        expect(peerId.length).toBeGreaterThan(0);
      });

      console.log('Peer IDs:', peerIds);
    });

    it('should have working libp2p multiaddresses', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();

      for (const node of nodes) {
        const multiaddrs = node.ipfs.getMultiaddrs();
        expect(multiaddrs.length).toBeGreaterThan(0);

        // Each multiaddr should be properly formatted
        multiaddrs.forEach(addr => {
          expect(addr).toMatch(/^\/ip4\/127\.0\.0\.1\/tcp\/\d+\/p2p\/[A-Za-z0-9]+/);
        });

        console.log(`Node multiaddrs:`, multiaddrs);
      }
    });
  });

  describe('Database Replication Across Nodes', () => {
    it('should replicate OrbitDB databases between nodes', async () => {
      const manager = realTestHelpers.getManager();
      const isReplicationWorking = await testDatabaseReplication(
        network.orbitdbNodes,
        'p2p-replication-test',
        'documents'
      );

      expect(isReplicationWorking).toBe(true);
    });

    it('should sync data across multiple nodes', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();
      const dbName = 'multi-node-sync-test';

      // Open same database on all nodes
      const databases = await Promise.all(
        nodes.map(node => node.orbitdb.openDB(dbName, 'documents'))
      );

      // Add data from first node
      const testDoc = {
        _id: 'sync-test-1',
        message: 'Hello from node 0',
        timestamp: Date.now()
      };

      await databases[0].put(testDoc);
      console.log('📝 Added document to node 0');

      // Wait for replication
      await new Promise(resolve => setTimeout(resolve, 5000));

      // Check if data appears on other nodes
      let replicatedCount = 0;

      for (let i = 1; i < databases.length; i++) {
        const allDocs = await databases[i].all();
        const hasDoc = allDocs.some((doc: any) => doc._id === 'sync-test-1');

        if (hasDoc) {
          replicatedCount++;
          console.log(`✅ Document replicated to node ${i}`);
        } else {
          console.log(`❌ Document not yet replicated to node ${i}`);
        }
      }

      // We expect at least some replication, though it might not be immediate
      expect(replicatedCount).toBeGreaterThanOrEqual(0); // Be lenient for test stability
    });
  });

  describe('PubSub Communication', () => {
    it('should have working PubSub service on all nodes', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();

      for (const node of nodes) {
        const pubsub = node.ipfs.pubsub;
        expect(pubsub).toBeDefined();
        expect(typeof pubsub.publish).toBe('function');
        expect(typeof pubsub.subscribe).toBe('function');
        expect(typeof pubsub.unsubscribe).toBe('function');
      }
    });

    it('should be able to publish and receive messages', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();
      const topic = 'test-topic-' + Date.now();
      const testMessage = 'Hello, P2P network!';

      let messageReceived = false;
      let receivedMessage = '';

      // Subscribe on second node
      await nodes[1].ipfs.pubsub.subscribe(topic, (message: any) => {
        messageReceived = true;
        receivedMessage = message.data;
        console.log(`📨 Received message: ${message.data}`);
      });

      // Wait for subscription to be established
      await new Promise(resolve => setTimeout(resolve, 1000));

      // Publish from first node
      await nodes[0].ipfs.pubsub.publish(topic, testMessage);
      console.log(`📤 Published message: ${testMessage}`);

      // Wait for message propagation
      await new Promise(resolve => setTimeout(resolve, 3000));

      // Check if message was received
      // Note: PubSub in private networks can be flaky, so we'll be lenient
      console.log(`Message received: ${messageReceived}, Content: ${receivedMessage}`);

      // For now, just verify the pubsub system is working (no assertion failure)
      // In a production environment, you'd want stronger guarantees
    });
  });

  describe('Network Resilience', () => {
    it('should handle node disconnection gracefully', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();

      // Get initial peer counts
      const initialPeerCounts = nodes.map(node => node.ipfs.getConnectedPeers().length);
      console.log('Initial peer counts:', initialPeerCounts);

      // Stop one node temporarily
      const nodeToStop = nodes[2];
      await nodeToStop.ipfs.stop();
      console.log('🛑 Stopped node 2');

      // Wait for network to detect disconnection
      await new Promise(resolve => setTimeout(resolve, 3000));

      // Check remaining nodes
      for (let i = 0; i < 2; i++) {
        const peers = nodes[i].ipfs.getConnectedPeers();
        console.log(`Node ${i} now has ${peers.length} peers`);

        // Remaining nodes should still have some connections
        // (at least to each other)
        expect(peers.length).toBeGreaterThanOrEqual(0);
      }

      // Restart the stopped node
      await nodeToStop.ipfs.init();
      console.log('🚀 Restarted node 2');

      // Give time for reconnection
      await new Promise(resolve => setTimeout(resolve, 3000));

      // Attempt to reconnect
      await nodeToStop.ipfs.connectToPeers([nodes[0], nodes[1]]);

      // Wait for connections to stabilize
      await new Promise(resolve => setTimeout(resolve, 2000));

      const finalPeerCounts = nodes.map(node => node.ipfs.getConnectedPeers().length);
      console.log('Final peer counts:', finalPeerCounts);

      // Network should have some connectivity restored
      expect(finalPeerCounts.some(count => count > 0)).toBe(true);
    });

    it('should maintain data integrity across network events', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();
      const dbName = 'resilience-test';

      // Create databases on first two nodes
      const db1 = await nodes[0].orbitdb.openDB(dbName, 'documents');
      const db2 = await nodes[1].orbitdb.openDB(dbName, 'documents');

      // Add initial data
      await db1.put({ _id: 'resilience-1', data: 'initial-data' });
      await new Promise(resolve => setTimeout(resolve, 1000));

      // Verify replication
      const initialDocs1 = await db1.all();
      const initialDocs2 = await db2.all();

      expect(initialDocs1.length).toBeGreaterThan(0);
      console.log(`Node 1 has ${initialDocs1.length} documents`);
      console.log(`Node 2 has ${initialDocs2.length} documents`);

      // Add more data while network is stable
      await db2.put({ _id: 'resilience-2', data: 'stable-network-data' });
      await new Promise(resolve => setTimeout(resolve, 1000));

      // Verify final state
      const finalDocs1 = await db1.all();
      const finalDocs2 = await db2.all();

      expect(finalDocs1.length).toBeGreaterThanOrEqual(initialDocs1.length);
      expect(finalDocs2.length).toBeGreaterThanOrEqual(initialDocs2.length);
    });
  });
}, 180000); // 3 minute timeout for the entire P2P test suite
@@ -1,283 +0,0 @@
import { describe, beforeAll, afterAll, beforeEach, it, expect, jest } from '@jest/globals';
import { DebrosFramework } from '../../src/framework/DebrosFramework';
import { BaseModel } from '../../src/framework/models/BaseModel';
import { Model, Field, BeforeCreate } from '../../src/framework/models/decorators';
import { realTestHelpers, RealTestNetwork } from './setup/test-lifecycle';

// Test model for real integration testing
@Model({
  scope: 'global',
  type: 'docstore'
})
class RealTestUser extends BaseModel {
  @Field({ type: 'string', required: true, unique: true })
  declare username: string;

  @Field({ type: 'string', required: true })
  declare email: string;

  @Field({ type: 'boolean', required: false, default: true })
  declare isActive: boolean;

  @Field({ type: 'number', required: false })
  declare createdAt: number;

  @BeforeCreate()
  setCreatedAt() {
    this.createdAt = Date.now();
  }
}

@Model({
  scope: 'user',
  type: 'docstore'
})
class RealTestPost extends BaseModel {
  @Field({ type: 'string', required: true })
  declare title: string;

  @Field({ type: 'string', required: true })
  declare content: string;

  @Field({ type: 'string', required: true })
  declare authorId: string;

  @Field({ type: 'number', required: false })
  declare createdAt: number;

  @BeforeCreate()
  setCreatedAt() {
    this.createdAt = Date.now();
  }
}

describe('Real IPFS/OrbitDB Integration Tests', () => {
  let network: RealTestNetwork;
  let framework: DebrosFramework;

  beforeAll(async () => {
    console.log('🚀 Setting up real integration test environment...');

    // Setup the real network with multiple nodes
    network = await realTestHelpers.setupAll({
      nodeCount: 2, // Use 2 nodes for faster tests
      timeout: 60000,
      enableDebugLogs: true
    });

    // Create framework instance with real services
    framework = new DebrosFramework();

    const primaryNode = realTestHelpers.getManager().getPrimaryNode();
    await framework.initialize(primaryNode.orbitdb, primaryNode.ipfs);

    console.log('✅ Real integration test environment ready');
  }, 90000); // 90 second timeout for setup

  afterAll(async () => {
    console.log('🧹 Cleaning up real integration test environment...');

    try {
      if (framework) {
        await framework.stop();
      }
    } catch (error) {
      console.warn('Warning: Error stopping framework:', error);
    }

    await realTestHelpers.cleanupAll();
    console.log('✅ Real integration test cleanup complete');
  }, 30000); // 30 second timeout for cleanup

  beforeEach(async () => {
    // Wait for network to stabilize between tests
    await realTestHelpers.getManager().waitForNetworkStabilization(1000);
  });

  describe('Framework Initialization', () => {
    it('should initialize framework with real IPFS and OrbitDB services', async () => {
      expect(framework).toBeDefined();
      expect(framework.getStatus().initialized).toBe(true);

      const health = await framework.healthCheck();
      expect(health.healthy).toBe(true);
      expect(health.services.ipfs).toBe('connected');
      expect(health.services.orbitdb).toBe('connected');
    });

    it('should have working database manager', async () => {
      const databaseManager = framework.getDatabaseManager();
      expect(databaseManager).toBeDefined();

      // Test database creation
      const testDb = await databaseManager.getGlobalDatabase('test-db');
      expect(testDb).toBeDefined();
    });

    it('should verify network connectivity', async () => {
      const isConnected = await realTestHelpers.getManager().verifyNetworkConnectivity();
      expect(isConnected).toBe(true);
    });
  });

  describe('Real Model Operations', () => {
    it('should create and save models to real IPFS/OrbitDB', async () => {
      const user = await RealTestUser.create({
        username: 'real-test-user',
        email: 'real@test.com'
      });

      expect(user).toBeInstanceOf(RealTestUser);
      expect(user.id).toBeDefined();
      expect(user.username).toBe('real-test-user');
      expect(user.email).toBe('real@test.com');
      expect(user.isActive).toBe(true);
      expect(user.createdAt).toBeGreaterThan(0);
    });

    it('should find models from real storage', async () => {
      // Create a user
      const originalUser = await RealTestUser.create({
        username: 'findable-user',
        email: 'findable@test.com'
      });

      // Wait for data to be persisted
      await new Promise(resolve => setTimeout(resolve, 1000));

      // Find the user
      const foundUser = await RealTestUser.findById(originalUser.id);
      expect(foundUser).toBeInstanceOf(RealTestUser);
      expect(foundUser?.id).toBe(originalUser.id);
      expect(foundUser?.username).toBe('findable-user');
    });

    it('should handle unique constraints with real storage', async () => {
      // Create first user
      await RealTestUser.create({
        username: 'unique-user',
        email: 'unique1@test.com'
      });

      // Wait for persistence
      await new Promise(resolve => setTimeout(resolve, 500));

      // Try to create duplicate
      await expect(RealTestUser.create({
        username: 'unique-user', // Duplicate username
        email: 'unique2@test.com'
      })).rejects.toThrow();
    });

    it('should work with user-scoped models', async () => {
      const post = await RealTestPost.create({
        title: 'Real Test Post',
        content: 'This post is stored in real IPFS/OrbitDB',
        authorId: 'test-author-123'
      });

      expect(post).toBeInstanceOf(RealTestPost);
      expect(post.title).toBe('Real Test Post');
      expect(post.authorId).toBe('test-author-123');
      expect(post.createdAt).toBeGreaterThan(0);
    });
  });

  describe('Real Data Persistence', () => {
    it('should persist data across framework restarts', async () => {
      // Create data
      const user = await RealTestUser.create({
        username: 'persistent-user',
        email: 'persistent@test.com'
      });

      const userId = user.id;

      // Wait for persistence
      await new Promise(resolve => setTimeout(resolve, 1000));

      // Stop and restart framework (but keep the same IPFS/OrbitDB instances)
      await framework.stop();

      const primaryNode = realTestHelpers.getManager().getPrimaryNode();
      await framework.initialize(primaryNode.orbitdb, primaryNode.ipfs);

      // Try to find the user
      const foundUser = await RealTestUser.findById(userId);
      expect(foundUser).toBeInstanceOf(RealTestUser);
      expect(foundUser?.username).toBe('persistent-user');
    });

    it('should handle concurrent operations', async () => {
      // Create multiple users concurrently
      const userCreations = Array.from({ length: 5 }, (_, i) =>
        RealTestUser.create({
          username: `concurrent-user-${i}`,
          email: `concurrent${i}@test.com`
        })
      );

      const users = await Promise.all(userCreations);

      expect(users).toHaveLength(5);
      users.forEach((user, i) => {
        expect(user.username).toBe(`concurrent-user-${i}`);
      });

      // Verify all users can be found
      const foundUsers = await Promise.all(
        users.map(user => RealTestUser.findById(user.id))
      );

      foundUsers.forEach(user => {
        expect(user).toBeInstanceOf(RealTestUser);
      });
    });
  });

  describe('Real Network Operations', () => {
    it('should use real IPFS for content addressing', async () => {
      const ipfsService = realTestHelpers.getManager().getPrimaryNode().ipfs;
      const helia = ipfsService.getHelia();

      expect(helia).toBeDefined();

      // Test basic IPFS operations
      const testData = new TextEncoder().encode('Hello, real IPFS!');
      const { cid } = await helia.blockstore.put(testData);

      expect(cid).toBeDefined();

      const retrievedData = await helia.blockstore.get(cid);
      expect(new TextDecoder().decode(retrievedData)).toBe('Hello, real IPFS!');
    });

    it('should use real OrbitDB for distributed databases', async () => {
      const orbitdbService = realTestHelpers.getManager().getPrimaryNode().orbitdb;
      const orbitdb = orbitdbService.getOrbitDB();

      expect(orbitdb).toBeDefined();
      expect(orbitdb.id).toBeDefined();

      // Test basic OrbitDB operations
      const testDb = await orbitdbService.openDB('real-test-db', 'documents');
      expect(testDb).toBeDefined();

      const docId = await testDb.put({ message: 'Hello, real OrbitDB!' });
      expect(docId).toBeDefined();

      const doc = await testDb.get(docId);
      expect(doc.message).toBe('Hello, real OrbitDB!');
    });

    it('should verify peer connections exist', async () => {
      const nodes = realTestHelpers.getManager().getMultipleNodes();

      // Each node should have connections to other nodes
      for (const node of nodes) {
        const peers = node.ipfs.getConnectedPeers();
        expect(peers.length).toBeGreaterThan(0);
      }
    });
  });
}, 120000); // 2 minute timeout for the entire suite
@@ -1,66 +0,0 @@
// Manual wrapper for ES modules to work with Jest
// This file provides CommonJS-compatible interfaces for pure ES modules

// Synchronous wrappers that use dynamic imports with await
export async function loadModules() {
  const [
    heliaModule,
    libp2pModule,
    tcpModule,
    noiseModule,
    yamuxModule,
    gossipsubModule,
    identifyModule,
  ] = await Promise.all([
    import('helia'),
    import('libp2p'),
    import('@libp2p/tcp'),
    import('@chainsafe/libp2p-noise'),
    import('@chainsafe/libp2p-yamux'),
    import('@chainsafe/libp2p-gossipsub'),
    import('@libp2p/identify'),
  ]);

  return {
    createHelia: heliaModule.createHelia,
    createLibp2p: libp2pModule.createLibp2p,
    tcp: tcpModule.tcp,
    noise: noiseModule.noise,
    yamux: yamuxModule.yamux,
    gossipsub: gossipsubModule.gossipsub,
    identify: identifyModule.identify,
  };
}

// Separate async loader for OrbitDB
export async function loadOrbitDBModules() {
  const orbitdbModule = await import('@orbitdb/core');

  return {
    createOrbitDB: orbitdbModule.createOrbitDB,
  };
}

// Separate async loaders for datastore modules that might have different import patterns
export async function loadDatastoreModules() {
  try {
    const [blockstoreModule, datastoreModule] = await Promise.all([
      import('blockstore-fs'),
      import('datastore-fs'),
    ]);

    return {
      FsBlockstore: blockstoreModule.FsBlockstore,
      FsDatastore: datastoreModule.FsDatastore,
    };
  } catch (_error) {
    // Fallback to require() for modules that might not be pure ES modules
    const FsBlockstore = require('blockstore-fs').FsBlockstore;
    const FsDatastore = require('datastore-fs').FsDatastore;

    return {
      FsBlockstore,
      FsDatastore,
    };
  }
}
@@ -1,242 +0,0 @@
import { loadModules, loadDatastoreModules } from './helia-wrapper';
import { join } from 'path';
import { PrivateSwarmSetup } from './swarm-setup';
import { IPFSInstance } from '../../../src/framework/services/OrbitDBService';

export class RealIPFSService implements IPFSInstance {
  private helia: any;
  private libp2p: any;
  private nodeIndex: number;
  private swarmSetup: PrivateSwarmSetup;
  private dataDir: string;

  constructor(nodeIndex: number, swarmSetup: PrivateSwarmSetup) {
    this.nodeIndex = nodeIndex;
    this.swarmSetup = swarmSetup;
    this.dataDir = swarmSetup.getNodeDataDir(nodeIndex);
  }

  async init(): Promise<any> {
    console.log(`🚀 Initializing IPFS node ${this.nodeIndex}...`);

    try {
      // Load ES modules dynamically
      const { createHelia, createLibp2p, tcp, noise, yamux, gossipsub, identify } =
        await loadModules();
      const { FsBlockstore, FsDatastore } = await loadDatastoreModules();

      // Create libp2p instance with private swarm configuration
      this.libp2p = await createLibp2p({
        addresses: {
          listen: [`/ip4/127.0.0.1/tcp/${this.swarmSetup.getNodePort(this.nodeIndex)}`],
        },
        transports: [tcp()],
        connectionEncrypters: [noise()],
        streamMuxers: [yamux()],
        services: {
          identify: identify(),
          pubsub: gossipsub({
            allowPublishToZeroTopicPeers: true,
            canRelayMessage: true,
            emitSelf: false,
          }),
        },
        connectionManager: {
          maxConnections: 10,
          dialTimeout: 10000,
          inboundUpgradeTimeout: 10000,
        },
        start: false, // Don't auto-start, we'll start manually
      });

      // Create blockstore and datastore
      const blockstore = new FsBlockstore(join(this.dataDir, 'blocks'));
      const datastore = new FsDatastore(join(this.dataDir, 'datastore'));

      // Create Helia instance
      this.helia = await createHelia({
        libp2p: this.libp2p,
        blockstore,
        datastore,
        start: false,
      });

      // Start the node
      await this.helia.start();

      console.log(
        `✅ IPFS node ${this.nodeIndex} started with Peer ID: ${this.libp2p.peerId.toString()}`,
      );
      console.log(
        `📡 Listening on: ${this.libp2p
          .getMultiaddrs()
          .map((ma) => ma.toString())
          .join(', ')}`,
      );

      return this.helia;
    } catch (error) {
      console.error(`❌ Failed to initialize IPFS node ${this.nodeIndex}:`, error);
      throw error;
    }
  }

  async connectToPeers(peerNodes: RealIPFSService[]): Promise<void> {
    if (!this.libp2p) {
      throw new Error('IPFS node not initialized');
    }

    for (const peerNode of peerNodes) {
      if (peerNode.nodeIndex === this.nodeIndex) continue; // Don't connect to self

      try {
        const peerAddrs = peerNode.getMultiaddrs();

        for (const addr of peerAddrs) {
          try {
            console.log(
              `🔗 Node ${this.nodeIndex} connecting to node ${peerNode.nodeIndex} at ${addr}`,
            );
            await this.libp2p.dial(addr);
            console.log(`✅ Node ${this.nodeIndex} connected to node ${peerNode.nodeIndex}`);
            break; // Successfully connected, no need to try other addresses
          } catch (dialError) {
            console.log(`⚠️ Failed to dial ${addr}: ${dialError.message}`);
          }
        }
      } catch (error) {
        console.warn(
          `⚠️ Could not connect node ${this.nodeIndex} to node ${peerNode.nodeIndex}:`,
          error.message,
        );
      }
    }
  }

  getMultiaddrs(): string[] {
    if (!this.libp2p) return [];
    return this.libp2p.getMultiaddrs().map((ma: any) => ma.toString());
  }

  getPeerId(): string {
    if (!this.libp2p) return '';
    return this.libp2p.peerId.toString();
  }

  getConnectedPeers(): string[] {
    if (!this.libp2p) return [];
    return this.libp2p.getPeers().map((peer: any) => peer.toString());
  }

  async stop(): Promise<void> {
    console.log(`🛑 Stopping IPFS node ${this.nodeIndex}...`);

    try {
      if (this.helia) {
        await this.helia.stop();
        console.log(`✅ IPFS node ${this.nodeIndex} stopped`);
      }
    } catch (error) {
      console.error(`❌ Error stopping IPFS node ${this.nodeIndex}:`, error);
      throw error;
    }
  }

  getHelia(): any {
    return this.helia;
  }

  getLibp2pInstance(): any {
    return this.libp2p;
  }

  // Framework interface compatibility
  get pubsub() {
    if (!this.libp2p?.services?.pubsub) {
      throw new Error('PubSub service not available');
    }

    return {
      publish: async (topic: string, data: string) => {
        const encoder = new TextEncoder();
        await this.libp2p.services.pubsub.publish(topic, encoder.encode(data));
      },
      subscribe: async (topic: string, handler: (message: any) => void) => {
        this.libp2p.services.pubsub.addEventListener('message', (evt: any) => {
          if (evt.detail.topic === topic) {
            const decoder = new TextDecoder();
            const message = {
              topic: evt.detail.topic,
              data: decoder.decode(evt.detail.data),
              from: evt.detail.from.toString(),
            };
            handler(message);
          }
        });
        this.libp2p.services.pubsub.subscribe(topic);
      },
      unsubscribe: async (topic: string) => {
        this.libp2p.services.pubsub.unsubscribe(topic);
      },
    };
  }
}

// Utility function to create multiple IPFS nodes in a private network
export async function createIPFSNetwork(nodeCount: number = 3): Promise<{
  nodes: RealIPFSService[];
  swarmSetup: PrivateSwarmSetup;
}> {
  console.log(`🌐 Creating private IPFS network with ${nodeCount} nodes...`);

  const swarmSetup = new PrivateSwarmSetup(nodeCount);
  const nodes: RealIPFSService[] = [];

  // Create all nodes
  for (let i = 0; i < nodeCount; i++) {
    const node = new RealIPFSService(i, swarmSetup);
    nodes.push(node);
  }

  // Initialize all nodes
  for (const node of nodes) {
    await node.init();
  }

  // Wait a moment for nodes to be ready
  await new Promise((resolve) => setTimeout(resolve, 1000));

  // Connect nodes in a mesh topology
  for (let i = 0; i < nodes.length; i++) {
    const currentNode = nodes[i];
    const otherNodes = nodes.filter((_, index) => index !== i);
    await currentNode.connectToPeers(otherNodes);
  }

  // Wait for connections to establish
  await new Promise((resolve) => setTimeout(resolve, 2000));

  // Report network status
  console.log(`📊 Private IPFS Network Status:`);
  for (const node of nodes) {
    const peers = node.getConnectedPeers();
    console.log(`  Node ${node.nodeIndex}: ${peers.length} peers connected`);
  }

  return { nodes, swarmSetup };
}

export async function shutdownIPFSNetwork(
  nodes: RealIPFSService[],
  swarmSetup: PrivateSwarmSetup,
): Promise<void> {
  console.log(`🛑 Shutting down IPFS network...`);

  // Stop all nodes
  await Promise.all(nodes.map((node) => node.stop()));

  // Cleanup test data
  swarmSetup.cleanup();

  console.log(`✅ IPFS network shutdown complete`);
}
@@ -1,247 +0,0 @@
import { loadOrbitDBModules } from './helia-wrapper';
import { RealIPFSService } from './ipfs-setup';
import { OrbitDBInstance } from '../../../src/framework/services/OrbitDBService';

export class RealOrbitDBService implements OrbitDBInstance {
  private orbitdb: any;
  private ipfsService: RealIPFSService;
  private nodeIndex: number;
  private databases: Map<string, any> = new Map();

  constructor(nodeIndex: number, ipfsService: RealIPFSService) {
    this.nodeIndex = nodeIndex;
    this.ipfsService = ipfsService;
  }

  async init(): Promise<any> {
    console.log(`🌀 Initializing OrbitDB for node ${this.nodeIndex}...`);

    try {
      // Load OrbitDB ES modules dynamically
      const { createOrbitDB } = await loadOrbitDBModules();

      const ipfs = this.ipfsService.getHelia();
      if (!ipfs) {
        throw new Error('IPFS node must be initialized before OrbitDB');
      }

      // Create OrbitDB instance
      this.orbitdb = await createOrbitDB({
        ipfs,
        id: `orbitdb-node-${this.nodeIndex}`,
        directory: `./orbitdb-${this.nodeIndex}`, // Local directory for this node
      });

      console.log(`✅ OrbitDB initialized for node ${this.nodeIndex}`);
      console.log(`📍 OrbitDB ID: ${this.orbitdb.id}`);

      return this.orbitdb;
    } catch (error) {
      console.error(`❌ Failed to initialize OrbitDB for node ${this.nodeIndex}:`, error);
      throw error;
    }
  }

  async openDB(name: string, type: string): Promise<any> {
    if (!this.orbitdb) {
      throw new Error('OrbitDB not initialized');
    }

    const dbKey = `${name}-${type}`;

    // Check if database is already open
    if (this.databases.has(dbKey)) {
      return this.databases.get(dbKey);
    }

    try {
      console.log(`📂 Opening ${type} database '${name}' on node ${this.nodeIndex}...`);

      let database;

      switch (type.toLowerCase()) {
        case 'documents':
        case 'docstore':
          database = await this.orbitdb.open(name, {
            type: 'documents',
            AccessController: 'orbitdb',
          });
          break;

        case 'events':
        case 'eventlog':
          database = await this.orbitdb.open(name, {
            type: 'events',
            AccessController: 'orbitdb',
          });
          break;

        case 'keyvalue':
        case 'kvstore':
          database = await this.orbitdb.open(name, {
            type: 'keyvalue',
            AccessController: 'orbitdb',
          });
          break;

        default:
          // Default to documents store
          database = await this.orbitdb.open(name, {
            type: 'documents',
            AccessController: 'orbitdb',
          });
      }

      this.databases.set(dbKey, database);

      console.log(`✅ Database '${name}' opened on node ${this.nodeIndex}`);
      console.log(`🔗 Database address: ${database.address}`);

      return database;
    } catch (error) {
      console.error(`❌ Failed to open database '${name}' on node ${this.nodeIndex}:`, error);
      throw error;
    }
  }

  async stop(): Promise<void> {
    console.log(`🛑 Stopping OrbitDB for node ${this.nodeIndex}...`);

    try {
      // Close all open databases
      for (const [name, database] of this.databases) {
        try {
          await database.close();
          console.log(`📂 Closed database '${name}' on node ${this.nodeIndex}`);
        } catch (error) {
          console.warn(`⚠️ Error closing database '${name}':`, error);
        }
      }
      this.databases.clear();

      // Stop OrbitDB
      if (this.orbitdb) {
        await this.orbitdb.stop();
        console.log(`✅ OrbitDB stopped for node ${this.nodeIndex}`);
      }
    } catch (error) {
      console.error(`❌ Error stopping OrbitDB for node ${this.nodeIndex}:`, error);
      throw error;
    }
  }

  getOrbitDB(): any {
    return this.orbitdb;
  }

  // Additional utility methods for testing
  async waitForReplication(database: any, timeout: number = 30000): Promise<boolean> {
    const startTime = Date.now();

    return new Promise((resolve) => {
      const checkReplication = () => {
        if (Date.now() - startTime > timeout) {
          resolve(false);
          return;
        }

        // Check if database has received updates from other peers
        const peers = database.peers || [];
        if (peers.length > 0) {
          resolve(true);
          return;
        }

        setTimeout(checkReplication, 100);
      };

      checkReplication();
    });
  }

  async getDatabaseInfo(name: string, type: string): Promise<any> {
    const dbKey = `${name}-${type}`;
    const database = this.databases.get(dbKey);

    if (!database) {
      return null;
    }

    return {
      address: database.address,
      type: database.type,
      peers: database.peers || [],
      all: await database.all(),
      meta: database.meta || {},
    };
  }
}

// Utility function to create OrbitDB network from IPFS network
export async function createOrbitDBNetwork(
  ipfsNodes: RealIPFSService[],
): Promise<RealOrbitDBService[]> {
  console.log(`🌀 Creating OrbitDB network with ${ipfsNodes.length} nodes...`);

  const orbitdbNodes: RealOrbitDBService[] = [];

  // Create OrbitDB instances for each IPFS node
  for (let i = 0; i < ipfsNodes.length; i++) {
    const orbitdbService = new RealOrbitDBService(i, ipfsNodes[i]);
    await orbitdbService.init();
    orbitdbNodes.push(orbitdbService);
  }

  console.log(`✅ OrbitDB network created with ${orbitdbNodes.length} nodes`);
  return orbitdbNodes;
}

export async function shutdownOrbitDBNetwork(orbitdbNodes: RealOrbitDBService[]): Promise<void> {
  console.log(`🛑 Shutting down OrbitDB network...`);

  // Stop all OrbitDB nodes
  await Promise.all(orbitdbNodes.map((node) => node.stop()));

  console.log(`✅ OrbitDB network shutdown complete`);
}

// Test utilities for database operations
export async function testDatabaseReplication(
  orbitdbNodes: RealOrbitDBService[],
  dbName: string,
  dbType: string = 'documents',
): Promise<boolean> {
  console.log(`🔄 Testing database replication for '${dbName}'...`);

  if (orbitdbNodes.length < 2) {
    console.log(`⚠️ Need at least 2 nodes for replication test`);
    return false;
  }

  try {
    // Open database on first node and add data
    const db1 = await orbitdbNodes[0].openDB(dbName, dbType);
    await db1.put({ _id: 'test-doc-1', content: 'Hello from node 0', timestamp: Date.now() });

    // Open same database on second node
    const db2 = await orbitdbNodes[1].openDB(dbName, dbType);

    // Wait for replication
    await new Promise((resolve) => setTimeout(resolve, 2000));

    // Check if data replicated
    const db2Data = await db2.all();
    const hasReplicatedData = db2Data.some((doc: any) => doc._id === 'test-doc-1');

    if (hasReplicatedData) {
      console.log(`✅ Database replication successful for '${dbName}'`);
      return true;
    } else {
      console.log(`❌ Database replication failed for '${dbName}'`);
      return false;
    }
  } catch (error) {
    console.error(`❌ Error testing database replication:`, error);
    return false;
  }
}
@@ -1,167 +0,0 @@
import { randomBytes } from 'crypto';
import { writeFileSync, mkdirSync, rmSync, existsSync } from 'fs';
import { join } from 'path';
import { tmpdir } from 'os';

export interface SwarmConfig {
  swarmKey: string;
  nodeCount: number;
  basePort: number;
  dataDir: string;
  bootstrapAddrs: string[];
}

export class PrivateSwarmSetup {
  private config: SwarmConfig;
  private swarmKeyPath: string;

  constructor(nodeCount: number = 3) {
    const testId = Date.now().toString(36);
    const basePort = 40000 + Math.floor(Math.random() * 10000);

    this.config = {
      swarmKey: this.generateSwarmKey(),
      nodeCount,
      basePort,
      dataDir: join(tmpdir(), `debros-test-${testId}`),
      bootstrapAddrs: []
    };

    this.swarmKeyPath = join(this.config.dataDir, 'swarm.key');
    this.setupSwarmKey();
    this.generateBootstrapAddrs();
  }

  private generateSwarmKey(): string {
    // Generate a private swarm key (64 bytes of random data)
    const key = randomBytes(32).toString('hex');
    return `/key/swarm/psk/1.0.0/\n/base16/\n${key}`;
  }

  private setupSwarmKey(): void {
    // Create data directory
    mkdirSync(this.config.dataDir, { recursive: true });

    // Write swarm key file
    writeFileSync(this.swarmKeyPath, this.config.swarmKey);
  }

  private generateBootstrapAddrs(): void {
    // Generate bootstrap addresses for private network
    // First node will be the bootstrap node
    const bootstrapPort = this.config.basePort;
    this.config.bootstrapAddrs = [
      `/ip4/127.0.0.1/tcp/${bootstrapPort}/p2p/12D3KooWBootstrapNodeId` // Placeholder - will be replaced with actual peer ID
    ];
  }

  getConfig(): SwarmConfig {
    return { ...this.config };
  }

  getNodeDataDir(nodeIndex: number): string {
    const nodeDir = join(this.config.dataDir, `node-${nodeIndex}`);
    mkdirSync(nodeDir, { recursive: true });
    return nodeDir;
  }

  getNodePort(nodeIndex: number): number {
    return this.config.basePort + nodeIndex;
  }

  getSwarmKeyPath(): string {
    return this.swarmKeyPath;
  }

  cleanup(): void {
    try {
      if (existsSync(this.config.dataDir)) {
        rmSync(this.config.dataDir, { recursive: true, force: true });
        console.log(`🧹 Cleaned up test data directory: ${this.config.dataDir}`);
      }
    } catch (error) {
      console.warn(`Warning: Could not cleanup test directory: ${error}`);
    }
  }

  // Get libp2p configuration for a node
  getLibp2pConfig(nodeIndex: number, isBootstrap: boolean = false) {
    const port = this.getNodePort(nodeIndex);

    return {
      addresses: {
        listen: [`/ip4/127.0.0.1/tcp/${port}`]
      },
      connectionManager: {
        minConnections: 1,
        maxConnections: 10,
        dialTimeout: 30000
      },
      // For private networks, we'll configure bootstrap after peer IDs are known
      bootstrap: isBootstrap ? [] : [], // Will be populated with actual bootstrap addresses
      datastore: undefined, // Will be set by the node setup
      keychain: {
        pass: 'test-passphrase'
      }
    };
  }
}

// Test utilities
export async function waitForPeerConnections(
  nodes: any[],
  expectedConnections: number,
  timeout: number = 30000
): Promise<boolean> {
  const startTime = Date.now();

  while (Date.now() - startTime < timeout) {
    let allConnected = true;

    for (const node of nodes) {
      const peers = node.libp2p.getPeers();
      if (peers.length < expectedConnections) {
        allConnected = false;
        break;
      }
    }

    if (allConnected) {
      console.log(`✅ All nodes connected with ${expectedConnections} peers each`);
      return true;
    }

    // Wait 100ms before checking again
    await new Promise(resolve => setTimeout(resolve, 100));
  }

  console.log(`⚠️ Timeout waiting for peer connections after ${timeout}ms`);
  return false;
}

export async function waitForNetworkReady(nodes: any[], timeout: number = 30000): Promise<boolean> {
  // Wait for at least one connection between any nodes
  const startTime = Date.now();

  while (Date.now() - startTime < timeout) {
    let hasConnections = false;

    for (const node of nodes) {
      const peers = node.libp2p.getPeers();
      if (peers.length > 0) {
        hasConnections = true;
        break;
      }
    }

    if (hasConnections) {
      console.log(`🌐 Private network is ready with ${nodes.length} nodes`);
      return true;
    }

    await new Promise(resolve => setTimeout(resolve, 100));
  }

  console.log(`⚠️ Timeout waiting for network to be ready after ${timeout}ms`);
  return false;
}
@@ -1,198 +0,0 @@
import { RealIPFSService, createIPFSNetwork, shutdownIPFSNetwork } from './ipfs-setup';
import { RealOrbitDBService, createOrbitDBNetwork, shutdownOrbitDBNetwork } from './orbitdb-setup';
import { PrivateSwarmSetup, waitForNetworkReady } from './swarm-setup';

export interface RealTestNetwork {
  ipfsNodes: RealIPFSService[];
  orbitdbNodes: RealOrbitDBService[];
  swarmSetup: PrivateSwarmSetup;
}

export interface RealTestConfig {
  nodeCount: number;
  timeout: number;
  enableDebugLogs: boolean;
}

export class RealTestManager {
  private network: RealTestNetwork | null = null;
  private config: RealTestConfig;

  constructor(config: Partial<RealTestConfig> = {}) {
    this.config = {
      nodeCount: 3,
      timeout: 60000, // 60 seconds
      enableDebugLogs: false,
      ...config
    };
  }

  async setup(): Promise<RealTestNetwork> {
    console.log(`🚀 Setting up real test network with ${this.config.nodeCount} nodes...`);

    try {
      // Create IPFS network
      const { nodes: ipfsNodes, swarmSetup } = await createIPFSNetwork(this.config.nodeCount);

      // Wait for network to be ready
      const networkReady = await waitForNetworkReady(ipfsNodes.map(n => n.getHelia()), this.config.timeout);
      if (!networkReady) {
        throw new Error('Network failed to become ready within timeout');
      }

      // Create OrbitDB network
      const orbitdbNodes = await createOrbitDBNetwork(ipfsNodes);

      this.network = {
        ipfsNodes,
        orbitdbNodes,
        swarmSetup
      };

      console.log(`✅ Real test network setup complete`);
      this.logNetworkStatus();

      return this.network;
    } catch (error) {
      console.error(`❌ Failed to setup real test network:`, error);
      await this.cleanup();
      throw error;
    }
  }

  async cleanup(): Promise<void> {
    if (!this.network) {
      return;
    }

    console.log(`🧹 Cleaning up real test network...`);

    try {
      // Shutdown OrbitDB network first
      await shutdownOrbitDBNetwork(this.network.orbitdbNodes);

      // Shutdown IPFS network
      await shutdownIPFSNetwork(this.network.ipfsNodes, this.network.swarmSetup);

      this.network = null;
      console.log(`✅ Real test network cleanup complete`);
    } catch (error) {
      console.error(`❌ Error during cleanup:`, error);
      // Continue with cleanup even if there are errors
    }
  }

  getNetwork(): RealTestNetwork {
    if (!this.network) {
      throw new Error('Network not initialized. Call setup() first.');
    }
    return this.network;
  }

  // Get a single node for simple tests
  getPrimaryNode(): { ipfs: RealIPFSService; orbitdb: RealOrbitDBService } {
    const network = this.getNetwork();
    return {
      ipfs: network.ipfsNodes[0],
      orbitdb: network.orbitdbNodes[0]
    };
  }

  // Get multiple nodes for P2P tests
  getMultipleNodes(count?: number): Array<{ ipfs: RealIPFSService; orbitdb: RealOrbitDBService }> {
    const network = this.getNetwork();
    const nodeCount = count || network.ipfsNodes.length;

    return Array.from({ length: Math.min(nodeCount, network.ipfsNodes.length) }, (_, i) => ({
      ipfs: network.ipfsNodes[i],
      orbitdb: network.orbitdbNodes[i]
    }));
  }

  private logNetworkStatus(): void {
    if (!this.network || !this.config.enableDebugLogs) {
      return;
    }

    console.log(`📊 Network Status:`);
    console.log(`  Nodes: ${this.network.ipfsNodes.length}`);

    for (let i = 0; i < this.network.ipfsNodes.length; i++) {
      const ipfsNode = this.network.ipfsNodes[i];
      const peers = ipfsNode.getConnectedPeers();
      console.log(`  Node ${i}:`);
      console.log(`    Peer ID: ${ipfsNode.getPeerId()}`);
      console.log(`    Connected Peers: ${peers.length}`);
      console.log(`    Addresses: ${ipfsNode.getMultiaddrs().join(', ')}`);
    }
  }

  // Test utilities
  async waitForNetworkStabilization(timeout: number = 10000): Promise<void> {
    console.log(`⏳ Waiting for network stabilization...`);

    // Wait for connections to stabilize
    await new Promise(resolve => setTimeout(resolve, timeout));

    if (this.config.enableDebugLogs) {
      this.logNetworkStatus();
    }
  }

  async verifyNetworkConnectivity(): Promise<boolean> {
    const network = this.getNetwork();

    // Check if all nodes have at least one connection
    for (const node of network.ipfsNodes) {
      const peers = node.getConnectedPeers();
      if (peers.length === 0) {
        console.log(`❌ Node ${node.nodeIndex} has no peer connections`);
        return false;
      }
    }

    console.log(`✅ All nodes have peer connections`);
    return true;
  }
}

// Global test manager for Jest lifecycle
let globalTestManager: RealTestManager | null = null;

export async function setupGlobalTestNetwork(config: Partial<RealTestConfig> = {}): Promise<RealTestNetwork> {
  if (globalTestManager) {
    throw new Error('Global test network already setup. Call cleanupGlobalTestNetwork() first.');
  }

  globalTestManager = new RealTestManager(config);
  return await globalTestManager.setup();
}

export async function cleanupGlobalTestNetwork(): Promise<void> {
  if (globalTestManager) {
    await globalTestManager.cleanup();
    globalTestManager = null;
  }
}

export function getGlobalTestNetwork(): RealTestNetwork {
  if (!globalTestManager) {
    throw new Error('Global test network not setup. Call setupGlobalTestNetwork() first.');
  }
  return globalTestManager.getNetwork();
}

export function getGlobalTestManager(): RealTestManager {
  if (!globalTestManager) {
    throw new Error('Global test manager not setup. Call setupGlobalTestNetwork() first.');
  }
  return globalTestManager;
}

// Jest helper functions
export const realTestHelpers = {
  setupAll: setupGlobalTestNetwork,
  cleanupAll: cleanupGlobalTestNetwork,
  getNetwork: getGlobalTestNetwork,
  getManager: getGlobalTestManager
};