penguin-beta-version-1.5 #3

Merged
anonpenguin merged 30 commits from sotiris-beta-version-1.5 into main on 2025-07-05 02:53:30 +00:00
11 changed files with 273 additions and 47 deletions
Showing only changes of commit 08d110b7e6


@ -29,7 +29,7 @@
"test:blog-integration": "jest tests/real-integration/blog-scenario/tests --detectOpenHandles --forceExit",
"test:blog-build": "cd tests/real-integration/blog-scenario && docker-compose -f docker/docker-compose.blog.yml build",
"test:blog-clean": "cd tests/real-integration/blog-scenario && docker-compose -f docker/docker-compose.blog.yml down -v --remove-orphans",
"test:blog-runner": "ts-node tests/real-integration/blog-scenario/run-tests.ts"
"test:blog-runner": "pnpm exec ts-node tests/real-integration/blog-scenario/run-tests.ts"
},
"keywords": [
"ipfs",

pnpm-lock.yaml (generated)

@ -99,6 +99,9 @@ importers:
'@types/node':
specifier: ^22.13.10
version: 22.13.16
'@types/node-fetch':
specifier: ^2.6.7
version: 2.6.12
'@types/node-forge':
specifier: ^1.3.11
version: 1.3.11
@ -129,6 +132,9 @@ importers:
lint-staged:
specifier: ^15.5.0
version: 15.5.0
node-fetch:
specifier: ^2.7.0
version: 2.7.0
prettier:
specifier: ^3.5.3
version: 3.5.3
@ -1522,6 +1528,9 @@ packages:
'@types/multicast-dns@7.2.4':
resolution: {integrity: sha512-ib5K4cIDR4Ro5SR3Sx/LROkMDa0BHz0OPaCBL/OSPDsAXEGZ3/KQeS6poBKYVN7BfjXDL9lWNwzyHVgt/wkyCw==}
'@types/node-fetch@2.6.12':
resolution: {integrity: sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==}
'@types/node-forge@1.3.11':
resolution: {integrity: sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ==}
@ -7048,6 +7057,11 @@ snapshots:
'@types/dns-packet': 5.6.5
'@types/node': 22.13.16
'@types/node-fetch@2.6.12':
dependencies:
'@types/node': 22.14.0
form-data: 4.0.2
'@types/node-forge@1.3.11':
dependencies:
'@types/node': 22.13.16


@ -0,0 +1,137 @@
import { createHelia } from 'helia';
import { createLibp2p } from 'libp2p';
import { tcp } from '@libp2p/tcp';
import { noise } from '@chainsafe/libp2p-noise';
import { yamux } from '@chainsafe/libp2p-yamux';
import { bootstrap } from '@libp2p/bootstrap';
import { mdns } from '@libp2p/mdns';
import { identify } from '@libp2p/identify';
import { gossipsub } from '@chainsafe/libp2p-gossipsub';
import fs from 'fs';
import path from 'path';
export interface IPFSConfig {
swarmKeyFile?: string;
bootstrap?: string[];
ports?: {
swarm?: number;
api?: number;
gateway?: number;
};
}
export class IPFSService {
private helia: any;
private libp2p: any;
private config: IPFSConfig;
constructor(config: IPFSConfig = {}) {
this.config = config;
}
async init(): Promise<void> {
// Create libp2p instance
const libp2pConfig: any = {
addresses: {
listen: [`/ip4/0.0.0.0/tcp/${this.config.ports?.swarm || 4001}`]
},
transports: [tcp()],
connectionEncryption: [noise()],
streamMuxers: [yamux()],
services: {
identify: identify(),
pubsub: gossipsub({
allowPublishToZeroTopicPeers: true
})
}
};
// Add peer discovery
const peerDiscovery = [];
// Add bootstrap peers if provided
if (this.config.bootstrap && this.config.bootstrap.length > 0) {
peerDiscovery.push(bootstrap({
list: this.config.bootstrap
}));
}
// Add mDNS for local discovery
peerDiscovery.push(mdns({
interval: 1000
}));
if (peerDiscovery.length > 0) {
libp2pConfig.peerDiscovery = peerDiscovery;
}
this.libp2p = await createLibp2p(libp2pConfig);
// Create Helia instance
this.helia = await createHelia({
libp2p: this.libp2p
});
console.log(`IPFS Service initialized with peer ID: ${this.libp2p.peerId}`);
}
async stop(): Promise<void> {
if (this.helia) {
await this.helia.stop();
}
}
getHelia(): any {
return this.helia;
}
getLibp2pInstance(): any {
return this.libp2p;
}
async getConnectedPeers(): Promise<Map<string, any>> {
if (!this.libp2p) {
return new Map();
}
const peers = this.libp2p.getPeers();
const peerMap = new Map();
for (const peerId of peers) {
peerMap.set(peerId.toString(), peerId);
}
return peerMap;
}
async pinOnNode(nodeId: string, cid: string): Promise<void> {
if (this.helia && this.helia.pins) {
await this.helia.pins.add(cid);
console.log(`Pinned ${cid} on node ${nodeId}`);
}
}
get pubsub() {
if (!this.libp2p || !this.libp2p.services.pubsub) {
return undefined;
}
return {
publish: async (topic: string, data: string) => {
const encoder = new TextEncoder();
await this.libp2p.services.pubsub.publish(topic, encoder.encode(data));
},
subscribe: async (topic: string, handler: (message: any) => void) => {
this.libp2p.services.pubsub.subscribe(topic);
this.libp2p.services.pubsub.addEventListener('message', (event: any) => {
if (event.detail.topic === topic) {
handler(event.detail);
}
});
},
unsubscribe: async (topic: string) => {
this.libp2p.services.pubsub.unsubscribe(topic);
}
};
}
}
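
For reference, a minimal usage sketch of the new IPFS service added above (the swarm port, bootstrap multiaddr, and topic name are illustrative placeholders, not values taken from this PR):

```typescript
import { IPFSService } from './IPFSService';

async function main(): Promise<void> {
  // Placeholder port and bootstrap address for illustration only.
  const ipfs = new IPFSService({
    ports: { swarm: 4011 },
    bootstrap: ['/ip4/127.0.0.1/tcp/4001/p2p/<bootstrap-peer-id>'],
  });

  await ipfs.init();

  // Subscribe and publish through the pubsub wrapper exposed by the service.
  await ipfs.pubsub?.subscribe('blog-events', (message) => {
    console.log('received message on topic', message.topic);
  });
  await ipfs.pubsub?.publish('blog-events', JSON.stringify({ hello: 'world' }));

  const peers = await ipfs.getConnectedPeers();
  console.log(`connected peers: ${peers.size}`);

  await ipfs.stop();
}

main().catch(console.error);
```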


@ -0,0 +1,44 @@
import { createOrbitDB } from '@orbitdb/core';
export class OrbitDBService {
private orbitdb: any;
private ipfsService: any;
constructor(ipfsService: any) {
this.ipfsService = ipfsService;
}
async init(): Promise<void> {
if (!this.ipfsService) {
throw new Error('IPFS service is required for OrbitDB');
}
this.orbitdb = await createOrbitDB({
ipfs: this.ipfsService.getHelia(),
directory: './orbitdb'
});
console.log('OrbitDB Service initialized');
}
async stop(): Promise<void> {
if (this.orbitdb) {
await this.orbitdb.stop();
}
}
async openDB(name: string, type: string): Promise<any> {
if (!this.orbitdb) {
throw new Error('OrbitDB not initialized');
}
return await this.orbitdb.open(name, {
type,
AccessController: this.orbitdb.AccessController
});
}
getOrbitDB(): any {
return this.orbitdb;
}
}
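
And a corresponding sketch layering the OrbitDB service on top of it (import paths, database name, and store type are assumptions for illustration; the diff view does not show where these files live):

```typescript
import { IPFSService } from './IPFSService';
import { OrbitDBService } from './OrbitDBService';

async function openBlogStore(): Promise<void> {
  const ipfs = new IPFSService();
  await ipfs.init();

  // OrbitDB is created on top of the Helia instance held by IPFSService.
  const orbitdb = new OrbitDBService(ipfs);
  await orbitdb.init();

  // 'events' is a standard OrbitDB store type; the database name is arbitrary here.
  const posts = await orbitdb.openDB('blog-posts', 'events');
  await posts.add(JSON.stringify({ title: 'hello', body: 'first post' }));

  await orbitdb.stop();
  await ipfs.stop();
}

openBlogStore().catch(console.error);
```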


@ -68,23 +68,23 @@ blog-scenario/
```bash
# Run complete integration tests
npm run test:blog-real
pnpm run test:blog-real
# Or use the test runner for better control
npm run test:blog-runner
pnpm run test:blog-runner
```
#### Option 2: Build and Run Manually
```bash
# Build Docker images
npm run test:blog-build
pnpm run test:blog-build
# Run tests
npm run test:blog-real
pnpm run test:blog-real
# Clean up afterwards
npm run test:blog-clean
pnpm run test:blog-clean
```
#### Option 3: Development Mode
@ -95,7 +95,7 @@ cd tests/real-integration/blog-scenario
docker-compose -f docker/docker-compose.blog.yml up blog-node-1 blog-node-2 blog-node-3
# Run tests against running services
npm run test:blog-integration
pnpm run test:blog-integration
```
## Test Scenarios
@ -275,7 +275,7 @@ lsof -i :4001
lsof -i :4011-4013
# Clean up existing containers
npm run test:blog-clean
pnpm run test:blog-clean
```
#### Docker Build Failures
@ -316,10 +316,10 @@ To run tests with additional debugging:
```bash
# Set debug environment
DEBUG=* npm run test:blog-real
DEBUG=* pnpm run test:blog-real
# Run with increased verbosity
LOG_LEVEL=debug npm run test:blog-real
LOG_LEVEL=debug pnpm run test:blog-real
```
## Development


@ -13,16 +13,19 @@ RUN apk add --no-cache \
WORKDIR /app
# Copy package files
COPY package*.json ./
COPY package*.json pnpm-lock.yaml ./
# Install dependencies
RUN npm ci --only=production && npm cache clean --force
# Install pnpm
RUN npm install -g pnpm
# Install dependencies (skip prepare script for Docker)
RUN pnpm install --prod --frozen-lockfile --ignore-scripts
# Copy source code
COPY . .
# Build the application
RUN npm run build
RUN pnpm run build
# Create data directory
RUN mkdir -p /data


@ -8,16 +8,19 @@ RUN apk add --no-cache curl jq
WORKDIR /app
# Copy package files
COPY package*.json ./
COPY package*.json pnpm-lock.yaml ./
# Install all dependencies (including dev dependencies for testing)
RUN npm ci && npm cache clean --force
# Install pnpm
RUN npm install -g pnpm
# Install all dependencies (including dev dependencies for testing, skip prepare script)
RUN pnpm install --frozen-lockfile --ignore-scripts
# Copy source code
COPY . .
# Build the application
RUN npm run build
RUN pnpm run build
# Create results directory
RUN mkdir -p /app/results
@ -27,4 +30,4 @@ ENV NODE_ENV=test
ENV TEST_SCENARIO=blog
# Default command (can be overridden)
CMD ["npm", "run", "test:blog-integration"]
CMD ["pnpm", "run", "test:blog-integration"]


@ -562,7 +562,7 @@ class BlogAPIServer {
try {
if (this.framework) {
const ipfsService = this.framework.getIPFSService();
if (ipfsService) {
if (ipfsService && ipfsService.getConnectedPeers) {
const peers = await ipfsService.getConnectedPeers();
return peers.size;
}
@ -603,10 +603,10 @@ class BlogAPIServer {
}
private async initializeFramework(): Promise<void> {
// Import services - adjust paths based on your actual service locations
// Note: You'll need to implement these services or use existing ones
const IPFSService = (await import('../../../../src/framework/services/IPFSService')).IPFSService;
const OrbitDBService = (await import('../../../../src/framework/services/OrbitDBService')).OrbitDBService;
// Import services
const { IPFSService } = await import('../../../../src/framework/services/IPFSService');
const { OrbitDBService } = await import('../../../../src/framework/services/RealOrbitDBService');
const { FrameworkIPFSService, FrameworkOrbitDBService } = await import('../../../../src/framework/services/OrbitDBService');
// Initialize IPFS service
const ipfsService = new IPFSService({
@ -625,6 +625,10 @@ class BlogAPIServer {
await orbitDBService.init();
console.log(`[${this.nodeId}] OrbitDB service initialized`);
// Wrap services for framework
const frameworkIPFS = new FrameworkIPFSService(ipfsService);
const frameworkOrbitDB = new FrameworkOrbitDBService(orbitDBService);
// Initialize framework
this.framework = new DebrosFramework({
environment: 'test',
@ -642,7 +646,7 @@ class BlogAPIServer {
}
});
await this.framework.initialize(orbitDBService, ipfsService);
await this.framework.initialize(frameworkOrbitDB, frameworkIPFS);
console.log(`[${this.nodeId}] DebrosFramework initialized successfully`);
}
}


@ -2,9 +2,16 @@
echo "Configuring bootstrap IPFS node..."
# Set swarm key for private network
export IPFS_PATH=/root/.ipfs
cp /data/swarm.key $IPFS_PATH/swarm.key
# Set IPFS path
export IPFS_PATH=/data/ipfs
# Copy swarm key for private network
if [ -f "/data/ipfs/swarm.key" ]; then
echo "Using existing swarm key"
else
echo "Swarm key not found"
exit 1
fi
# Configure IPFS for private network
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin '["*"]'
@ -14,15 +21,13 @@ ipfs config --json API.HTTPHeaders.Access-Control-Allow-Headers '["Authorization
# Remove default bootstrap nodes (for private network)
ipfs bootstrap rm --all
# Enable experimental features
ipfs config --json Experimental.Libp2pStreamMounting true
ipfs config --json Experimental.P2pHttpProxy true
# Configure addresses
ipfs config Addresses.API "/ip4/0.0.0.0/tcp/5001"
ipfs config Addresses.Gateway "/ip4/0.0.0.0/tcp/8080"
ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4001"]'
# Start IPFS daemon
# Enable PubSub
ipfs config --json Pubsub.Enabled true
echo "Starting IPFS daemon..."
exec ipfs daemon --enable-gc --enable-pubsub-experiment
exec ipfs daemon --enable-gc


@ -1,11 +1,10 @@
version: '3.8'
services:
# Bootstrap node for peer discovery
blog-bootstrap:
build:
context: ../../../
dockerfile: tests/real-integration/blog-scenario/docker/Dockerfile.bootstrap
context: ../../../../
dockerfile: tests/real-integration/shared/infrastructure/docker/Dockerfile.bootstrap
environment:
- NODE_TYPE=bootstrap
- NODE_ID=blog-bootstrap
@ -18,7 +17,7 @@ services:
ports:
- "4001:4001"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5001/api/v0/id"]
test: ["CMD", "sh", "-c", "ipfs id >/dev/null 2>&1"]
interval: 10s
timeout: 5s
retries: 5
@ -26,7 +25,7 @@ services:
# Blog API Node 1
blog-node-1:
build:
context: ../../../
context: ../../../../
dockerfile: tests/real-integration/blog-scenario/docker/Dockerfile.blog-api
depends_on:
blog-bootstrap:
@ -47,7 +46,7 @@ services:
networks:
- blog-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
test: ["CMD", "sh", "-c", "wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1"]
interval: 15s
timeout: 10s
retries: 10
@ -56,7 +55,7 @@ services:
# Blog API Node 2
blog-node-2:
build:
context: ../../../
context: ../../../../
dockerfile: tests/real-integration/blog-scenario/docker/Dockerfile.blog-api
depends_on:
blog-bootstrap:
@ -77,7 +76,7 @@ services:
networks:
- blog-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
test: ["CMD", "sh", "-c", "wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1"]
interval: 15s
timeout: 10s
retries: 10
@ -86,7 +85,7 @@ services:
# Blog API Node 3
blog-node-3:
build:
context: ../../../
context: ../../../../
dockerfile: tests/real-integration/blog-scenario/docker/Dockerfile.blog-api
depends_on:
blog-bootstrap:
@ -107,7 +106,7 @@ services:
networks:
- blog-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
test: ["CMD", "sh", "-c", "wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1"]
interval: 15s
timeout: 10s
retries: 10
@ -116,7 +115,7 @@ services:
# Test Runner
blog-test-runner:
build:
context: ../../../
context: ../../../../
dockerfile: tests/real-integration/blog-scenario/docker/Dockerfile.test-runner
depends_on:
blog-node-1:
@ -131,11 +130,11 @@ services:
- TEST_TIMEOUT=300000
- NODE_ENV=test
volumes:
- ./tests:/app/tests:ro
- ../tests:/app/tests:ro
- test-results:/app/results
networks:
- blog-network
command: ["npm", "run", "test:blog-integration"]
command: ["pnpm", "run", "test:blog-integration"]
volumes:
bootstrap-data:


@ -0,0 +1,17 @@
# Bootstrap node for IPFS peer discovery
FROM ipfs/kubo:v0.24.0
# Copy swarm key
COPY tests/real-integration/blog-scenario/docker/swarm.key /data/ipfs/swarm.key
# Copy configuration script
COPY tests/real-integration/blog-scenario/docker/bootstrap-config.sh /usr/local/bin/bootstrap-config.sh
USER root
RUN chmod +x /usr/local/bin/bootstrap-config.sh
USER ipfs
# Expose IPFS ports
EXPOSE 4001 5001 8080
# Start IPFS daemon with custom config
CMD ["/usr/local/bin/bootstrap-config.sh"]