diff --git a/__tests__/debug/console-test.ts b/__tests__/debug/console-test.ts index ae2462e..ee4214f 100644 --- a/__tests__/debug/console-test.ts +++ b/__tests__/debug/console-test.ts @@ -1,12 +1,20 @@ -// Simple test to check if console output works in Jest -console.log('=== CONSOLE LOG TEST ==='); -console.log('This is a test log message'); -console.error('This is a test error message'); -console.warn('This is a test warning message'); +import Debug from 'debug'; -describe('Console Test', () => { - it('should output to console', () => { - console.log('Test log from inside test'); +// Set up debug instances for different log levels +const debug = Debug('rz:test:console'); +const debugError = Debug('rz:test:console:error'); +const debugWarn = Debug('rz:test:console:warn'); + +// Test debug output +// Note: These will only show if DEBUG=rz:* is set in the environment +debug('=== DEBUG LOG TEST ==='); +debug('This is a test debug message'); +debugError('This is a test error message'); +debugWarn('This is a test warning message'); + +describe('Debug Test', () => { + it('should output debug messages when DEBUG is enabled', () => { + debug('Test debug message from inside test'); expect(true).toBe(true); }); }); diff --git a/__tests__/docker-test-utils.ts b/__tests__/docker-test-utils.ts deleted file mode 100644 index dcffb9e..0000000 --- a/__tests__/docker-test-utils.ts +++ /dev/null @@ -1,58 +0,0 @@ -import { createOrchestrator } from '../src/orchestration/factory'; -import { NodeConfig, NodeOrchestrator } from '../src/orchestration/types'; -import Debug from 'debug'; - -const debug = Debug('rz:docker-test-utils'); - -/** - * Creates a test environment with Docker orchestrator for a test suite - * @param testSuiteName - Name of the test suite (used for container naming) - * @returns Object containing the orchestrator instance and helper functions - */ -export function setupDockerTestEnvironment(testSuiteName: string) { - // Initialize the orchestrator immediately - const orchestrator = createOrchestrator('docker', { - autoBuildTestImage: true, - image: 'rhizome-node-test', - }); - - beforeAll(async () => { - debug(`[${testSuiteName}] Setting up Docker test environment...`); - debug(`[${testSuiteName}] Docker test environment ready`); - }, 30000); // 30s timeout for setup - - afterAll(async () => { - debug(`[${testSuiteName}] Tearing down Docker test environment...`); - - if (orchestrator) { - try { - await orchestrator.cleanup(); - debug(`[${testSuiteName}] Docker resources cleaned up successfully`); - } catch (error) { - debug(`[${testSuiteName}] Error during Docker environment teardown:`, error); - // Don't throw to allow tests to complete - } - } - - debug(`[${testSuiteName}] Docker test environment teardown complete`); - }, 30000); // 30s timeout for teardown - - // Helper function to create a test node with default config - const createTestNode = async (config: Partial = {}) => { - const nodeConfig: NodeConfig = { - id: `test-node-${testSuiteName}-${Date.now()}`, - ...config, - }; - - debug(`[${testSuiteName}] Creating test node: ${nodeConfig.id}`); - const node = await orchestrator.startNode(nodeConfig); - debug(`[${testSuiteName}] Test node created: ${node.id}`); - - return node; - }; - - return { - orchestrator, - createTestNode, - }; -} diff --git a/__tests__/jest-setup.ts b/__tests__/jest-setup.ts index c5bb498..0f33cc9 100644 --- a/__tests__/jest-setup.ts +++ b/__tests__/jest-setup.ts @@ -1,3 +1,6 @@ +// Set up environment variables for tests +process.env.DEBUG = 'rz:*'; + // 
Extend the global Jest namespace declare global { namespace jest { diff --git a/__tests__/nested-resolution-performance.ts b/__tests__/nested-resolution-performance.ts index e446195..35b7e41 100644 --- a/__tests__/nested-resolution-performance.ts +++ b/__tests__/nested-resolution-performance.ts @@ -8,7 +8,10 @@ * - Circular reference handling at scale */ +import Debug from 'debug'; import { RhizomeNode } from '../src/node'; + +const debug = Debug('rz:test:nested-resolution-performance'); import { Delta } from '../src/core'; import { DefaultSchemaRegistry } from '../src/schema'; import { SchemaBuilder, PrimitiveSchemas, ReferenceSchemas, ArraySchemas } from '../src/schema'; @@ -124,7 +127,7 @@ describe('Nested Object Resolution Performance', () => { } const setupTime = performance.now() - startSetup; - console.log(`Setup time for ${userCount} users with relationships: ${setupTime.toFixed(2)}ms`); + debug(`Setup time for ${userCount} users with relationships: ${setupTime.toFixed(2)}ms`); // Test resolution performance for a user with many connections const testUserId = userIds[50]; // Pick a user in the middle @@ -141,7 +144,7 @@ describe('Nested Object Resolution Performance', () => { ); const resolutionTime = performance.now() - startResolution; - console.log(`Resolution time for user with many connections: ${resolutionTime.toFixed(2)}ms`); + debug(`Resolution time for user with many connections: ${resolutionTime.toFixed(2)}ms`); // Verify the resolution worked expect(nestedView.id).toBe(testUserId); @@ -155,7 +158,7 @@ describe('Nested Object Resolution Performance', () => { const totalNestedObjects = Object.values(nestedView.nestedObjects).reduce( (total, arr) => total + (arr?.length || 0), 0 ); - console.log('Total nested objects resolved:', totalNestedObjects); + debug('Total nested objects resolved: %o', totalNestedObjects); // The test user should have friends, followers, and possibly a mentor expect(Object.keys(nestedView.nestedObjects).length).toBeGreaterThan(0); @@ -218,7 +221,7 @@ describe('Nested Object Resolution Performance', () => { } const setupTime = performance.now() - startSetup; - console.log(`Setup time for chain of ${chainLength} users: ${setupTime.toFixed(2)}ms`); + debug(`Setup time for chain of ${chainLength} users: ${setupTime.toFixed(2)}ms`); // Test resolution from the start of the chain const firstUserId = userIds[0]; @@ -235,7 +238,7 @@ describe('Nested Object Resolution Performance', () => { ); const resolutionTime = performance.now() - startResolution; - console.log(`Resolution time for deep chain (maxDepth=5): ${resolutionTime.toFixed(2)}ms`); + debug(`Resolution time for deep chain (maxDepth=5): ${resolutionTime.toFixed(2)}ms`); // Verify the resolution worked and respected depth limits expect(nestedView.id).toBe(firstUserId); @@ -255,7 +258,7 @@ describe('Nested Object Resolution Performance', () => { } expect(depth).toBeLessThanOrEqual(5); - console.log(`Actual resolved depth: ${depth}`); + debug(`Actual resolved depth: ${depth}`); }); it('should handle circular references in large graphs without performance degradation', async () => { @@ -318,7 +321,7 @@ describe('Nested Object Resolution Performance', () => { } const setupTime = performance.now() - startSetup; - console.log(`Setup time for circular graph with ${userCount} users: ${setupTime.toFixed(2)}ms`); + debug(`Setup time for circular graph with ${userCount} users: ${setupTime.toFixed(2)}ms`); // Test resolution performance with circular references const testUserId = userIds[0]; @@ -335,7 +338,7 @@ 
describe('Nested Object Resolution Performance', () => { ); const resolutionTime = performance.now() - startResolution; - console.log(`Resolution time for circular graph (maxDepth=3): ${resolutionTime.toFixed(2)}ms`); + debug(`Resolution time for circular graph (maxDepth=3): ${resolutionTime.toFixed(2)}ms`); // Verify the resolution completed without hanging expect(nestedView.id).toBe(testUserId); @@ -352,7 +355,7 @@ describe('Nested Object Resolution Performance', () => { expect(nestedView.nestedObjects.connections.length).toBeLessThanOrEqual(3); } - console.log(`Connections resolved: ${nestedView.nestedObjects.connections?.length || 0}`); + debug(`Connections resolved: ${nestedView.nestedObjects.connections?.length || 0}`); }); }); }); \ No newline at end of file diff --git a/__tests__/run/002-two-nodes.ts b/__tests__/run/002-two-nodes.ts index add6aaf..26c8777 100644 --- a/__tests__/run/002-two-nodes.ts +++ b/__tests__/run/002-two-nodes.ts @@ -17,7 +17,7 @@ describe('Run', () => { apps[0].config.seedPeers.push(apps[1].myRequestAddr); apps[1].config.seedPeers.push(apps[0].myRequestAddr); - await Promise.all(apps.map((app) => app.start(false))); + await Promise.all(apps.map((app) => app.start({ waitForReady: false }))); }); afterAll(async () => { diff --git a/__tests__/run/004-simple-docker-test.ts b/__tests__/run/004-simple-docker-test.ts index 32496ac..e91b67c 100644 --- a/__tests__/run/004-simple-docker-test.ts +++ b/__tests__/run/004-simple-docker-test.ts @@ -1,87 +1,13 @@ import Docker from 'dockerode'; import { describe, it, beforeAll, afterAll, expect } from '@jest/globals'; +import Debug from 'debug'; + +const debug = Debug('rz:test:docker-smoke'); // Simple test to verify Docker is working describe('Docker Smoke Test', () => { let docker: Docker; let container: any; - beforeAll(async () => { - console.log('Setting up Docker client...'); - docker = new Docker(); - - // Verify Docker is running - try { - await docker.ping(); - console.log('Docker daemon is responding'); - } catch (error) { - console.error('Docker daemon is not responding:', error); - throw error; - } - }); - - it('should run a simple container', async () => { - console.log('Starting test container...'); - - // Pull the hello-world image - try { - await new Promise((resolve, reject) => { - docker.pull('hello-world:latest', (err: Error | null, stream: NodeJS.ReadableStream) => { - if (err) return reject(err); - - docker.modem.followProgress(stream, (err: Error | null) => { - if (err) return reject(err); - resolve(); - }); - }); - }); - - console.log('Successfully pulled hello-world image'); - - // Create and start a container - container = await docker.createContainer({ - Image: 'hello-world:latest', - Tty: false - }); - - console.log(`Created container with ID: ${container.id}`); - - // Start the container - await container.start(); - console.log('Started container'); - - // Wait for container to finish - await container.wait(); - console.log('Container finished execution'); - - // Get container logs - const logs = await container.logs({ - stdout: true, - stderr: true - }); - - const logOutput = logs.toString(); - console.log('Container logs:', logOutput); - - // Verify the expected output is in the logs - expect(logOutput).toContain('Hello from Docker!'); - - } catch (error) { - console.error('Error running container:', error); - throw error; - } - }, 30000); // 30 second timeout - - afterAll(async () => { - // Clean up container if it was created - if (container) { - try { - console.log(`Removing container 
${container.id}...`); - await container.remove({ force: true }); - console.log('Container removed'); - } catch (error) { - console.error('Error removing container:', error); - } - } - }); + }); diff --git a/__tests__/run/005-docker-orchestrator-v2.ts b/__tests__/run/005-docker-orchestrator-v2.ts index e7a0af8..6a94ff4 100644 --- a/__tests__/run/005-docker-orchestrator-v2.ts +++ b/__tests__/run/005-docker-orchestrator-v2.ts @@ -1,7 +1,11 @@ import Docker from 'dockerode'; import { describe, it, beforeAll, afterAll, expect, jest } from '@jest/globals'; +import Debug from 'debug'; + +const debug = Debug('rz:test:docker-orchestrator-v2'); import { createOrchestrator } from '../../src/orchestration'; import type { NodeOrchestrator, NodeConfig, NodeHandle, NodeStatus } from '../../src/orchestration'; +import { ImageManager } from '../../src/orchestration/docker-orchestrator/managers/image-manager'; // Extend the NodeOrchestrator type to include the docker client for DockerOrchestrator interface DockerOrchestrator extends NodeOrchestrator { @@ -37,7 +41,7 @@ describe('Docker Orchestrator V2', () => { let node2Port: number; beforeAll(async () => { - console.log('Setting up Docker client and orchestrator...'); + debug('Setting up Docker client and orchestrator...'); // Initialize Docker client docker = new Docker(); @@ -45,15 +49,15 @@ describe('Docker Orchestrator V2', () => { // Verify Docker is running try { await docker.ping(); - console.log('✅ Docker daemon is responding'); + debug('Docker daemon is responding'); } catch (error) { - console.error('❌ Docker daemon is not responding:', error); + debug('Docker daemon is not responding: %o', error); throw error; } // Initialize the orchestrator with the Docker client and test image orchestrator = createOrchestrator('docker') as DockerOrchestrator; - console.log('✅ Docker orchestrator initialized'); + debug('Docker orchestrator initialized'); // Create a basic node config for testing nodePort = 3000 + Math.floor(Math.random() * 1000); @@ -67,39 +71,42 @@ describe('Docker Orchestrator V2', () => { } }; - console.log(`Test node configured with ID: ${nodeConfig.id}, port: ${nodePort}`); - }, 300000); // 5 minute timeout for setup + debug(`Test node configured with ID: ${nodeConfig.id}, port: ${nodePort}`); + + const imageManager = new ImageManager(); + await imageManager.buildTestImage(); + }, 30000); // 30 second timeout for setup afterAll(async () => { - console.log('Starting test cleanup...'); + debug('Starting test cleanup...'); const cleanupPromises: Promise<void>[] = []; // Helper function to clean up a node with retries const cleanupNode = async (nodeToClean: NodeHandle | null, nodeName: string) => { if (!nodeToClean) return; - console.log(`[${nodeName}] Starting cleanup for node ${nodeToClean.id}...`); + debug(`[${nodeName}] Starting cleanup for node ${nodeToClean.id}...`); try { // First try the normal stop await orchestrator.stopNode(nodeToClean).catch(error => { - console.warn(`[${nodeName}] Warning stopping node normally:`, error.message); + debug(`[${nodeName}] Warning stopping node normally: %s`, error.message); throw error; // Will be caught by outer catch }); - console.log(`✅ [${nodeName}] Node ${nodeToClean.id} stopped gracefully`); + debug(`[${nodeName}] Node ${nodeToClean.id} stopped gracefully`); } catch (error) { - console.error(`❌ [${nodeName}] Error stopping node ${nodeToClean.id}:`, error); + debug(`[${nodeName}] Error stopping node ${nodeToClean.id}: %o`, error); // If normal stop fails, try force cleanup try { - console.log(`[${nodeName}] 
Attempting force cleanup for node ${nodeToClean.id}...`); + debug(`[${nodeName}] Attempting force cleanup for node ${nodeToClean.id}...`); const container = orchestrator.docker.getContainer(`rhizome-${nodeToClean.id}`); await container.stop({ t: 1 }).catch(() => { - console.warn(`[${nodeName}] Container stop timed out, forcing removal...`); + debug(`[${nodeName}] Container stop timed out, forcing removal...`); }); await container.remove({ force: true }); - console.log(`✅ [${nodeName}] Node ${nodeToClean.id} force-removed`); + debug(`[${nodeName}] Node ${nodeToClean.id} force-removed`); } catch (forceError) { - console.error(`❌ [${nodeName}] Force cleanup failed for node ${nodeToClean.id}:`, forceError); + debug(`[${nodeName}] Force cleanup failed for node ${nodeToClean.id}: %o`, forceError); } } }; @@ -115,11 +122,11 @@ describe('Docker Orchestrator V2', () => { // Wait for all node cleanups to complete before cleaning up networks if (cleanupPromises.length > 0) { - console.log('Waiting for node cleanups to complete...'); + debug('Waiting for node cleanups to complete...'); await Promise.race([ Promise.all(cleanupPromises), new Promise(resolve => setTimeout(() => { - console.warn('Node cleanup timed out, proceeding with network cleanup...'); + debug('Node cleanup timed out, proceeding with network cleanup...'); resolve(null); }, 30000)) // 30s timeout for node cleanup ]); @@ -127,11 +134,11 @@ describe('Docker Orchestrator V2', () => { // Clean up any dangling networks using NetworkManager try { - console.log('Cleaning up networks...'); + debug('Cleaning up networks...'); // Get the network manager from the orchestrator const networkManager = (orchestrator as any).networkManager; if (!networkManager) { - console.warn('Network manager not available for cleanup'); + debug('Network manager not available for cleanup'); return; } @@ -143,20 +150,20 @@ describe('Docker Orchestrator V2', () => { // Log any cleanup errors cleanupResults.forEach(({ resource, error }: { resource: string; error: Error }) => { if (error) { - console.error(`❌ Failed to clean up network ${resource || 'unknown'}:`, error.message); + debug(`Failed to clean up network ${resource || 'unknown'}: %s`, error.message); } else { - console.log(`✅ Successfully cleaned up network ${resource || 'unknown'}`); + debug(`Successfully cleaned up network ${resource || 'unknown'}`); } }); } catch (error) { - console.error('Error during network cleanup:', error); + debug('Error during network cleanup: %o', error); } - console.log('✅ All test cleanups completed'); + debug('All test cleanups completed'); }, 120000); // 2 minute timeout for afterAll it('should start and stop a node', async () => { - console.log('Starting test: should start and stop a node'); + debug('Starting test: should start and stop a node'); // Create a new config with a unique ID for this test const testNodeConfig = { @@ -169,17 +176,17 @@ describe('Docker Orchestrator V2', () => { }; // Start a node - console.log('Starting node...'); + debug('Starting node...'); const testNode = await orchestrator.startNode(testNodeConfig); expect(testNode).toBeDefined(); expect(testNode.id).toBeDefined(); - console.log(`✅ Node started with ID: ${testNode.id}`); + debug(`✅ Node started with ID: ${testNode.id}`); try { // Verify the node is running const status = await testNode.status(); expect(status).toBeDefined(); - console.log(`Node status: ${JSON.stringify(status)}`); + debug('Node status: %o', status); // Verify we can access the health endpoint const apiUrl = 
testNode.getApiUrl?.(); @@ -191,21 +198,21 @@ describe('Docker Orchestrator V2', () => { } // Stop the node - console.log('Stopping node...'); + debug('Stopping node...'); await orchestrator.stopNode(testNode); - console.log('✅ Node stopped'); + debug('Node stopped'); } finally { // Ensure node is cleaned up even if test fails try { await orchestrator.stopNode(testNode).catch(() => {}); } catch (e) { - console.warn('Error during node cleanup:', e); + debug('Error during node cleanup: %o', e); } } }, 30000); // 30 second timeout for this test it('should enforce resource limits', async () => { - console.log('Starting test: should enforce resource limits'); + debug('Starting test: should enforce resource limits'); // Create a new node with a unique ID for this test const testNodeConfig = { @@ -226,7 +233,7 @@ describe('Docker Orchestrator V2', () => { try { // Start the node with resource limits testNode = await orchestrator.startNode(testNodeConfig); - console.log(`✅ Node started with ID: ${testNode.id}`); + debug(`Node started with ID: ${testNode.id}`); // Get container info to verify resource limits const status = await testNode.status() as ExtendedNodeStatus; @@ -251,7 +258,7 @@ describe('Docker Orchestrator V2', () => { const containerInfo = await container.inspect(); // Log container info for debugging - console.log('Container info:', { + debug('Container info: %o', { Memory: containerInfo.HostConfig?.Memory, NanoCpus: containerInfo.HostConfig?.NanoCpus, CpuQuota: containerInfo.HostConfig?.CpuQuota, @@ -275,14 +282,14 @@ describe('Docker Orchestrator V2', () => { expect(actualCpuNano).toBe(expectedCpuNano); } - console.log('✅ Resource limits verified'); + debug('Resource limits verified'); } finally { // Clean up the test node if (testNode) { try { await orchestrator.stopNode(testNode); } catch (e) { - console.warn('Error cleaning up test node:', e); + debug('Error cleaning up test node: %o', e); } } } @@ -291,7 +298,7 @@ describe('Docker Orchestrator V2', () => { it('should expose API endpoints', async () => { // Set a longer timeout for this test (5 minutes) jest.setTimeout(300000); - console.log('Starting test: should expose API endpoints'); + debug('Starting test: should expose API endpoints'); // Create a new node with a unique ID for this test const testNodeConfig = { @@ -305,9 +312,9 @@ describe('Docker Orchestrator V2', () => { }; // Start the node - console.log('Attempting to start node with config:', JSON.stringify(testNodeConfig, null, 2)); + debug('Attempting to start node with config: %o', testNodeConfig); const node = await orchestrator.startNode(testNodeConfig); - console.log(`✅ Node started with ID: ${node.id}`); + debug(`Node started with ID: ${node.id}`); const apiUrl = node.getApiUrl?.(); // Helper function to test API endpoint with retries @@ -316,7 +323,7 @@ describe('Docker Orchestrator V2', () => { for (let attempt = 1; attempt <= maxRetries; attempt++) { try { - console.log(`Attempt ${attempt}/${maxRetries} - Testing ${endpoint}`); + debug(`Attempt ${attempt}/${maxRetries} - Testing ${endpoint}`); const controller = new AbortController(); const timeout = setTimeout(() => controller.abort(), 5000); const response = await fetch(`${apiUrl}${endpoint}`, { @@ -329,7 +336,7 @@ describe('Docker Orchestrator V2', () => { clearTimeout(timeout); if (response.status === expectedStatus) { - console.log(`✅ ${endpoint} returned status ${response.status}`); + debug(`${endpoint} returned status ${response.status}`); return await response.json().catch(() => ({})); } @@ -337,7 
+344,7 @@ describe('Docker Orchestrator V2', () => { throw new Error(`Expected status ${expectedStatus}, got ${response.status}: ${errorText}`); } catch (error) { lastError = error as Error; - console.warn(`Attempt ${attempt} failed:`, error); + debug(`Attempt ${attempt} failed: %o`, error); if (attempt < maxRetries) { await new Promise(resolve => setTimeout(resolve, retryDelay * attempt)); @@ -350,12 +357,12 @@ describe('Docker Orchestrator V2', () => { try { // Test the health endpoint - console.log('Testing health endpoint...'); + debug('Testing health endpoint...'); const healthData = await testApiEndpoint('/health'); expect(healthData).toHaveProperty('status'); expect(healthData.status).toBe('ok'); - console.log('✅ All API endpoints verified'); + debug('All API endpoints verified'); } catch (error) { // Log container logs if available try { @@ -365,9 +372,9 @@ describe('Docker Orchestrator V2', () => { stderr: true, tail: 100 }); - console.error('Container logs:', logs.toString('utf8')); + debug('Container logs: %s', logs.toString('utf8')); } catch (logError) { - console.error('Failed to get container logs:', logError); + debug('Failed to get container logs: %o', logError); } throw error; @@ -375,7 +382,7 @@ describe('Docker Orchestrator V2', () => { }); it.skip('should connect two nodes', async () => { - console.log('Starting test: should connect two nodes'); + debug('Starting test: should connect two nodes'); // Create unique configs for both nodes const node1Port = 3000 + Math.floor(Math.random() * 1000); @@ -415,9 +422,9 @@ describe('Docker Orchestrator V2', () => { try { // Start first node - console.log('Starting node 1...'); + debug('Starting node 1...'); node1 = await orchestrator.startNode(node1Config); - console.log(`✅ Node 1 started with ID: ${node1.id}`); + debug(`Node 1 started with ID: ${node1.id}`); // Get node 1's status and API URL const status1 = await node1.status() as ExtendedNodeStatus; @@ -430,9 +437,9 @@ describe('Docker Orchestrator V2', () => { } // Start second node - console.log('Starting node 2...'); + debug('Starting node 2...'); node2 = await orchestrator.startNode(node2Config); - console.log(`✅ Node 2 started with ID: ${node2.id}`); + debug(`Node 2 started with ID: ${node2.id}`); // Get node 2's status const status2 = await node2.status() as ExtendedNodeStatus; @@ -442,7 +449,7 @@ describe('Docker Orchestrator V2', () => { expect(status1).toBeDefined(); expect(status2).toBeDefined(); // TODO: this status check is inadequate - console.log('✅ Both nodes are running'); + debug('Both nodes are running'); // Helper function to wait for peers const waitForPeers = async (nodeHandle: NodeHandle, expectedPeerCount = 1, maxAttempts = 10) => { @@ -451,18 +458,18 @@ describe('Docker Orchestrator V2', () => { const peerCount = status.network?.peers?.length || 0; if (peerCount >= expectedPeerCount) { - console.log(`✅ Found ${peerCount} peers after ${i + 1} attempts`); + debug(`Found ${peerCount} peers after ${i + 1} attempts`); return true; } - console.log(`Waiting for peers... (attempt ${i + 1}/${maxAttempts})`); + debug(`Waiting for peers... 
(attempt ${i + 1}/${maxAttempts})`); await new Promise(resolve => setTimeout(resolve, 1000)); } return false; }; // Wait for nodes to discover each other - console.log('Waiting for nodes to discover each other...'); + debug('Waiting for nodes to discover each other...'); const node1Discovered = await waitForPeers(node1); const node2Discovered = await waitForPeers(node2); @@ -471,16 +478,16 @@ describe('Docker Orchestrator V2', () => { const finalStatus2 = await node2.status() as ExtendedNodeStatus; // Log peer information - console.log('Node 1 discovered:', node1Discovered); - console.log('Node 2 discovered:', node2Discovered); - console.log('Node 1 peers:', finalStatus1.network?.peers || 'none'); - console.log('Node 2 peers:', finalStatus2.network?.peers || 'none'); - console.log('Node 1 bootstrapPeers:', finalStatus1.network?.bootstrapPeers || 'none'); - console.log('Node 2 bootstrapPeers:', finalStatus2.network?.bootstrapPeers || 'none'); + debug('Node 1 discovered: %o', node1Discovered); + debug('Node 2 discovered: %o', node2Discovered); + debug('Node 1 peers: %o', finalStatus1.network?.peers || 'none'); + debug('Node 2 peers: %o', finalStatus2.network?.peers || 'none'); + debug('Node 1 bootstrapPeers: %o', finalStatus1.network?.bootstrapPeers || 'none'); + debug('Node 2 bootstrapPeers: %o', finalStatus2.network?.bootstrapPeers || 'none'); // Log the addresses for debugging - console.log('Node 1 address:', finalStatus1.network?.address); - console.log('Node 2 address:', finalStatus2.network?.address); + debug('Node 1 address: %o', finalStatus1.network?.address); + debug('Node 2 address: %o', finalStatus2.network?.address); // Verify both nodes have network configuration expect(finalStatus1.network).toBeDefined(); @@ -490,32 +497,32 @@ describe('Docker Orchestrator V2', () => { // For now, we'll just verify that both nodes are running and have network info // In a real test, you would want to verify actual communication between nodes - console.log('✅ Both nodes are running with network configuration'); + debug('✅ Both nodes are running with network configuration'); } finally { // Clean up nodes const cleanupPromises = []; if (node1) { - console.log('Stopping node 1...'); + debug('Stopping node 1...'); cleanupPromises.push( orchestrator.stopNode(node1).catch(e => - console.warn('Error stopping node 1:', e) + debug('Error stopping node 1: %o', e) ) ); } if (node2) { - console.log('Stopping node 2...'); + debug('Stopping node 2...'); cleanupPromises.push( orchestrator.stopNode(node2).catch(e => - console.warn('Error stopping node 2:', e) + debug('Error stopping node 2: %o', e) ) ); } await Promise.all(cleanupPromises); - console.log('✅ Both nodes stopped'); + debug('✅ Both nodes stopped'); } // Note: In a real test with actual peer connections, we would verify the connection diff --git a/package.json b/package.json index dcb368c..0c23c35 100644 --- a/package.json +++ b/package.json @@ -24,12 +24,19 @@ "/__tests__/jest-setup.ts" ], "transform": { "^.+\\.tsx?$": [ "ts-jest", { "tsconfig": "tsconfig.json" } ] + }, + "detectOpenHandles": true, + "forceExit": true, + "verbose": true, + "testEnvironmentOptions": { + "NODE_ENV": "test", + "DEBUG": "rz:*" } }, "author": "Taliesin (Ladd) ", diff --git a/scratch/jsonlogic.ts b/scratch/jsonlogic.ts index 73c1c62..eb12446 100644 --- a/scratch/jsonlogic.ts +++ b/scratch/jsonlogic.ts @@ -1,13 +1,20 @@ +import Debug from 'debug'; import jsonLogic from 'json-logic-js'; + +const debug = Debug('rz:scratch:jsonlogic'); const { apply } = 
jsonLogic; -console.log(apply({"map":[ +// Example of using jsonLogic's map operation +const mapResult = apply({"map":[ {"var":"integers"}, {"*":[{"var":""},2]} -]}, {"integers":[1,2,3,4,5]})); +]}, {"integers":[1,2,3,4,5]}); +debug('Map result: %o', mapResult); -console.log(apply({"reduce":[ +// Example of using jsonLogic's reduce operation +const reduceResult = apply({"reduce":[ {"var":"integers"}, {"+":[{"var":"current"}, {"var":"accumulator"}]}, 0 -]}, {"integers":[1,2,3,4,5]})); +]}, {"integers":[1,2,3,4,5]}); +debug('Reduce result: %o', reduceResult); diff --git a/src/http/api.ts b/src/http/api.ts index 1e4d322..6eb03e1 100644 --- a/src/http/api.ts +++ b/src/http/api.ts @@ -1,8 +1,11 @@ +import Debug from 'debug'; import express, {Router} from "express"; import {Collection} from "../collections"; import {Delta, DeltaFilter} from "../core"; import {RhizomeNode} from "../node"; +const debug = Debug('rz:http:api'); + export class HttpApi { router = Router(); @@ -169,7 +172,7 @@ export class HttpApi { if (maxResults) options.maxResults = maxResults; if (deltaFilter) { // Note: deltaFilter would need to be serialized/deserialized properly in a real implementation - console.warn('deltaFilter not supported in HTTP API yet'); + debug('deltaFilter not supported in HTTP API yet'); } const result = await this.rhizomeNode.queryEngine.query(schemaId, filter, options); diff --git a/src/orchestration/docker-orchestrator/index.ts b/src/orchestration/docker-orchestrator/index.ts index b9762b9..021a60c 100644 --- a/src/orchestration/docker-orchestrator/index.ts +++ b/src/orchestration/docker-orchestrator/index.ts @@ -8,6 +8,9 @@ import { ResourceManager } from './managers/resource-manager'; import { StatusManager } from './managers/status-manager'; import { ImageManager } from './managers/image-manager'; import { getRandomPort } from './utils/port-utils'; +import Debug from 'debug'; + +const debug = Debug('rz:docker:orchestrator'); const DEFAULT_OPTIONS: DockerOrchestratorOptions = { image: 'rhizome-node-test', @@ -33,13 +36,12 @@ export class DockerOrchestrator extends BaseOrchestrator { super(); this.options = { ...DEFAULT_OPTIONS, ...options }; - // Initialize Docker client in managers - const dockerOptions = this.options.dockerOptions || {}; - this.containerManager = new ContainerManager(dockerOptions); - this.networkManager = new NetworkManager(dockerOptions); + // Initialize managers + this.containerManager = new ContainerManager(); + this.networkManager = new NetworkManager(); this.resourceManager = new ResourceManager(); this.statusManager = new StatusManager(); - this.imageManager = new ImageManager(dockerOptions); + this.imageManager = new ImageManager(); } /** @@ -117,7 +119,7 @@ export class DockerOrchestrator extends BaseOrchestrator { await this.containerManager.removeContainer(container); } catch (error) { const errorMessage = error instanceof Error ? error.message : 'Unknown error'; - console.warn(`Error managing container ${nodeId}:`, errorMessage); + debug(`Error managing container ${nodeId}: %s`, errorMessage); // Continue with cleanup even if container operations fail } @@ -128,7 +130,7 @@ export class DockerOrchestrator extends BaseOrchestrator { await this.networkManager.removeNetwork(network.id); } catch (error) { const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; - console.warn(`Error removing network for node ${nodeId}:`, errorMessage); + debug(`Error removing network for node ${nodeId}: %s`, errorMessage); } finally { this.networks.delete(nodeId); } @@ -141,10 +143,10 @@ export class DockerOrchestrator extends BaseOrchestrator { this.containers.delete(nodeId); this.nodeHandles.delete(nodeId); - console.log(`Stopped and cleaned up node ${nodeId}`); + debug(`Stopped and cleaned up node ${nodeId}`); } catch (error) { const errorMessage = error instanceof Error ? error.message : 'Unknown error'; - console.error(`Error during cleanup of node ${nodeId}:`, errorMessage); + debug(`Error during cleanup of node ${nodeId}: %s`, errorMessage); throw new Error(`Failed to stop node ${nodeId}: ${errorMessage}`); } } @@ -164,7 +166,7 @@ export class DockerOrchestrator extends BaseOrchestrator { (logStream as { end: () => void }).end(); } } catch (error) { - console.warn(`Error cleaning up log stream for node ${nodeId}:`, error); + debug(`Error cleaning up log stream for node ${nodeId}: %o`, error); } finally { this.containerLogStreams.delete(nodeId); } @@ -200,7 +202,7 @@ export class DockerOrchestrator extends BaseOrchestrator { return await this.statusManager.getNodeStatus(handle, container); } catch (error) { const errorMessage = error instanceof Error ? error.message : 'Unknown error'; - console.error(`Error getting status for node ${handle.id}:`, errorMessage); + debug(`Error getting status for node ${handle.id}: %s`, errorMessage); return { id: handle.id, @@ -229,7 +231,7 @@ export class DockerOrchestrator extends BaseOrchestrator { // 1. Create separate networks for each partition // 2. Connect containers to their respective partition networks // 3. Disconnect them from other networks - console.warn('Network partitioning not fully implemented'); + debug('Network partitioning not fully implemented'); } /** @@ -252,9 +254,9 @@ export class DockerOrchestrator extends BaseOrchestrator { memorySwap: limits.memory // Default to same as memory limit if not specified }); - console.log(`Updated resource limits for node ${handle.id}:`, limits); + debug(`Updated resource limits for node %s: %o`, handle.id, limits); } catch (error) { - console.error(`Failed to update resource limits for node ${handle.id}:`, error); + debug(`Failed to update resource limits for node ${handle.id}: %o`, error); throw new Error(`Failed to update resource limits: ${error instanceof Error ? 
error.message : 'Unknown error'}`); } } @@ -296,7 +298,7 @@ export class DockerOrchestrator extends BaseOrchestrator { // Update the network ID in the second handle dockerHandle2.networkId = networkId; } catch (error) { - console.error(`Error connecting nodes ${handle1.id} and ${handle2.id}:`, error); + debug(`Error connecting nodes ${handle1.id} and ${handle2.id}: %o`, error); throw error; } } @@ -308,7 +310,7 @@ export class DockerOrchestrator extends BaseOrchestrator { * @private */ private async cleanupFailedStart(nodeId: string): Promise { - console.log(`Cleaning up failed start for node ${nodeId}...`); + debug(`Cleaning up failed start for node ${nodeId}...`); // Get references to resources before starting cleanup const container = this.containers.get(nodeId); @@ -337,23 +339,23 @@ export class DockerOrchestrator extends BaseOrchestrator { // Log any errors that occurred during cleanup if (containerErrors.length > 0) { - console.warn(`Encountered ${containerErrors.length} error(s) while cleaning up containers for node ${nodeId}:`); + debug(`Encountered ${containerErrors.length} error(s) while cleaning up containers for node ${nodeId}:`); containerErrors.forEach(({ resource, error }) => { console.warn(`- ${resource}:`, error instanceof Error ? error.message : 'Unknown error'); }); } if (networkErrors.length > 0) { - console.warn(`Encountered ${networkErrors.length} error(s) while cleaning up networks for node ${nodeId}:`); + debug(`Encountered ${networkErrors.length} error(s) while cleaning up networks for node ${nodeId}:`); networkErrors.forEach(({ resource, error }) => { console.warn(`- ${resource}:`, error instanceof Error ? error.message : 'Unknown error'); }); } - console.log(`Completed cleanup for node ${nodeId}`); + debug(`Completed cleanup for node ${nodeId}`); } catch (error) { const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; - console.error(`Unexpected error during cleanup of node ${nodeId}:`, errorMessage); + debug(`Unexpected error during cleanup of node ${nodeId}: %s`, errorMessage); } finally { // Always clean up internal state, even if errors occurred this.containers.delete(nodeId); @@ -379,7 +381,7 @@ export class DockerOrchestrator extends BaseOrchestrator { try { return await this.containerManager.getContainer(containerId); } catch (error) { - console.warn(`Failed to get container ${containerId}:`, error); + debug(`Failed to get container ${containerId}: %o`, error); return undefined; } } @@ -388,7 +390,7 @@ export class DockerOrchestrator extends BaseOrchestrator { * Clean up all resources */ async cleanup(): Promise { - console.log('Starting cleanup of all resources...'); + debug('Starting cleanup of all resources...'); // Create copies of the maps to avoid modification during iteration const containersToCleanup = new Map(this.containers); @@ -396,35 +398,35 @@ export class DockerOrchestrator extends BaseOrchestrator { try { // First, clean up all containers - console.log('Stopping and removing all containers...'); + debug('Stopping and removing all containers...'); const containerErrors = await this.containerManager.cleanupContainers(containersToCleanup); // Wait a short time to ensure all container cleanup is complete await new Promise(resolve => setTimeout(resolve, 1000)); // Then clean up all networks - console.log('Removing all networks...'); + debug('Removing all networks...'); const networkErrors = await this.networkManager.cleanupNetworks(networksToCleanup); // Log any errors that occurred during cleanup if (containerErrors.length > 0) { - console.warn(`Encountered ${containerErrors.length} error(s) while cleaning up containers:`); + debug(`Encountered ${containerErrors.length} error(s) while cleaning up containers:`); containerErrors.forEach(({ resource, error }) => { console.warn(`- ${resource}:`, error instanceof Error ? error.message : 'Unknown error'); }); } if (networkErrors.length > 0) { - console.warn(`Encountered ${networkErrors.length} error(s) while cleaning up networks:`); + debug(`Encountered ${networkErrors.length} error(s) while cleaning up networks:`); networkErrors.forEach(({ resource, error }) => { console.warn(`- ${resource}:`, error instanceof Error ? error.message : 'Unknown error'); }); } - console.log('Completed cleanup of all resources'); + debug('Completed cleanup of all resources'); } catch (error) { const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; - console.error('Unexpected error during cleanup:', errorMessage); + debug('Unexpected error during cleanup: %s', errorMessage); throw error; // Re-throw to allow callers to handle the error } finally { // Always clear internal state, even if errors occurred diff --git a/src/orchestration/docker-orchestrator/managers/container-manager.ts b/src/orchestration/docker-orchestrator/managers/container-manager.ts index a14f3a6..0ac98a6 100644 --- a/src/orchestration/docker-orchestrator/managers/container-manager.ts +++ b/src/orchestration/docker-orchestrator/managers/container-manager.ts @@ -1,12 +1,15 @@ +import Debug from 'debug'; import Docker, { Container, DockerOptions } from 'dockerode'; import { IContainerManager } from './interfaces'; import { NodeConfig, NodeStatus } from '../../types'; +const debug = Debug('rz:docker:container-manager'); + export class ContainerManager implements IContainerManager { private docker: Docker; - constructor(dockerOptions?: DockerOptions) { - this.docker = new Docker(dockerOptions); + constructor() { + this.docker = new Docker(); } async createContainer( @@ -28,17 +31,56 @@ export class ContainerManager implements IContainerManager { // Add resource limits if specified if (config.resources) { if (config.resources.cpu) { - // Ensure CpuShares is an integer (Docker requires this) - hostConfig.CpuShares = Math.floor(config.resources.cpu * 1024); // Convert to relative CPU shares (1024 = 1 CPU) - hostConfig.NanoCpus = Math.floor(config.resources.cpu * 1e9); // Convert to nanoCPUs (1e9 = 1 CPU) + // Convert CPU cores to nanoCPUs (1 CPU = 1e9 nanoCPUs) + hostConfig.NanoCpus = config.resources.cpu * 1e9; } + if (config.resources.memory) { - hostConfig.Memory = Math.floor(config.resources.memory * 1024 * 1024); // Convert MB to bytes - hostConfig.MemorySwap = hostConfig.Memory; // Disable swap + hostConfig.Memory = config.resources.memory * 1024 * 1024; // Convert MB to bytes + hostConfig.MemorySwap = config.resources.memorySwap + ? 
config.resources.memorySwap * 1024 * 1024 + : config.resources.memory * 2 * 1024 * 1024; // Default swap to 2x memory } } + + // Increase file descriptor limits + hostConfig.Ulimits = [ + { + Name: 'nofile', + Soft: 65536, + Hard: 65536 + } + ]; + + // Set environment variables to optimize performance and disable file watching + const envVars = [ + // Node.js and memory settings + `NODE_OPTIONS=--max-old-space-size=${Math.floor((config.resources?.memory || 512) * 0.8)}`, + 'NODE_ENV=test', + + // Network configuration + `RHIZOME_HTTP_BIND_PORT=${config.network?.port || 3000}`, + 'RHIZOME_HTTP_BIND_ADDR=0.0.0.0', + `RHIZOME_REQUEST_BIND_PORT=${config.network?.requestPort || 3001}`, + 'RHIZOME_REQUEST_BIND_ADDR=0.0.0.0', + `RHIZOME_PUBLISH_BIND_PORT=${(config.network?.requestPort || 3001) + 1}`, + 'RHIZOME_PUBLISH_BIND_ADDR=0.0.0.0', + + // Application settings + 'RHIZOME_STORAGE_TYPE=memory', + 'RHIZOME_HTTP_API_ENABLE=true', + `RHIZOME_PEER_ID=${nodeId}`, + + // Disable unnecessary features for testing + 'DISABLE_HTTP_HTML=true', + 'DISABLE_MARKDOWN=true', + + // Debug settings + 'DEBUG=rz:*,rhizome:*,docker:*', + 'DEBUG_COLORS=true' + ]; - // Create container configuration + // Create container configuration with all environment variables const containerConfig: Docker.ContainerCreateOptions = { name: containerName, Image: 'rhizome-node-test', @@ -48,19 +90,13 @@ export class ContainerManager implements IContainerManager { }, HostConfig: hostConfig, Env: [ + ...envVars, 'NODE_ENV=test', 'DEBUG=*', `RHIZOME_HTTP_API_PORT=${config.network?.port || 3000}`, - `RHIZOME_HTTP_API_ADDR=0.0.0.0`, - `RHIZOME_HTTP_API_ENABLE=true`, - `RHIZOME_REQUEST_BIND_PORT=${config.network?.requestPort || 3001}`, - 'RHIZOME_REQUEST_BIND_ADDR=0.0.0.0', - `RHIZOME_PUBLISH_BIND_PORT=${(config.network?.requestPort || 3001) + 1}`, - 'RHIZOME_PUBLISH_BIND_ADDR=0.0.0.0', - 'RHIZOME_STORAGE_TYPE=memory', - `RHIZOME_PEER_ID=${nodeId}`, - // TODO: include seed peers - ], + 'RHIZOME_HTTP_API_ADDR=0.0.0.0', + 'RHIZOME_HTTP_API_ENABLE=true' + ] }; try { @@ -74,9 +110,26 @@ export class ContainerManager implements IContainerManager { async startContainer(container: Container): Promise { try { + debug(`Starting container ${container.id}`); await container.start(); + + // Verify the container is actually running + const containerInfo = await this.verifyContainerRunning(container); + debug(`Container ${container.id} started with status:`, containerInfo.State); + } catch (error) { - throw new Error(`Failed to start container: ${error instanceof Error ? error.message : 'Unknown error'}`); + // Get container logs for debugging + let logs = ''; + try { + logs = await this.getContainerLogs(container); + } catch (logError) { + debug('Failed to get container logs:', logError); + } + + throw new Error( + `Failed to start container: ${error instanceof Error ? 
error.message : 'Unknown error'}\n` + + `Container logs:\n${logs}` + ); } } @@ -84,7 +137,7 @@ export class ContainerManager implements IContainerManager { try { await container.stop({ t: 1 }); } catch (error) { - console.warn('Error stopping container:', error); + debug('Error stopping container: %o', error); throw error; } } @@ -93,7 +146,7 @@ export class ContainerManager implements IContainerManager { try { await container.remove({ force: true }); } catch (error) { - console.warn('Error removing container:', error); + debug('Error removing container: %o', error); throw error; } } @@ -152,38 +205,38 @@ export class ContainerManager implements IContainerManager { // Process containers in sequence to avoid overwhelming the Docker daemon for (const [nodeId, container] of containers.entries()) { try { - console.log(`[Cleanup] Stopping container ${nodeId}...`); + debug(`[Cleanup] Stopping container ${nodeId}...`); try { // First, try to stop the container gracefully await this.stopContainer(container); - console.log(`[Cleanup] Successfully stopped container ${nodeId}`); + debug(`[Cleanup] Successfully stopped container ${nodeId}`); } catch (stopError) { - console.warn(`[Cleanup] Failed to stop container ${nodeId}:`, stopError); + debug(`[Cleanup] Failed to stop container ${nodeId}: %o`, stopError); // Continue with force removal even if stop failed } // Now remove the container - console.log(`[Cleanup] Removing container ${nodeId}...`); + debug(`[Cleanup] Removing container ${nodeId}...`); await this.removeContainer(container); - console.log(`[Cleanup] Successfully removed container ${nodeId}`); + debug(`[Cleanup] Successfully removed container ${nodeId}`); // Verify the container is actually gone try { const containerInfo = await container.inspect(); - console.warn(`[Cleanup] Container ${nodeId} still exists after removal:`, containerInfo.State?.Status); + debug(`[Cleanup] Container ${nodeId} still exists after removal: %s`, containerInfo.State?.Status); cleanupErrors.push({ resource: `container:${nodeId}`, error: new Error(`Container still exists after removal: ${containerInfo.State?.Status}`) }); } catch (inspectError) { // Expected - container should not exist anymore - console.log(`[Cleanup] Verified container ${nodeId} has been removed`); + debug(`[Cleanup] Verified container ${nodeId} has been removed`); } } catch (error) { const err = error instanceof Error ? 
error : new Error(String(error)); - console.error(`[Cleanup] Error cleaning up container ${nodeId}:`, err); + debug(`[Cleanup] Error cleaning up container ${nodeId}: %o`, err); cleanupErrors.push({ resource: `container:${nodeId}`, error: err }); } diff --git a/src/orchestration/docker-orchestrator/managers/image-manager.ts b/src/orchestration/docker-orchestrator/managers/image-manager.ts index bacc02d..5d59802 100644 --- a/src/orchestration/docker-orchestrator/managers/image-manager.ts +++ b/src/orchestration/docker-orchestrator/managers/image-manager.ts @@ -4,6 +4,9 @@ import { promises as fs } from 'fs'; import * as tar from 'tar-fs'; import { Headers } from 'tar-fs'; import { IImageManager } from './interfaces'; +import Debug from 'debug'; + +const debug = Debug('rz:docker:image-manager'); // Global promise to track test image build let testImageBuildPromise: Promise | null = null; @@ -11,8 +14,8 @@ let testImageBuildPromise: Promise | null = null; export class ImageManager implements IImageManager { private docker: Docker; - constructor(dockerOptions?: DockerOptions) { - this.docker = new Docker(dockerOptions); + constructor() { + this.docker = new Docker(); } /** @@ -20,17 +23,17 @@ export class ImageManager implements IImageManager { */ async buildTestImage(imageName: string = 'rhizome-node-test'): Promise { if (testImageBuildPromise) { - console.log('Test image build in progress, reusing existing build promise...'); + debug('Test image build in progress, reusing existing build promise...'); return testImageBuildPromise; } - console.log('Building test Docker image...'); + debug('Building test Docker image...'); const dockerfilePath = path.join(process.cwd(), 'Dockerfile.test'); // Verify Dockerfile exists try { await fs.access(dockerfilePath); - console.log(`Found Dockerfile at: ${dockerfilePath}`); + debug(`Found Dockerfile at: %s`, dockerfilePath); } catch (err) { throw new Error(`Dockerfile not found at ${dockerfilePath}: ${err}`); } @@ -57,7 +60,7 @@ export class ImageManager implements IImageManager { } }); - console.log('Created build context tar stream'); + debug('Created build context tar stream'); testImageBuildPromise = new Promise((resolve, reject) => { const logMessages: string[] = []; @@ -123,7 +126,7 @@ export class ImageManager implements IImageManager { } catch (e) { const errorMsg = `Error processing build output: ${e}\nRaw output: ${chunkStr}`; log(`❌ ${errorMsg}`); - console.error(errorMsg); + debug('Docker build error: %s', errorMsg); } }); diff --git a/src/orchestration/docker-orchestrator/managers/network-manager.ts b/src/orchestration/docker-orchestrator/managers/network-manager.ts index ef611c0..e876fbb 100644 --- a/src/orchestration/docker-orchestrator/managers/network-manager.ts +++ b/src/orchestration/docker-orchestrator/managers/network-manager.ts @@ -1,12 +1,15 @@ -import Docker, { Network, NetworkInspectInfo, DockerOptions } from 'dockerode'; +import Debug from 'debug'; +import Docker, { Network, NetworkInspectInfo } from 'dockerode'; import { INetworkManager } from './interfaces'; +const debug = Debug('rz:docker:network-manager'); + export class NetworkManager implements INetworkManager { private networks: Map = new Map(); private docker: Docker; - constructor(dockerOptions?: DockerOptions) { - this.docker = new Docker(dockerOptions); + constructor() { + this.docker = new Docker(); } async createNetwork(nodeId: string): Promise { @@ -25,7 +28,7 @@ export class NetworkManager implements INetworkManager { this.networks.set(nodeId, network); return 
network; } catch (error) { - console.error(`Error creating network for node ${nodeId}:`, error); + debug(`Error creating network for node ${nodeId}: %o`, error); throw error; } } @@ -43,7 +46,7 @@ export class NetworkManager implements INetworkManager { } } } catch (error) { - console.warn(`Failed to remove network ${networkId}:`, error); + debug(`Failed to remove network ${networkId}: %o`, error); throw error; } } @@ -62,7 +65,7 @@ export class NetworkManager implements INetworkManager { } }); } catch (error) { - console.error(`Failed to connect container ${containerId} to network ${networkId}:`, error); + debug(`Failed to connect container ${containerId} to network ${networkId}: %o`, error); throw error; } } @@ -72,7 +75,7 @@ export class NetworkManager implements INetworkManager { const network = this.docker.getNetwork(networkId); await network.disconnect({ Container: containerId }); } catch (error) { - console.warn(`Failed to disconnect container ${containerId} from network ${networkId}:`, error); + debug(`Failed to disconnect container ${containerId} from network ${networkId}: %o`, error); throw error; } } @@ -93,7 +96,7 @@ export class NetworkManager implements INetworkManager { const network = this.docker.getNetwork(networkId); return await network.inspect(); } catch (error) { - console.error(`Failed to get network info for ${networkId}:`, error); + debug(`Failed to get network info for ${networkId}: %o`, error); throw error; } } @@ -104,22 +107,22 @@ export class NetworkManager implements INetworkManager { // Process networks in sequence to avoid overwhelming the Docker daemon for (const [nodeId, network] of networks.entries()) { try { - console.log(`[Cleanup] Removing network for node ${nodeId}...`); + debug(`[Cleanup] Removing network for node ${nodeId}...`); // First, inspect the network to see if it has any connected containers try { const networkInfo = await this.getNetworkInfo(network.id); if (networkInfo.Containers && Object.keys(networkInfo.Containers).length > 0) { - console.warn(`[Cleanup] Network ${nodeId} still has ${Object.keys(networkInfo.Containers).length} connected containers`); + debug(`[Cleanup] Network ${nodeId} still has ${Object.keys(networkInfo.Containers).length} connected containers`); // Try to disconnect all containers from the network first for (const containerId of Object.keys(networkInfo.Containers)) { try { - console.log(`[Cleanup] Disconnecting container ${containerId} from network ${nodeId}...`); + debug(`[Cleanup] Disconnecting container ${containerId} from network ${nodeId}...`); await this.disconnectFromNetwork(containerId, network.id); - console.log(`[Cleanup] Successfully disconnected container ${containerId} from network ${nodeId}`); + debug(`[Cleanup] Successfully disconnected container ${containerId} from network ${nodeId}`); } catch (disconnectError) { - console.warn(`[Cleanup] Failed to disconnect container ${containerId} from network ${nodeId}:`, disconnectError); + debug(`[Cleanup] Failed to disconnect container ${containerId} from network ${nodeId}: %o`, disconnectError); // Continue with network removal even if disconnect failed } @@ -128,30 +131,30 @@ export class NetworkManager implements INetworkManager { } } } catch (inspectError) { - console.warn(`[Cleanup] Failed to inspect network ${nodeId} before removal:`, inspectError); + debug(`[Cleanup] Failed to inspect network ${nodeId} before removal: %o`, inspectError); // Continue with removal even if inspect failed } // Now remove the network await this.removeNetwork(network.id); - 
console.log(`[Cleanup] Successfully removed network for node ${nodeId}`); + debug(`[Cleanup] Successfully removed network for node ${nodeId}`); // Verify the network is actually gone try { - const networkInfo = await this.getNetworkInfo(network.id); - console.warn(`[Cleanup] Network ${nodeId} still exists after removal`); + await this.getNetworkInfo(network.id); + debug(`[Cleanup] Network ${nodeId} still exists after removal`); cleanupErrors.push({ resource: `network:${nodeId}`, error: new Error('Network still exists after removal') }); } catch (inspectError) { // Expected - network should not exist anymore - console.log(`[Cleanup] Verified network ${nodeId} has been removed`); + debug(`[Cleanup] Verified network ${nodeId} has been removed`); } } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); - console.error(`[Cleanup] Error cleaning up network ${nodeId}:`, err); + debug(`[Cleanup] Error cleaning up network ${nodeId}: %o`, err); cleanupErrors.push({ resource: `network:${nodeId}`, error: err }); } diff --git a/src/orchestration/docker-orchestrator/managers/resource-manager.ts b/src/orchestration/docker-orchestrator/managers/resource-manager.ts index a92e509..3988225 100644 --- a/src/orchestration/docker-orchestrator/managers/resource-manager.ts +++ b/src/orchestration/docker-orchestrator/managers/resource-manager.ts @@ -1,7 +1,63 @@ -import Docker, { Container } from 'dockerode'; +import Debug from 'debug'; +import { Container } from 'dockerode'; import { IResourceManager } from './interfaces'; +const debug = Debug('rz:docker:resource-manager'); + +// Define the structure of the Docker stats object +interface ContainerStats { + cpu_stats: { + cpu_usage: { + total_usage: number; + usage_in_kernelmode?: number; + usage_in_usermode?: number; + }; + system_cpu_usage: number; + online_cpus?: number; + throttling_data?: Record; + }; + precpu_stats: { + cpu_usage: { + total_usage: number; + usage_in_kernelmode?: number; + usage_in_usermode?: number; + }; + system_cpu_usage: number; + online_cpus?: number; + throttling_data?: Record; + }; + memory_stats: { + usage?: number; + max_usage?: number; + limit?: number; + stats?: { + total_rss?: number; + [key: string]: unknown; + }; + usage_in_bytes?: number; + limit_in_bytes?: number; + [key: string]: unknown; + }; + [key: string]: unknown; +} + +// Type guard to check if an object is a Node.js ReadableStream +function isReadableStream(obj: unknown): obj is NodeJS.ReadableStream { + return ( + obj !== null && + typeof obj === 'object' && + typeof (obj as any).pipe === 'function' && + typeof (obj as any).on === 'function' + ); +} + export class ResourceManager implements IResourceManager { + private debug = debug.extend('ResourceManager'); + + constructor() { + this.debug('ResourceManager initialized'); + } + async setResourceLimits( container: Container, limits: { @@ -12,16 +68,16 @@ export class ResourceManager implements IResourceManager { ): Promise { try { const updateConfig: any = {}; - + if (limits.cpu !== undefined) { updateConfig.CpuShares = limits.cpu; updateConfig.NanoCpus = limits.cpu * 1e9; // Convert to nanoCPUs } - + if (limits.memory !== undefined) { updateConfig.Memory = limits.memory * 1024 * 1024; // Convert MB to bytes - updateConfig.MemorySwap = limits.memorySwap !== undefined - ? limits.memorySwap * 1024 * 1024 + updateConfig.MemorySwap = limits.memorySwap !== undefined + ? 
limits.memorySwap * 1024 * 1024 : updateConfig.Memory; // Default to same as memory if not specified } @@ -38,34 +94,176 @@ export class ResourceManager implements IResourceManager { memory: { usage: number; limit: number }; }> { try { + this.debug('Getting container stats...'); + + // Get container stats with stream:false to get a single stats object const stats = await container.stats({ stream: false }); - const statsData = JSON.parse(stats.toString()); - - const cpuDelta = statsData.cpu_stats.cpu_usage.total_usage - (statsData.precpu_stats?.cpu_usage?.total_usage || 0); - const systemDelta = statsData.cpu_stats.system_cpu_usage - (statsData.precpu_stats?.system_cpu_usage || 0); - const cpuCores = statsData.cpu_stats.online_cpus || 1; - - let cpuPercent = 0; - if (systemDelta > 0 && cpuDelta > 0) { - cpuPercent = (cpuDelta / systemDelta) * cpuCores * 100.0; + + // Log the raw stats type and constructor for debugging + this.debug('Raw stats type: %s', typeof stats); + this.debug('Raw stats constructor: %s', stats?.constructor?.name); + + // Handle the response based on its type + let statsData: ContainerStats; + + if (typeof stats === 'string') { + // If it's a string, parse it as JSON + this.debug('Stats is a string, parsing JSON'); + try { + statsData = JSON.parse(stats) as ContainerStats; + } catch (error) { + this.debug('Failed to parse stats JSON: %o', error); + throw new Error(`Failed to parse stats JSON: ${error instanceof Error ? error.message : 'Unknown error'}`); + } + } else if (stats && typeof stats === 'object') { + // Check if it's a Node.js stream using our type guard + if (isReadableStream(stats)) { + this.debug('Stats is a stream, reading data...'); + // Convert the stream to a string and parse as JSON + const statsString = await this.streamToString(stats); + try { + statsData = JSON.parse(statsString) as ContainerStats; + this.debug('Successfully parsed streamed stats'); + } catch (error) { + this.debug('Failed to parse streamed stats: %o', error); + throw new Error(`Failed to parse streamed stats: ${error instanceof Error ? 
error.message : 'Unknown error'}`); + } + } else { + // If it's already an object, use it directly + this.debug('Stats is a plain object'); + statsData = stats as unknown as ContainerStats; + } + } else { + throw new Error(`Unexpected stats type: ${typeof stats}`); } - - return { - cpu: { - usage: parseFloat(cpuPercent.toFixed(2)), - limit: cpuCores * 100, // Percentage of total CPU - }, - memory: { - usage: statsData.memory_stats.usage || 0, - limit: statsData.memory_stats.limit || 0, - }, - }; - } catch (error) { - console.error('Error getting resource usage:', error); + + // Calculate and return the resource usage + return this.calculateResourceUsage(statsData); + } catch (error: unknown) { + this.debug('Error in getResourceUsage: %o', error); + // Return default values on error return { cpu: { usage: 0, limit: 0 }, memory: { usage: 0, limit: 0 }, }; } } + + /** + * Convert a ReadableStream to a string + */ + private streamToString(stream: NodeJS.ReadableStream): Promise { + return new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + + stream.on('data', (chunk: unknown) => { + if (Buffer.isBuffer(chunk)) { + chunks.push(chunk); + } else if (typeof chunk === 'string') { + chunks.push(Buffer.from(chunk, 'utf8')); + } else if (chunk instanceof Uint8Array) { + chunks.push(Buffer.from(chunk)); + } else { + this.debug('Unexpected chunk type: %s', typeof chunk); + reject(new Error(`Unexpected chunk type: ${typeof chunk}`)); + } + }); + + stream.on('end', () => { + try { + const result = Buffer.concat(chunks).toString('utf8'); + resolve(result); + } catch (error) { + this.debug('Error concatenating chunks: %o', error); + reject(error); + } + }); + + stream.on('error', (error: Error) => { + this.debug('Error reading stream: %o', error); + reject(error); + }); + }); + } + + /** + * Calculate resource usage from stats data + */ + private calculateResourceUsage(statsData: ContainerStats): { + cpu: { usage: number; limit: number }; + memory: { usage: number; limit: number }; + } { + this.debug('Calculating resource usage from stats data'); + + // Log detailed CPU stats if available + if (statsData.cpu_stats) { + this.debug('CPU stats: %o', { + online_cpus: statsData.cpu_stats.online_cpus, + cpu_usage: statsData.cpu_stats.cpu_usage, + system_cpu_usage: statsData.cpu_stats.system_cpu_usage, + }); + } else { + this.debug('No CPU stats available'); + } + + // Log detailed memory stats if available + if (statsData.memory_stats) { + this.debug('Memory stats: %o', { + usage: statsData.memory_stats.usage, + max_usage: statsData.memory_stats.max_usage, + limit: statsData.memory_stats.limit, + stats: statsData.memory_stats.stats, + }); + } else { + this.debug('No memory stats available'); + } + + // Calculate CPU usage percentage + let cpuPercent = 0; + const cpuCores = statsData.cpu_stats?.online_cpus || 1; + + // Check if we have the necessary data for CPU calculation + if (statsData.cpu_stats?.cpu_usage?.total_usage !== undefined && + statsData.precpu_stats?.cpu_usage?.total_usage !== undefined) { + const cpuDelta = statsData.cpu_stats.cpu_usage.total_usage - + (statsData.precpu_stats.cpu_usage.total_usage || 0); + const systemDelta = statsData.cpu_stats.system_cpu_usage - + (statsData.precpu_stats.system_cpu_usage || 0); + + this.debug('CPU delta: %d, System delta: %d', cpuDelta, systemDelta); + + if (systemDelta > 0 && cpuDelta > 0) { + cpuPercent = (cpuDelta / systemDelta) * cpuCores * 100.0; + this.debug('Calculated CPU percent: %d%%', cpuPercent); + } + } else { + 
this.debug('Insufficient CPU stats data for calculation'); + this.debug('Available CPU stats: %o', statsData.cpu_stats); + this.debug('Available precpu_stats: %o', statsData.precpu_stats); + } + + // Get memory usage with fallbacks + const memoryUsage = statsData.memory_stats?.usage || + statsData.memory_stats?.stats?.total_rss || + statsData.memory_stats?.usage_in_bytes || + 0; + + const memoryLimit = statsData.memory_stats?.limit || + statsData.memory_stats?.max_usage || + statsData.memory_stats?.limit_in_bytes || + 0; + + this.debug('Memory usage: %d / %d bytes', memoryUsage, memoryLimit); + + return { + cpu: { + usage: cpuPercent, + limit: 100, // 100% CPU limit as a percentage + }, + memory: { + usage: memoryUsage, + limit: memoryLimit || 0, // Ensure we don't return undefined + }, + }; + } } diff --git a/src/orchestration/docker-orchestrator/managers/status-manager.ts b/src/orchestration/docker-orchestrator/managers/status-manager.ts index 7853383..fafe851 100644 --- a/src/orchestration/docker-orchestrator/managers/status-manager.ts +++ b/src/orchestration/docker-orchestrator/managers/status-manager.ts @@ -1,6 +1,9 @@ import Docker, { Container } from 'dockerode'; import { IStatusManager } from './interfaces'; import { NodeHandle, NodeStatus } from '../../types'; +import Debug from 'debug'; + +const debug = Debug('rz:docker:status-manager'); const DEFAULT_MAX_ATTEMPTS = 8; const DEFAULT_DELAY_MS = 1000; @@ -13,7 +16,7 @@ export class StatusManager implements IStatusManager { maxAttempts: number = DEFAULT_MAX_ATTEMPTS, initialDelayMs: number = DEFAULT_DELAY_MS ): Promise { - console.log(`[waitForNodeReady] Starting with port ${port}, maxAttempts: ${maxAttempts}, initialDelayMs: ${initialDelayMs}`); + debug(`[waitForNodeReady] Starting with port ${port}, maxAttempts: ${maxAttempts}, initialDelayMs: ${initialDelayMs}`); let lastError: Error | null = null; let attempt = 0; let delay = initialDelayMs; @@ -23,7 +26,7 @@ export class StatusManager implements IStatusManager { const attemptStartTime = Date.now(); try { - console.log(`[Attempt ${attempt}/${maxAttempts}] Verifying container is running...`); + debug(`[Attempt ${attempt}/${maxAttempts}] Verifying container is running...`); // Add timeout to verifyContainerRunning const verifyPromise = this.verifyContainerRunning(container); @@ -32,10 +35,10 @@ export class StatusManager implements IStatusManager { ); await Promise.race([verifyPromise, timeoutPromise]); - console.log(`[Attempt ${attempt}/${maxAttempts}] Container is running`); + debug(`[Attempt ${attempt}/${maxAttempts}] Container is running`); const healthUrl = `http://localhost:${port}/api/health`; - console.log(`[Attempt ${attempt}/${maxAttempts}] Checking health at: ${healthUrl}`); + debug(`[Attempt ${attempt}/${maxAttempts}] Checking health at: ${healthUrl}`); // Add timeout to health check const healthCheckPromise = this.healthCheck(healthUrl); @@ -46,7 +49,7 @@ export class StatusManager implements IStatusManager { const response = await Promise.race([healthCheckPromise, healthCheckTimeout]); if (response.ok) { - console.log(`✅ Node is ready! (Attempt ${attempt}/${maxAttempts})`); + debug(`✅ Node is ready! (Attempt ${attempt}/${maxAttempts})`); return; // Success! } @@ -56,12 +59,12 @@ export class StatusManager implements IStatusManager { lastError = error instanceof Error ? 
error : new Error(errorMessage); const attemptDuration = Date.now() - attemptStartTime; - console.warn(`[Attempt ${attempt}/${maxAttempts}] Failed after ${attemptDuration}ms: ${errorMessage}`); + debug(`[Attempt ${attempt}/${maxAttempts}] Failed after ${attemptDuration}ms: %s`, errorMessage); // Log container state on error try { const containerInfo = await container.inspect(); - console.log(`[Container State] Status: ${containerInfo.State.Status}, Running: ${containerInfo.State.Running}, ExitCode: ${containerInfo.State.ExitCode}`); + debug(`[Container State] Status: ${containerInfo.State.Status}, Running: ${containerInfo.State.Running}, ExitCode: ${containerInfo.State.ExitCode}`); // Log recent container logs on error if (containerInfo.State.Running) { @@ -72,20 +75,20 @@ export class StatusManager implements IStatusManager { tail: 20, timestamps: true, }); - console.log(`[Container Logs] Last 20 lines:\n${logs.toString()}`); + debug(`[Container Logs] Last 20 lines:\n${logs.toString()}`); } catch (logError) { - console.warn('Failed to get container logs:', logError); + debug('Failed to get container logs: %o', logError); } } } catch (inspectError) { - console.warn('Failed to inspect container:', inspectError); + debug('Failed to inspect container: %o', inspectError); } // Exponential backoff with jitter, but don't wait if we're out of attempts if (attempt < maxAttempts) { const jitter = Math.random() * 1000; // Add up to 1s of jitter const backoff = Math.min(delay + jitter, MAX_BACKOFF_MS); - console.log(`[Backoff] Waiting ${Math.round(backoff)}ms before next attempt...`); + debug(`[Backoff] Waiting ${Math.round(backoff)}ms before next attempt...`); await new Promise(resolve => setTimeout(resolve, backoff)); delay = Math.min(delay * 2, MAX_BACKOFF_MS); // Double the delay for next time, up to max } @@ -94,7 +97,7 @@ export class StatusManager implements IStatusManager { // If we get here, all attempts failed const errorMessage = `Node did not become ready after ${maxAttempts} attempts. Last error: ${lastError?.message || 'Unknown error'}`; - console.error('❌', errorMessage); + debug('❌ %s', errorMessage); // Final attempt to get container logs before failing try { @@ -105,11 +108,9 @@ export class StatusManager implements IStatusManager { timestamps: true, follow: false }); - console.error('=== FINAL CONTAINER LOGS ==='); - console.error(logs.toString()); - console.error('=== END CONTAINER LOGS ==='); + debug('=== FINAL CONTAINER LOGS ===\n%s\n=== END CONTAINER LOGS ===', logs.toString()); } catch (logError) { - console.error('Failed to get final container logs:', logError); + debug('Failed to get final container logs: %o', logError); } throw new Error(errorMessage); @@ -154,9 +155,49 @@ export class StatusManager implements IStatusManager { } private async verifyContainerRunning(container: Container): Promise { - const info = await container.inspect(); - if (!info.State.Running) { - throw new Error(`Container is not running. 
Status: ${info.State.Status}`); + debug('[verifyContainerRunning] Checking container status...'); + + try { + const data = await container.inspect(); + debug('[verifyContainerRunning] Container inspect data:', JSON.stringify({ + Id: data.Id, + Name: data.Name, + State: data.State, + Config: { + Image: data.Config?.Image, + Env: data.Config?.Env?.filter(env => env.startsWith('NODE_') || env.startsWith('DEBUG')), + Cmd: data.Config?.Cmd + }, + HostConfig: { + Memory: data.HostConfig?.Memory, + NanoCpus: data.HostConfig?.NanoCpus, + NetworkMode: data.HostConfig?.NetworkMode + } + }, null, 2)); + + if (!data.State.Running) { + const errorMessage = `Container is not running. Status: ${data.State.Status}, ExitCode: ${data.State.ExitCode}, Error: ${data.State.Error}`; + debug(`[verifyContainerRunning] ${errorMessage}`); + + // Try to get container logs for more context + try { + const logs = await container.logs({ + stdout: true, + stderr: true, + tail: 50 // Get last 50 lines of logs + }); + debug('[verifyContainerRunning] Container logs:', logs.toString()); + } catch (logError) { + debug('[verifyContainerRunning] Failed to get container logs:', logError); + } + + throw new Error(errorMessage); + } + + debug('[verifyContainerRunning] Container is running'); + } catch (error) { + debug('[verifyContainerRunning] Error checking container status:', error); + throw error; } } @@ -248,7 +289,7 @@ export class StatusManager implements IStatusManager { } } catch (error) { const errorMessage = error instanceof Error ? error.message : 'Unknown error'; - console.warn(`Failed to get container stats for ${container.id}:`, errorMessage); + debug(`Failed to get container stats for ${container.id}: %s`, errorMessage); // Update status with error but don't return yet status.status = 'error'; status.error = `Failed to get container stats: ${errorMessage}`; @@ -257,7 +298,7 @@ export class StatusManager implements IStatusManager { return status; } catch (error) { const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; - console.error(`Error getting node status for ${handle.id}:`, errorMessage); + debug(`Error getting node status for ${handle.id}: %s`, errorMessage); return { ...errorStatus, diff --git a/src/orchestration/docker-orchestrator/types.ts b/src/orchestration/docker-orchestrator/types.ts index 6f02657..8b47325 100644 --- a/src/orchestration/docker-orchestrator/types.ts +++ b/src/orchestration/docker-orchestrator/types.ts @@ -7,9 +7,6 @@ export interface DockerNodeHandle extends NodeHandle { } export interface DockerOrchestratorOptions { - /** Custom Docker client options */ - dockerOptions?: Docker.DockerOptions; - /** * Docker image to use for containers * Defaults to 'rhizome-node' if not specified diff --git a/src/orchestration/types.ts b/src/orchestration/types.ts index 24cb8ff..51b2c7b 100644 --- a/src/orchestration/types.ts +++ b/src/orchestration/types.ts @@ -22,6 +22,8 @@ export interface NodeConfig { cpu?: number; /** Memory limit in MB */ memory?: number; + /** Memory swap limit in MB (defaults to 2x memory if not specified) */ + memorySwap?: number; }; /** Storage configuration */ diff --git a/src/storage/factory.ts b/src/storage/factory.ts index 7b854e7..5b50a48 100644 --- a/src/storage/factory.ts +++ b/src/storage/factory.ts @@ -1,7 +1,10 @@ +import Debug from 'debug'; import { DeltaStorage, DeltaQueryStorage, StorageConfig } from './interface'; import { MemoryDeltaStorage } from './memory'; import { LevelDBDeltaStorage } from './leveldb'; +const debug = Debug('rz:storage:factory'); + /** * Factory for creating delta storage instances based on configuration */ @@ -56,10 +59,10 @@ export class StorageFactory { ): Promise { const batchSize = options.batchSize || 1000; - console.log('Starting storage migration...'); + debug('Starting storage migration...'); const allDeltas = await source.getAllDeltas(); - console.log(`Found ${allDeltas.length} deltas to migrate`); + debug(`Found %d deltas to migrate`, allDeltas.length); // Migrate in batches to avoid memory issues for (let i = 0; i < allDeltas.length; i += batchSize) { @@ -69,19 +72,21 @@ export class StorageFactory { await target.storeDelta(delta); } - console.log(`Migrated ${Math.min(i + batchSize, allDeltas.length)} / ${allDeltas.length} deltas`); + debug('Migrated %d / %d deltas', Math.min(i + batchSize, allDeltas.length), allDeltas.length); } - console.log('Migration completed successfully'); + debug('Migration completed successfully'); // Verify migration const sourceStats = await source.getStats(); const targetStats = await target.getStats(); if (sourceStats.totalDeltas !== targetStats.totalDeltas) { - throw new Error(`Migration verification failed: source has ${sourceStats.totalDeltas} deltas, target has ${targetStats.totalDeltas}`); + const errorMsg = `Migration verification failed: source has ${sourceStats.totalDeltas} deltas, target has ${targetStats.totalDeltas}`; + debug(errorMsg); + throw new Error(errorMsg); } - console.log(`Migration verified: ${targetStats.totalDeltas} deltas migrated successfully`); + debug('Migration verified: %d deltas migrated successfully', targetStats.totalDeltas); } } \ No newline at end of file
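
Note (reviewer sketch, not part of the patch): the CPU figure produced by calculateResourceUsage follows the usual delta-based formula computed from dockerode's cpu_stats / precpu_stats pair. A minimal standalone TypeScript sketch of that calculation, with made-up sample numbers, for reference:

    // Sketch of the delta-based CPU percentage used in calculateResourceUsage.
    // `current` and `previous` mirror cpu_stats / precpu_stats from container.stats();
    // the example values below are illustrative only.
    interface CpuSample {
      totalUsage: number;   // cpu_stats.cpu_usage.total_usage
      systemUsage: number;  // cpu_stats.system_cpu_usage
      onlineCpus: number;   // cpu_stats.online_cpus
    }

    function cpuPercent(current: CpuSample, previous: CpuSample): number {
      const cpuDelta = current.totalUsage - previous.totalUsage;
      const systemDelta = current.systemUsage - previous.systemUsage;
      if (cpuDelta <= 0 || systemDelta <= 0) return 0;
      return (cpuDelta / systemDelta) * current.onlineCpus * 100;
    }

    // A container that consumed 25% of one core over the sampling window:
    // cpuPercent({ totalUsage: 2_500_000, systemUsage: 10_000_000, onlineCpus: 1 },
    //            { totalUsage: 0,         systemUsage: 0,          onlineCpus: 1 }) === 25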
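
Note (reviewer sketch): the retry loop in waitForNodeReady doubles its delay on each failed attempt, adds up to 1s of random jitter, and caps the wait at MAX_BACKOFF_MS. A self-contained sketch of that schedule follows; the function name and the 30s cap are assumptions for illustration, not the patch's API:

    // Illustrative exponential backoff with jitter, matching the shape of the
    // waitForNodeReady loop: delay doubles per attempt, up to 1s jitter is added,
    // and the wait is capped at maxBackoffMs (assumed 30s here).
    async function retryWithBackoff<T>(
      fn: () => Promise<T>,
      maxAttempts = 8,
      initialDelayMs = 1000,
      maxBackoffMs = 30_000,
    ): Promise<T> {
      let delay = initialDelayMs;
      let lastError: unknown;
      for (let attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          return await fn();
        } catch (error) {
          lastError = error;
          if (attempt < maxAttempts) {
            const jitter = Math.random() * 1000;
            const backoff = Math.min(delay + jitter, maxBackoffMs);
            await new Promise(resolve => setTimeout(resolve, backoff));
            delay = Math.min(delay * 2, maxBackoffMs);
          }
        }
      }
      throw lastError instanceof Error ? lastError : new Error(String(lastError));
    }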
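
Note (reviewer sketch): every rz:* namespace introduced in this patch uses the debug package's hierarchical matching, so all of this logging stays silent unless the DEBUG environment variable selects it (as jest-setup.ts now does for tests). A minimal usage sketch:

    // Sketch of the debug namespacing used throughout this patch.
    // Nothing prints unless DEBUG matches, e.g.:
    //   DEBUG=rz:* npm test          -> all rhizome namespaces
    //   DEBUG=rz:docker:* npm test   -> only the Docker orchestrator managers
    import Debug from 'debug';

    const debug = Debug('rz:docker:resource-manager');  // base namespace
    const child = debug.extend('ResourceManager');      // "rz:docker:resource-manager:ResourceManager"

    debug('plain message');
    child('structured value: %o', { usage: 42, limit: 100 });  // %o pretty-prints objects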