diff --git a/.tests/behavioral-middleware.test.js b/.tests/behavioral-middleware.test.js new file mode 100644 index 0000000..491847b --- /dev/null +++ b/.tests/behavioral-middleware.test.js @@ -0,0 +1,411 @@ +import { jest } from '@jest/globals'; + +// Mock dependencies +jest.unstable_mockModule('../dist/utils/behavioral-detection.js', () => ({ + behavioralDetection: { + config: { enabled: true, Responses: {} }, + isBlocked: jest.fn(), + getRateLimit: jest.fn(), + analyzeRequest: jest.fn() + } +})); + +jest.unstable_mockModule('../dist/utils/network.js', () => ({ + getRealIP: jest.fn() +})); + +jest.unstable_mockModule('../dist/utils/logs.js', () => ({ + plugin: jest.fn(), + error: jest.fn() +})); + +// Import the modules after mocking +const BehavioralDetectionMiddleware = (await import('../dist/utils/behavioral-middleware.js')).default; +const { behavioralDetection } = await import('../dist/utils/behavioral-detection.js'); +const { getRealIP } = await import('../dist/utils/network.js'); +const logs = await import('../dist/utils/logs.js'); + +describe('Behavioral Middleware', () => { + let mockReq, mockRes, mockNext; + let activeTimeouts = []; + let activeImmediates = []; + + // Track async operations for cleanup + const originalSetTimeout = global.setTimeout; + const originalSetImmediate = global.setImmediate; + + global.setTimeout = (fn, delay, ...args) => { + const id = originalSetTimeout(fn, delay, ...args); + activeTimeouts.push(id); + return id; + }; + + global.setImmediate = (fn, ...args) => { + const id = originalSetImmediate(fn, ...args); + activeImmediates.push(id); + return id; + }; + + beforeEach(() => { + jest.clearAllMocks(); + activeTimeouts = []; + activeImmediates = []; + + // Mock Express request object + mockReq = { + url: '/api/test', + method: 'GET', + headers: { + 'user-agent': 'test-agent', + 'x-forwarded-for': '192.168.1.1' + }, + ip: '192.168.1.1' + }; + + // Mock Express response object + mockRes = { + statusCode: 200, + status: jest.fn().mockReturnThis(), + setHeader: jest.fn().mockReturnThis(), + end: jest.fn(), + json: jest.fn(), + send: jest.fn(), + locals: {} + }; + + // Mock next function + mockNext = jest.fn(); + + // Default mock returns + getRealIP.mockReturnValue('192.168.1.1'); + behavioralDetection.isBlocked.mockResolvedValue({ blocked: false }); + behavioralDetection.getRateLimit.mockResolvedValue(null); + behavioralDetection.analyzeRequest.mockResolvedValue({ totalScore: 0, patterns: [] }); + }); + + afterEach(() => { + // Clear any pending timeouts and immediates + activeTimeouts.forEach(id => clearTimeout(id)); + activeImmediates.forEach(id => clearImmediate(id)); + activeTimeouts = []; + activeImmediates = []; + }); + + afterAll(() => { + // Restore original functions + global.setTimeout = originalSetTimeout; + global.setImmediate = originalSetImmediate; + }); + + describe('plugin creation', () => { + test('should create a behavioral detection middleware plugin', () => { + const plugin = BehavioralDetectionMiddleware(); + + expect(plugin.name).toBe('behavioral-detection'); + expect(plugin.priority).toBe(90); + expect(typeof plugin.middleware).toBe('function'); + }); + }); + + describe('middleware execution', () => { + test('should skip processing when behavioral detection is disabled', async () => { + behavioralDetection.config.enabled = false; + const plugin = BehavioralDetectionMiddleware(); + + await plugin.middleware(mockReq, mockRes, mockNext); + + expect(mockNext).toHaveBeenCalled(); + 
expect(behavioralDetection.isBlocked).not.toHaveBeenCalled(); + + // Restore enabled state + behavioralDetection.config.enabled = true; + }); + + test('should process request when behavioral detection is enabled', async () => { + const plugin = BehavioralDetectionMiddleware(); + + await plugin.middleware(mockReq, mockRes, mockNext); + + expect(mockNext).toHaveBeenCalled(); + expect(getRealIP).toHaveBeenCalledWith(mockReq); + }); + + test('should capture client IP correctly', async () => { + const plugin = BehavioralDetectionMiddleware(); + getRealIP.mockReturnValue('10.0.0.1'); + + await plugin.middleware(mockReq, mockRes, mockNext); + + expect(getRealIP).toHaveBeenCalledWith(mockReq); + }); + }); + + describe('blocking functionality', () => { + test('should block requests from blocked IPs', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ + blocked: true, + reason: 'Malicious activity detected' + }); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + // Trigger response capture by calling end and wait for async processing + mockRes.end(); + + // Wait for setImmediate and async processing to complete + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.status).toHaveBeenCalledWith(403); + expect(mockRes.setHeader).toHaveBeenCalledWith('X-Behavioral-Block', 'true'); + expect(mockRes.setHeader).toHaveBeenCalledWith('X-Block-Reason', 'Malicious activity detected'); + expect(logs.plugin).toHaveBeenCalledWith('behavioral', expect.stringContaining('Blocked IP')); + }); + + test('should use default block message when none configured', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ + blocked: true, + reason: 'Suspicious activity' + }); + behavioralDetection.config.Responses.BlockMessage = undefined; + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.status).toHaveBeenCalledWith(403); + }); + + test('should handle blocked IP without reason', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ + blocked: true + }); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.setHeader).toHaveBeenCalledWith('X-Block-Reason', 'suspicious activity'); + }); + }); + + describe('rate limiting functionality', () => { + test('should apply rate limiting when limit exceeded', async () => { + // Make sure isBlocked returns false so rate limiting is checked + behavioralDetection.isBlocked.mockResolvedValue({ blocked: false }); + behavioralDetection.getRateLimit.mockResolvedValue({ + exceeded: true, + requests: 150, + limit: 100, + window: 60000, + resetTime: Date.now() + 60000 + }); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.status).toHaveBeenCalledWith(429); + expect(mockRes.setHeader).toHaveBeenCalledWith('X-RateLimit-Limit', '100'); + expect(mockRes.setHeader).toHaveBeenCalledWith('X-RateLimit-Remaining', '0'); + 
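+      // Retry-After is asserted in whole seconds: the mocked 60000 ms window surfaces as '60'.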
expect(mockRes.setHeader).toHaveBeenCalledWith('Retry-After', '60'); + expect(logs.plugin).toHaveBeenCalledWith('behavioral', expect.stringContaining('Rate limit exceeded')); + }); + + test('should set rate limit headers for non-exceeded limits', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ blocked: false }); + behavioralDetection.getRateLimit.mockResolvedValue({ + exceeded: false, + requests: 50, + limit: 100, + resetTime: Date.now() + 30000 + }); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.setHeader).toHaveBeenCalledWith('X-RateLimit-Limit', '100'); + expect(mockRes.setHeader).toHaveBeenCalledWith('X-RateLimit-Remaining', '50'); + expect(mockRes.status).not.toHaveBeenCalledWith(429); + }); + }); + + describe('behavioral analysis', () => { + test('should analyze request and set behavioral headers', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ blocked: false }); + behavioralDetection.getRateLimit.mockResolvedValue(null); + behavioralDetection.analyzeRequest.mockResolvedValue({ + totalScore: 25, + patterns: [ + { name: 'rapid_requests', score: 15 }, + { name: 'suspicious_user_agent', score: 10 } + ] + }); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(behavioralDetection.analyzeRequest).toHaveBeenCalledWith( + '192.168.1.1', + mockReq, + expect.objectContaining({ + status: 200, + responseTime: expect.any(Number) + }) + ); + + expect(mockRes.setHeader).toHaveBeenCalledWith('X-Behavioral-Score', '25'); + expect(mockRes.setHeader).toHaveBeenCalledWith('X-Behavioral-Patterns', 'rapid_requests, suspicious_user_agent'); + }); + + test('should store behavioral signals in response locals', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ blocked: false }); + behavioralDetection.getRateLimit.mockResolvedValue(null); + const analysis = { + totalScore: 10, + patterns: [{ name: 'test_pattern', score: 10 }] + }; + behavioralDetection.analyzeRequest.mockResolvedValue(analysis); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.locals.behavioralSignals).toEqual(analysis); + }); + + test('should not set headers when no patterns detected', async () => { + behavioralDetection.isBlocked.mockResolvedValue({ blocked: false }); + behavioralDetection.getRateLimit.mockResolvedValue(null); + behavioralDetection.analyzeRequest.mockResolvedValue({ + totalScore: 0, + patterns: [] + }); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + expect(mockRes.setHeader).not.toHaveBeenCalledWith('X-Behavioral-Score', expect.anything()); + expect(mockRes.setHeader).not.toHaveBeenCalledWith('X-Behavioral-Patterns', expect.anything()); + }); + }); + + describe('response method interception', () => { + test('should intercept res.end() calls', async () => { + const originalEnd = 
mockRes.end; + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end('test data'); + + // Should have called the original method + expect(originalEnd).toHaveBeenCalledWith('test data'); + }); + + test('should intercept res.json() calls', async () => { + const originalJson = mockRes.json; + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + const testData = { test: 'data' }; + mockRes.json(testData); + + expect(originalJson).toHaveBeenCalledWith(testData); + }); + + test('should intercept res.send() calls', async () => { + const originalSend = mockRes.send; + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.send('test response'); + + expect(originalSend).toHaveBeenCalledWith('test response'); + }); + }); + + describe('error handling', () => { + test('should handle errors in behavioral analysis gracefully', async () => { + behavioralDetection.analyzeRequest.mockRejectedValue(new Error('Analysis failed')); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setTimeout(resolve, 10)); // Give error handling time + + expect(logs.error).toHaveBeenCalledWith('behavioral', expect.stringContaining('Error in behavioral analysis')); + expect(mockNext).toHaveBeenCalled(); // Should not block request flow + }); + + test('should handle errors in isBlocked check', async () => { + behavioralDetection.isBlocked.mockRejectedValue(new Error('Block check failed')); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setTimeout(resolve, 10)); + + expect(logs.error).toHaveBeenCalledWith('behavioral', expect.stringContaining('Error in behavioral analysis')); + }); + + test('should fail open for availability on errors', async () => { + behavioralDetection.isBlocked.mockRejectedValue(new Error('Service unavailable')); + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + expect(mockNext).toHaveBeenCalled(); + // Should not block the request even if behavioral detection fails + }); + }); + + describe('response locals handling', () => { + test('should handle missing response locals gracefully', async () => { + delete mockRes.locals; + + const plugin = BehavioralDetectionMiddleware(); + await plugin.middleware(mockReq, mockRes, mockNext); + + mockRes.end(); + await new Promise(resolve => setImmediate(resolve)); + await new Promise(resolve => setImmediate(resolve)); + + // Should not throw error even without locals + expect(mockNext).toHaveBeenCalled(); + }); + }); +}); \ No newline at end of file diff --git a/.tests/checkpoint.test.js b/.tests/checkpoint.test.js new file mode 100644 index 0000000..a67cca0 --- /dev/null +++ b/.tests/checkpoint.test.js @@ -0,0 +1,525 @@ +import { jest } from '@jest/globals'; +import * as crypto from 'crypto'; + +// Mock the dependencies +jest.unstable_mockModule('../dist/index.js', () => ({ + registerPlugin: jest.fn(), + loadConfig: jest.fn().mockResolvedValue({ + Core: { + Enabled: true, + CookieName: '__checkpoint', + 
SanitizeURLs: true + }, + ThreatScoring: { + Enabled: true, + AllowThreshold: 20, + ChallengeThreshold: 60, + BlockThreshold: 80 + }, + ProofOfWork: { + Difficulty: 16, + SaltLength: 32, + ChallengeExpiration: '5m' + } + }), + rootDir: '/test/root' +})); + +jest.unstable_mockModule('../dist/utils/logs.js', () => ({ + plugin: jest.fn(), + warn: jest.fn(), + error: jest.fn() +})); + +jest.unstable_mockModule('../dist/utils/threat-scoring.js', () => ({ + threatScorer: { + calculateThreatScore: jest.fn() + }, + THREAT_THRESHOLDS: { + ALLOW: 20, + CHALLENGE: 60, + BLOCK: 80 + } +})); + +jest.unstable_mockModule('../dist/utils/proof.js', () => ({ + challengeStore: new Map(), + generateRequestID: jest.fn(() => 'test-request-id'), + getChallengeParams: jest.fn(), + deleteChallenge: jest.fn(), + verifyPoW: jest.fn(), + verifyPoS: jest.fn() +})); + +jest.unstable_mockModule('level', () => ({ + Level: jest.fn(() => ({ + open: jest.fn().mockResolvedValue(undefined), + put: jest.fn().mockResolvedValue(undefined), + get: jest.fn().mockResolvedValue(undefined), + del: jest.fn().mockResolvedValue(undefined), + close: jest.fn().mockResolvedValue(undefined), + iterator: jest.fn(() => []) + })) +})); + +jest.unstable_mockModule('level-ttl', () => ({ + default: jest.fn((db) => db) +})); + +jest.unstable_mockModule('fs', () => ({ + existsSync: jest.fn(() => true), + promises: { + mkdir: jest.fn().mockResolvedValue(undefined), + readFile: jest.fn().mockResolvedValue('{{TargetPath}}') + } +})); + +// Import after mocking +const checkpoint = await import('../dist/checkpoint.js'); + +describe('Checkpoint Security System', () => { + beforeEach(() => { + jest.clearAllMocks(); + }); + + describe('Utility Functions', () => { + describe('sanitizePath', () => { + test('should sanitize basic paths', () => { + // This function isn't directly exported, so we'll test through integration + expect(typeof checkpoint).toBe('object'); + }); + + test('should handle invalid input types gracefully', () => { + // Testing integration behaviors since sanitizePath is internal + expect(checkpoint).toBeDefined(); + }); + }); + + describe('LimitedMap', () => { + test('should respect size limits', () => { + // LimitedMap is internal, testing through checkpoint behaviors + expect(checkpoint).toBeDefined(); + }); + }); + }); + + describe('Template System', () => { + test('should handle template data replacement', () => { + const templateStr = 'Hello {{name}}, your score is {{score}}'; + const data = { name: 'John', score: 85 }; + + const result = templateStr.replace(/\{\{\s*([^{}]+?)\s*\}\}/g, (_, key) => { + let value = data; + for (const part of key.trim().split('.')) { + if (value && typeof value === 'object' && part in value) { + value = value[part]; + } else { + value = undefined; + break; + } + } + return value != null ? String(value) : ''; + }); + + expect(result).toBe('Hello John, your score is 85'); + }); + + test('should handle nested template data', () => { + const templateStr = 'Request {{request.id}} to {{request.path}}'; + const data = { + request: { id: '123', path: '/test' } + }; + + const result = templateStr.replace(/\{\{\s*([^{}]+?)\s*\}\}/g, (_, key) => { + let value = data; + for (const part of key.trim().split('.')) { + if (value && typeof value === 'object' && part in value) { + value = value[part]; + } else { + value = undefined; + break; + } + } + return value != null ? 
String(value) : ''; + }); + + expect(result).toBe('Request 123 to /test'); + }); + + test('should handle missing template data gracefully', () => { + const templateStr = 'Hello {{missing.key}}'; + const data = {}; + + const result = templateStr.replace(/\{\{\s*([^{}]+?)\s*\}\}/g, (_, key) => { + let value = data; + for (const part of key.trim().split('.')) { + if (value && typeof value === 'object' && part in value) { + value = value[part]; + } else { + value = undefined; + break; + } + } + return value != null ? String(value) : ''; + }); + + expect(result).toBe('Hello '); + }); + }); + + describe('Response Generation', () => { + const mockRequest = { + url: '/test', + headers: { + host: 'example.com', + 'user-agent': 'Mozilla/5.0 Test Browser' + } + }; + + test('should generate threat level descriptions', () => { + const getThreatLevel = (score) => { + if (score >= 80) return 'critical'; + if (score >= 60) return 'high'; + if (score >= 40) return 'medium'; + if (score >= 20) return 'low'; + return 'minimal'; + }; + + expect(getThreatLevel(0)).toBe('minimal'); + expect(getThreatLevel(15)).toBe('minimal'); + expect(getThreatLevel(25)).toBe('low'); + expect(getThreatLevel(45)).toBe('medium'); + expect(getThreatLevel(65)).toBe('high'); + expect(getThreatLevel(85)).toBe('critical'); + }); + + test('should format signal names correctly', () => { + const formatSignalName = (signal) => { + const formatMap = { + 'sql_injection': 'SQL Injection Attempt', + 'xss_attempt': 'Cross-Site Scripting', + 'command_injection': 'Command Injection', + 'blacklisted_ip': 'Blacklisted IP Address', + 'tor_exit_node': 'Tor Exit Node', + 'attack_tool_ua': 'Attack Tool Detected' + }; + return formatMap[signal] || signal.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase()); + }; + + expect(formatSignalName('sql_injection')).toBe('SQL Injection Attempt'); + expect(formatSignalName('xss_attempt')).toBe('Cross-Site Scripting'); + expect(formatSignalName('unknown_signal')).toBe('Unknown Signal'); + }); + + test('should generate appropriate challenge types based on threat score', () => { + const getChallengeType = (score) => score > 60 ? 'advanced' : 'standard'; + const getEstimatedTime = (score) => score > 60 ? 
'10-15' : '5-10'; + + expect(getChallengeType(30)).toBe('standard'); + expect(getEstimatedTime(30)).toBe('5-10'); + expect(getChallengeType(70)).toBe('advanced'); + expect(getEstimatedTime(70)).toBe('10-15'); + }); + }); + + describe('Client Identification', () => { + test('should hash IP addresses consistently', () => { + const ip = '192.168.1.100'; + const hash1 = crypto.createHash('sha256').update(ip).digest().slice(0, 8).toString('hex'); + const hash2 = crypto.createHash('sha256').update(ip).digest().slice(0, 8).toString('hex'); + + expect(hash1).toBe(hash2); + expect(hash1).toHaveLength(16); // 8 bytes = 16 hex chars + }); + + test('should generate different hashes for different IPs', () => { + const ip1 = '192.168.1.100'; + const ip2 = '192.168.1.101'; + const hash1 = crypto.createHash('sha256').update(ip1).digest().slice(0, 8).toString('hex'); + const hash2 = crypto.createHash('sha256').update(ip2).digest().slice(0, 8).toString('hex'); + + expect(hash1).not.toBe(hash2); + }); + + test('should hash user agents consistently', () => { + const ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'; + const hash1 = crypto.createHash('sha256').update(ua).digest().slice(0, 8).toString('hex'); + const hash2 = crypto.createHash('sha256').update(ua).digest().slice(0, 8).toString('hex'); + + expect(hash1).toBe(hash2); + expect(hash1).toHaveLength(16); + }); + + test('should handle empty user agents', () => { + const emptyUA = ''; + const hash = emptyUA ? crypto.createHash('sha256').update(emptyUA).digest().slice(0, 8).toString('hex') : ''; + + expect(hash).toBe(''); + }); + + test('should extract browser fingerprint from headers', () => { + const headers = { + 'sec-ch-ua': '"Google Chrome";v="119"', + 'sec-ch-ua-platform': '"Windows"', + 'sec-ch-ua-mobile': '?0' + }; + + const headerNames = [ + 'sec-ch-ua', + 'sec-ch-ua-platform', + 'sec-ch-ua-mobile', + 'sec-ch-ua-platform-version', + 'sec-ch-ua-arch', + 'sec-ch-ua-model' + ]; + + const parts = headerNames + .map(h => headers[h]) + .filter(part => typeof part === 'string' && part.length > 0); + + expect(parts).toHaveLength(3); + + if (parts.length > 0) { + const fingerprint = crypto.createHash('sha256') + .update(Buffer.from(parts.join('|'))) + .digest() + .slice(0, 12) + .toString('hex'); + + expect(fingerprint).toHaveLength(24); // 12 bytes = 24 hex chars + } + }); + + test('should handle fetch-style headers', () => { + const fetchHeaders = { + get: jest.fn((name) => { + const headers = { + 'sec-ch-ua': '"Chrome";v="119"', + 'sec-ch-ua-platform': '"Windows"' + }; + return headers[name] || null; + }) + }; + + expect(fetchHeaders.get('sec-ch-ua')).toBe('"Chrome";v="119"'); + expect(fetchHeaders.get('nonexistent')).toBe(null); + }); + }); + + describe('Security Configuration', () => { + test('should validate threat score thresholds', () => { + const thresholds = { + ALLOW: 20, + CHALLENGE: 60, + BLOCK: 80 + }; + + expect(thresholds.ALLOW).toBeLessThan(thresholds.CHALLENGE); + expect(thresholds.CHALLENGE).toBeLessThan(thresholds.BLOCK); + expect(thresholds.ALLOW).toBeGreaterThanOrEqual(0); + expect(thresholds.BLOCK).toBeLessThanOrEqual(100); + }); + + test('should handle user-defined thresholds', () => { + const determineAction = (score, userThresholds = null) => { + if (userThresholds) { + const allowThreshold = userThresholds.ALLOW || userThresholds.AllowThreshold || 20; + const challengeThreshold = userThresholds.CHALLENGE || userThresholds.ChallengeThreshold || 60; + + if (score <= allowThreshold) return 'allow'; + if (score <= 
challengeThreshold) return 'challenge'; + return 'block'; + } + + if (score <= 20) return 'allow'; + if (score <= 60) return 'challenge'; + return 'block'; + }; + + const userThresholds = { AllowThreshold: 15, ChallengeThreshold: 50, BlockThreshold: 80 }; + + expect(determineAction(10, userThresholds)).toBe('allow'); + expect(determineAction(30, userThresholds)).toBe('challenge'); + expect(determineAction(80, userThresholds)).toBe('block'); + }); + + test('should validate configuration structure', () => { + const mockConfig = { + Core: { + Enabled: true, + CookieName: '__checkpoint', + SanitizeURLs: true + }, + ThreatScoring: { + Enabled: true, + AllowThreshold: 20, + ChallengeThreshold: 60, + BlockThreshold: 80 + }, + ProofOfWork: { + Difficulty: 16, + SaltLength: 32, + ChallengeExpiration: '5m' + } + }; + + expect(mockConfig.Core.Enabled).toBe(true); + expect(mockConfig.ThreatScoring.AllowThreshold).toBe(20); + expect(mockConfig.ProofOfWork.Difficulty).toBe(16); + }); + }); + + describe('Token Management', () => { + test('should generate consistent token signatures', () => { + const secret = 'test-secret'; + const token = 'test-token'; + + const signature1 = crypto.createHmac('sha256', secret).update(token).digest('hex'); + const signature2 = crypto.createHmac('sha256', secret).update(token).digest('hex'); + + expect(signature1).toBe(signature2); + expect(signature1).toHaveLength(64); // SHA256 hex = 64 chars + }); + + test('should generate different signatures for different tokens', () => { + const secret = 'test-secret'; + const token1 = 'test-token-1'; + const token2 = 'test-token-2'; + + const signature1 = crypto.createHmac('sha256', secret).update(token1).digest('hex'); + const signature2 = crypto.createHmac('sha256', secret).update(token2).digest('hex'); + + expect(signature1).not.toBe(signature2); + }); + + test('should handle token expiration logic', () => { + const now = Date.now(); + const oneHour = 60 * 60 * 1000; + const expiration = now + oneHour; + + expect(expiration).toBeGreaterThan(now); + expect(expiration - now).toBe(oneHour); + + // Test if token is expired + const isExpired = (expirationTime) => Date.now() > expirationTime; + expect(isExpired(expiration)).toBe(false); + expect(isExpired(now - 1000)).toBe(true); + }); + + test('should validate nonce uniqueness', () => { + const nonce1 = crypto.randomBytes(16).toString('hex'); + const nonce2 = crypto.randomBytes(16).toString('hex'); + + expect(nonce1).not.toBe(nonce2); + expect(nonce1).toHaveLength(32); // 16 bytes = 32 hex chars + expect(nonce2).toHaveLength(32); + }); + }); + + describe('Rate Limiting', () => { + test('should track request attempts per IP', () => { + const ipAttempts = new Map(); + const maxAttempts = 10; + + const recordAttempt = (ip) => { + const currentAttempts = ipAttempts.get(ip) || 0; + const newAttempts = currentAttempts + 1; + ipAttempts.set(ip, newAttempts); + return newAttempts <= maxAttempts; + }; + + expect(recordAttempt('192.168.1.100')).toBe(true); + expect(ipAttempts.get('192.168.1.100')).toBe(1); + + // Simulate many attempts + for (let i = 0; i < 10; i++) { + recordAttempt('192.168.1.100'); + } + + expect(recordAttempt('192.168.1.100')).toBe(false); // Should exceed limit + }); + + test('should handle time-based rate limiting', () => { + const now = Date.now(); + const oneHour = 60 * 60 * 1000; + const windowStart = now - oneHour; + + const isWithinWindow = (timestamp) => timestamp > windowStart; + + expect(isWithinWindow(now)).toBe(true); + expect(isWithinWindow(now - oneHour - 
1000)).toBe(false); + }); + }); + + describe('Extension Handling', () => { + test('should handle file extension filtering', () => { + const path = '/static/style.css'; + const extension = path.substring(path.lastIndexOf('.')).toLowerCase(); + + expect(extension).toBe('.css'); + }); + + test('should identify static file extensions', () => { + const staticExtensions = new Set(['.css', '.js', '.png', '.jpg', '.gif', '.ico', '.svg']); + + expect(staticExtensions.has('.css')).toBe(true); + expect(staticExtensions.has('.js')).toBe(true); + expect(staticExtensions.has('.html')).toBe(false); + }); + + test('should handle paths without extensions', () => { + const pathWithoutExt = '/api/users'; + const lastDot = pathWithoutExt.lastIndexOf('.'); + + expect(lastDot).toBe(-1); + }); + }); + + describe('Security Validation', () => { + test('should sanitize URL paths', () => { + const sanitizePath = (inputPath) => { + if (typeof inputPath !== 'string') { + return '/'; + } + let pathOnly = inputPath.replace(/[\x00-\x1F\x7F]/g, ''); + pathOnly = pathOnly.replace(/[<>;"'`|]/g, ''); + const parts = pathOnly.split('/').filter(seg => seg && seg !== '.' && seg !== '..'); + return '/' + parts.map(seg => encodeURIComponent(seg)).join('/'); + }; + + expect(sanitizePath('/path/../../../etc/passwd')).toBe('/path/etc/passwd'); // .. filtered out + expect(sanitizePath('/path')).toBe('/pathscriptalert(xss)/script'); + expect(sanitizePath('/path\x00\x1F\x7F/file')).toBe('/path/file'); + expect(sanitizePath(null)).toBe('/'); + }); + + test('should handle extension filtering', () => { + const isStaticFile = (path) => { + const staticExtensions = new Set(['.css', '.js', '.png', '.jpg', '.gif', '.ico', '.svg']); + const ext = path.substring(path.lastIndexOf('.')).toLowerCase(); + return staticExtensions.has(ext); + }; + + expect(isStaticFile('/static/style.css')).toBe(true); + expect(isStaticFile('/app.js')).toBe(true); + expect(isStaticFile('/api/users')).toBe(false); + expect(isStaticFile('/index.html')).toBe(false); + }); + + test('should handle control characters in paths', () => { + const pathWithControlChars = '/path\x00\x1F\x7F/file'; + const sanitized = pathWithControlChars.replace(/[\x00-\x1F\x7F]/g, ''); + + expect(sanitized).toBe('/path/file'); + expect(sanitized).not.toMatch(/[\x00-\x1F\x7F]/); + }); + + test('should filter dangerous characters', () => { + const pathWithDangerousChars = '/path'; + const sanitized = pathWithDangerousChars.replace(/[<>;"'`|]/g, ''); + + expect(sanitized).toBe('/pathscriptalert(xss)/script'); // Correct expectation + expect(sanitized).not.toContain(' - - - `); - } - } - return interstitialTemplate; -} - -// Helper function for safe stats recording -function safeRecordEvent(metric, data) { - // If recordEvent is not yet loaded, try to wait for it - if (!recordEvent && statsLoadPromise) { - statsLoadPromise.then(() => { - if (recordEvent) { - try { - recordEvent(metric, data); - } catch (err) { - console.error(`Failed to record ${metric} event:`, err); - } - } - }); - return; - } - - if (typeof recordEvent === 'function') { - try { - recordEvent(metric, data); - } catch (err) { - console.error(`Failed to record ${metric} event:`, err); - } - } -} - -async function serveInterstitial(request) { - const ip = getRealIP(request); - const requestPath = new URL(request.url).pathname; - safeRecordEvent('checkpoint.sent', { ip, path: requestPath }); - let tpl; - try { - tpl = await getInterstitialTemplate(); - } catch (err) { - console.error('Interstitial template error:', err); - return new 
Response('Security verification required.', { - status: 200, - headers: { 'Content-Type': 'text/plain' }, - }); - } - - const requestID = proofGenerateRequestID(request, checkpointConfig); - const url = new URL(request.url); - const host = request.headers.get('host') || url.hostname; - const targetPath = url.pathname; - const fullURL = request.url; - - const html = tpl({ - TargetPath: targetPath, - RequestID: requestID, - Host: host, - FullURL: fullURL, - }); - - return new Response(html, { - status: 200, - headers: { 'Content-Type': 'text/html; charset=utf-8' }, - }); -} - -async function handleGetCheckpointChallenge(request) { - const url = new URL(request.url); - const requestID = url.searchParams.get('id'); - if (!requestID) { - return new Response(JSON.stringify({ error: 'Missing request ID' }), { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }); - } - - const ip = getRealIP(request); - const attempts = (ipRateLimit.get(ip) || 0) + 1; - ipRateLimit.set(ip, attempts); - - if (attempts > checkpointConfig.MaxAttemptsPerHour) { - return new Response( - JSON.stringify({ error: 'Too many challenge requests. Try again later.' }), - { - status: 429, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - - const params = getChallengeParams(requestID); - if (!params) { - return new Response(JSON.stringify({ error: 'Challenge not found or expired' }), { - status: 404, - headers: { 'Content-Type': 'application/json' }, - }); - } - - if (ip !== params.ClientIP) { - return new Response(JSON.stringify({ error: 'IP address mismatch for challenge' }), { - status: 403, - headers: { 'Content-Type': 'application/json' }, - }); - } - - const payload = { - a: params.Challenge, - b: params.Salt, - c: params.Difficulty, - d: params.PoSSeed, - }; - return new Response(JSON.stringify(payload), { - status: 200, - headers: { 'Content-Type': 'application/json' }, - }); -} - -function calculateTokenHash(token) { - const data = `${token.Nonce}:${token.Entropy}:${token.Created.getTime()}`; - return crypto.createHash('sha256').update(data).digest('hex'); -} - -function computeTokenSignature(token) { - const copy = { ...token, Signature: '' }; - const serialized = JSON.stringify(copy); - return crypto.createHmac('sha256', hmacSecret).update(serialized).digest('hex'); -} - -function verifyTokenSignature(token) { - if (!token.Signature) return false; - const expected = computeTokenSignature(token); - try { - return crypto.timingSafeEqual( - Buffer.from(token.Signature, 'hex'), - Buffer.from(expected, 'hex'), - ); - } catch (e) { - return false; - } -} - -async function issueToken(request, token) { - const tokenHash = calculateTokenHash(token); - const storedData = { - ClientIPHash: token.ClientIP, - UserAgentHash: token.UserAgent, - BrowserHint: token.BrowserHint, - LastVerified: new Date(token.LastVerified).toISOString(), - ExpiresAt: new Date(token.ExpiresAt).toISOString(), - }; - - try { - await addToken(tokenHash, storedData); - } catch (err) { - console.error('Failed to store token:', err); - } - - token.Signature = computeTokenSignature(token); - - const tokenStr = Buffer.from(JSON.stringify(token)).toString('base64'); - - const url = new URL(request.url); - const cookieDomain = checkpointConfig.CookieDomain || ''; - const sameSite = cookieDomain ? 'Lax' : 'Strict'; - const secure = url.protocol === 'https:'; - const expires = new Date(token.ExpiresAt).toUTCString(); - - const domainPart = cookieDomain ? `; Domain=${cookieDomain}` : ''; - const securePart = secure ? 
'; Secure' : ''; - const cookieStr = - `${checkpointConfig.CookieName}=${tokenStr}; Path=/` + - `${domainPart}; Expires=${expires}; HttpOnly; SameSite=${sameSite}${securePart}`; - return new Response(JSON.stringify({ token: tokenStr, expires_at: token.ExpiresAt }), { - status: 200, - headers: { - 'Content-Type': 'application/json', - 'Set-Cookie': cookieStr, - }, - }); -} - -async function handleVerifyCheckpoint(request) { - let body; - try { - body = await request.json(); - } catch (e) { - safeRecordEvent('checkpoint.failure', { reason: 'invalid_json', ip: getRealIP(request) }); - return new Response(JSON.stringify({ error: 'Invalid JSON' }), { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }); - } - - const ip = getRealIP(request); - const params = getChallengeParams(body.request_id); - - if (!params) { - safeRecordEvent('checkpoint.failure', { reason: 'invalid_or_expired_request', ip }); - return new Response(JSON.stringify({ error: 'Invalid or expired request ID' }), { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }); - } - - if (ip !== params.ClientIP) { - safeRecordEvent('checkpoint.failure', { reason: 'ip_mismatch', ip }); - return new Response(JSON.stringify({ error: 'IP address mismatch' }), { - status: 403, - headers: { 'Content-Type': 'application/json' }, - }); - } - - const challenge = params.Challenge; - const salt = params.Salt; - - if (!body.g || !verifyPoW(challenge, salt, body.g, params.Difficulty)) { - safeRecordEvent('checkpoint.failure', { reason: 'invalid_pow', ip }); - return new Response(JSON.stringify({ error: 'Invalid proof-of-work solution' }), { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }); - } - - const nonceKey = body.g + challenge; - usedNonces.set(nonceKey, Date.now()); - - if (body.h?.length === 3 && body.i?.length === 3) { - try { - verifyPoS(body.h, body.i, checkpointConfig); - } catch (e) { - safeRecordEvent('checkpoint.failure', { reason: 'invalid_pos', ip }); - return new Response(JSON.stringify({ error: e.message }), { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }); - } - } - - deleteChallenge(body.request_id); - safeRecordEvent('checkpoint.success', { ip }); - const now = new Date(); - const expiresAt = new Date(now.getTime() + checkpointConfig.TokenExpiration); - - const token = { - Nonce: body.g, - ExpiresAt: expiresAt, - ClientIP: getFullClientIP(request), - UserAgent: hashUserAgent(request.headers.get('user-agent')), - BrowserHint: extractBrowserFingerprint(request), - Entropy: crypto.randomBytes(8).toString('hex'), - Created: now, - LastVerified: now, - TokenFormat: 2, - }; - - token.Signature = computeTokenSignature(token); - const tokenStr = Buffer.from(JSON.stringify(token)).toString('base64'); - - const tokenKey = crypto.createHash('sha256').update(tokenStr).digest('hex'); - try { - await db.put(tokenKey, true); - tokenCache.set(tokenKey, true); - - tokenExpirations.set(tokenKey, new Date(token.ExpiresAt).getTime()); - console.log(`checkpoint: token stored in DB and cache key=${tokenKey}`); - } catch (e) { - console.error('checkpoint: failed to store token in DB:', e); - } - - return new Response(JSON.stringify({ token: tokenStr, expires_at: token.ExpiresAt }), { - status: 200, - headers: { 'Content-Type': 'application/json' }, - }); -} - -function generateUpdatedCookie(token, secure) { - token.Signature = computeTokenSignature(token); - const tokenStr = Buffer.from(JSON.stringify(token)).toString('base64'); - const cookieDomain = 
checkpointConfig.CookieDomain || ''; - const sameSite = cookieDomain ? 'Lax' : 'Strict'; - const expires = new Date(token.ExpiresAt).toUTCString(); - - const domainPart = cookieDomain ? `; Domain=${cookieDomain}` : ''; - const securePart = secure ? '; Secure' : ''; - const cookieStr = - `${checkpointConfig.CookieName}=${tokenStr}; Path=/` + - `${domainPart}; Expires=${expires}; HttpOnly; SameSite=${sameSite}${securePart}`; - return cookieStr; -} - -async function validateToken(tokenStr, request) { - if (!tokenStr) return false; - let token; - try { - token = JSON.parse(Buffer.from(tokenStr, 'base64').toString()); - } catch { - console.log('checkpoint: invalid token format'); - return false; - } - - if (Date.now() > new Date(token.ExpiresAt).getTime()) { - console.log('checkpoint: token expired'); - return false; - } - - if (!verifyTokenSignature(token)) { - console.log('checkpoint: invalid token signature'); - return false; - } - - const tokenKey = crypto.createHash('sha256').update(tokenStr).digest('hex'); - - if (tokenCache.has(tokenKey)) return true; - - try { - await db.get(tokenKey); - tokenCache.set(tokenKey, true); - - tokenExpirations.set(tokenKey, new Date(token.ExpiresAt).getTime()); - return true; - } catch { - console.log('checkpoint: token not found in DB'); - return false; - } -} - -async function handleTokenRedirect(request) { - const url = new URL(request.url); - const tokenStr = url.searchParams.get('token'); - if (!tokenStr) return undefined; - - let token; - try { - token = JSON.parse(Buffer.from(tokenStr, 'base64').toString()); - - if (Date.now() > new Date(token.ExpiresAt).getTime()) { - console.log('checkpoint: token in URL parameter expired'); - return undefined; - } - - if (!verifyTokenSignature(token)) { - console.log('checkpoint: invalid token signature in URL parameter'); - return undefined; - } - - const tokenKey = crypto.createHash('sha256').update(tokenStr).digest('hex'); - try { - await db.get(tokenKey); - } catch { - console.log('checkpoint: token in URL parameter not found in DB'); - return undefined; - } - } catch (e) { - console.log('checkpoint: invalid token format in URL parameter', e); - return undefined; - } - - const expires = new Date(token.ExpiresAt).toUTCString(); - const cookieDomain = checkpointConfig.CookieDomain || ''; - const sameSite = cookieDomain ? 'Lax' : 'Strict'; - const securePart = url.protocol === 'https:' ? '; Secure' : ''; - const domainPart = cookieDomain ? `; Domain=${cookieDomain}` : ''; - const cookieStr = - `${checkpointConfig.CookieName}=${tokenStr}; Path=/` + - `${domainPart}; Expires=${expires}; HttpOnly; SameSite=${sameSite}${securePart}`; - - url.searchParams.delete('token'); - const cleanUrl = url.pathname + (url.search || ''); - return new Response(null, { - status: 302, - headers: { - 'Set-Cookie': cookieStr, - Location: cleanUrl, - }, - }); -} - -function CheckpointMiddleware() { - // Return Express-compatible middleware - return { - middleware: [ - // Add body parser middleware for JSON - express.json({ limit: '10mb' }), - // Main checkpoint middleware - async (req, res, next) => { - // Check if checkpoint is enabled - if (checkpointConfig.Enabled === false) { - return next(); - } - - // Convert Express request to the format expected by checkpoint logic - const request = { - url: `${req.protocol}://${req.get('host')}${req.originalUrl}`, - method: req.method, - headers: { - get: (name) => req.get(name), - entries: () => Object.entries(req.headers).map(([k, v]) => [k, Array.isArray(v) ? 
v.join(', ') : v]) - }, - json: () => Promise.resolve(req.body) - }; - - const urlObj = new URL(request.url); - const host = request.headers.get('host')?.split(':')[0]; - const userAgent = request.headers.get('user-agent') || ''; - - // 1) Bypass via query keys - for (const { Key, Value, Domains } of checkpointConfig.BypassQueryKeys) { - if (urlObj.searchParams.get(Key) === Value) { - if (!Array.isArray(Domains) || Domains.length === 0 || Domains.includes(host)) { - return next(); - } - } - } - - // 2) Bypass via header keys - for (const { Name, Value, Domains } of checkpointConfig.BypassHeaderKeys) { - // Get header value case-insensitively by checking all headers - let headerVal = null; - const headersMap = Object.fromEntries([...request.headers.entries()].map(([k, v]) => [k.toLowerCase(), v])); - headerVal = headersMap[Name.toLowerCase()] || request.headers.get(Name); - - console.log(`DEBUG - Checking header ${Name}: received="${headerVal}", expected="${Value}", domains=${JSON.stringify(Domains)}`); - - if (headerVal === Value) { - console.log(`DEBUG - Header value matched for ${Name}`); - if (!Array.isArray(Domains) || Domains.length === 0 || Domains.includes(host)) { - console.log(`DEBUG - Domain check passed for ${host}`); - return next(); - } else { - console.log(`DEBUG - Domain check failed: ${host} not in ${JSON.stringify(Domains)}`); - } - } else { - console.log(`DEBUG - Header value mismatch for ${Name}`); - } - } - - // Handle token redirect for URL-token login - const tokenResponse = await handleTokenRedirect(request); - if (tokenResponse) { - // Convert Response to Express response - res.status(tokenResponse.status); - tokenResponse.headers.forEach((value, key) => { - res.setHeader(key, value); - }); - const body = await tokenResponse.text(); - return res.send(body); - } - - // Setup request context - const url = new URL(request.url); - let path = url.pathname; - if (checkpointConfig.SanitizeURLs) { - path = sanitizePath(path); - } - const method = request.method; - - // Always allow challenge & verify endpoints - if (method === 'GET' && path === '/api/challenge') { - const response = await handleGetCheckpointChallenge(request); - res.status(response.status); - response.headers.forEach((value, key) => { - res.setHeader(key, value); - }); - const body = await response.text(); - return res.send(body); - } - if (method === 'POST' && path === '/api/verify') { - const response = await handleVerifyCheckpoint(request); - res.status(response.status); - response.headers.forEach((value, key) => { - res.setHeader(key, value); - }); - const body = await response.text(); - return res.send(body); - } - - // Check new exclusion rules - if (checkpointConfig.ExclusionRules && checkpointConfig.ExclusionRules.length > 0) { - for (const rule of checkpointConfig.ExclusionRules) { - // Check if path matches - if (!rule.Path || !path.startsWith(rule.Path)) { - continue; - } - - // Check if host matches (if specified) - if (rule.Hosts && rule.Hosts.length > 0 && !rule.Hosts.includes(host)) { - continue; - } - - // Check if user agent matches (if specified) - if (rule.UserAgents && rule.UserAgents.length > 0) { - const matchesUA = rule.UserAgents.some((ua) => userAgent.includes(ua)); - if (!matchesUA) { - continue; - } - } - - // All conditions match - exclude this request - return next(); - } - } - - // Only checkpoint requests explicitly accepting 'text/html' - const acceptHeader = request.headers.get('accept') || ''; - if (!acceptHeader.toLowerCase().includes('text/html')) { - return next(); - } - 
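+        // Requests reaching this point matched no bypass query key or header, carried no URL
+        // token, did not hit /api/challenge or /api/verify, matched no exclusion rule, and
+        // explicitly accept text/html; only these fall through to token validation below.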
- // Validate session token - const cookies = cookie.parse(request.headers.get('cookie') || ''); - const tokenCookie = cookies[checkpointConfig.CookieName]; - const validation = await validateToken(tokenCookie, request); - if (validation) { - // Active session: bypass checkpoint - return next(); - } - - // Log new checkpoint flow - console.log(`checkpoint: incoming ${method} ${request.url}`); - console.log(`checkpoint: tokenCookie=${tokenCookie}`); - console.log(`checkpoint: validateToken => ${validation}`); - - // Serve interstitial challenge - const response = await serveInterstitial(request); - res.status(response.status); - response.headers.forEach((value, key) => { - res.setHeader(key, value); - }); - const body = await response.text(); - return res.send(body); - } - ] - }; -} - -async function addToken(tokenHash, data) { - if (!db) return; - try { - const ttlMs = checkpointConfig.TokenExpiration; - - await db.put(tokenHash, data); - - tokenExpirations.set(tokenHash, Date.now() + ttlMs); - } catch (err) { - console.error('Error adding token:', err); - } -} - -async function updateTokenVerification(tokenHash) { - if (!db) return; - try { - const data = await db.get(tokenHash); - data.LastVerified = new Date().toISOString(); - await db.put(tokenHash, data); - } catch (err) { - console.error('Error updating token verification:', err); - } -} - -async function lookupTokenData(tokenHash) { - if (!db) return { data: null, found: false }; - try { - const expireTime = tokenExpirations.get(tokenHash); - if (!expireTime || expireTime <= Date.now()) { - if (expireTime) { - tokenExpirations.delete(tokenHash); - try { - await db.del(tokenHash); - } catch (e) {} - } - return { data: null, found: false }; - } - - const data = await db.get(tokenHash); - return { data, found: true }; - } catch (err) { - if (err.code === 'LEVEL_NOT_FOUND') return { data: null, found: false }; - console.error('Error looking up token:', err); - throw err; - } -} - -async function closeTokenStore() { - if (db) await db.close(); -} - -function startCleanupTimer() { - // Cleanup expired data hourly - setInterval(() => { - cleanupExpiredData(); - }, 3600000); - // Cleanup expired challenges at the challenge expiration interval - const challengeInterval = checkpointConfig.ChallengeExpiration || 60000; - setInterval(() => { - cleanupExpiredChallenges(); - }, challengeInterval); -} - -function cleanupExpiredData() { - const now = Date.now(); - let count = 0; - - try { - for (const [nonce, ts] of usedNonces.entries()) { - if (now - ts > checkpointConfig.MaxNonceAge) { - usedNonces.delete(nonce); - count++; - } - } - if (count) console.log(`Checkpoint: cleaned up ${count} expired nonces.`); - } catch (err) { - console.error('Error cleaning up nonces:', err); - } - - // Clean up expired tokens from cache - let tokenCacheCount = 0; - try { - for (const [tokenKey, _] of tokenCache.entries()) { - const expireTime = tokenExpirations.get(tokenKey); - if (!expireTime || expireTime <= now) { - tokenCache.delete(tokenKey); - tokenExpirations.delete(tokenKey); - tokenCacheCount++; - } - } - if (tokenCacheCount) - console.log(`Checkpoint: cleaned up ${tokenCacheCount} expired tokens from cache.`); - } catch (err) { - console.error('Error cleaning up token cache:', err); - } - - try { - ipRateLimit.clear(); - console.log('Checkpoint: IP rate limits reset.'); - } catch (err) { - console.error('Error resetting IP rate limits:', err); - } -} - -function cleanupExpiredChallenges() { - const now = Date.now(); - let count = 0; - for (const [id, 
params] of challengeStore.entries()) { - if (params.ExpiresAt && params.ExpiresAt < now) { - // Record failure for expired challenges that were never completed - safeRecordEvent('checkpoint.failure', { - reason: 'challenge_expired', - ip: params.ClientIP, - challenge_id: id.substring(0, 8), // Include partial ID for debugging - age_ms: now - params.CreatedAt, // How long the challenge existed - expiry_ms: checkpointConfig.ChallengeExpiration, // Configured expiry time - }); - challengeStore.delete(id); - count++; - } - } - if (count) console.log(`Checkpoint: cleaned up ${count} expired challenges.`); -} - -async function initSecret() { - try { - if (!checkpointConfig.SecretConfigPath) { - checkpointConfig.SecretConfigPath = join(rootDir, 'data', 'checkpoint_secret.json'); - } - - const secretPath = checkpointConfig.SecretConfigPath; - const exists = fs.existsSync(secretPath); - - if (exists) { - const loaded = loadSecretFromFile(); - if (loaded) { - hmacSecret = loaded; - console.log(`Loaded existing HMAC secret from ${secretPath}`); - return; - } - } - - hmacSecret = crypto.randomBytes(32); - fs.mkdirSync(path.dirname(secretPath), { recursive: true }); - - const secretCfg = { - hmac_secret: hmacSecret.toString('base64'), - created_at: new Date().toISOString(), - updated_at: new Date().toISOString(), - }; - - fs.writeFileSync(secretPath, JSON.stringify(secretCfg), { mode: 0o600 }); - console.log(`Created and saved new HMAC secret to ${secretPath}`); - } catch (err) { - console.error('Error initializing secret:', err); - - hmacSecret = crypto.randomBytes(32); - } -} - -function loadSecretFromFile() { - try { - const data = fs.readFileSync(checkpointConfig.SecretConfigPath, 'utf8'); - const cfg = JSON.parse(data); - const buf = Buffer.from(cfg.hmac_secret, 'base64'); - if (buf.length < 16) return null; - - cfg.updated_at = new Date().toISOString(); - fs.writeFileSync(checkpointConfig.SecretConfigPath, JSON.stringify(cfg), { mode: 0o600 }); - return buf; - } catch (e) { - console.warn('Could not load HMAC secret from file:', e); - return null; - } -} - -(async function initialize() { - await initConfig(); - await initSecret(); - initTokenStore(); - startCleanupTimer(); - - // Only register plugin if enabled - if (checkpointConfig.Enabled !== false) { - registerPlugin('checkpoint', CheckpointMiddleware()); - } else { - console.log('Checkpoint plugin disabled via configuration'); - } -})(); - -export { checkpointConfig, addToken, updateTokenVerification, lookupTokenData, closeTokenStore }; diff --git a/config/behavioral-detection.toml.example b/config/behavioral-detection.toml.example new file mode 100644 index 0000000..d01eaf0 --- /dev/null +++ b/config/behavioral-detection.toml.example @@ -0,0 +1,197 @@ +# ============================================================================= +# BEHAVIORAL DETECTION CONFIGURATION - EXAMPLE +# ============================================================================= +# Copy this file to behavioral-detection.toml and customize for your environment +# ============================================================================= + +[Core] +# Enable or disable the behavioral detection engine +Enabled = true + +# Operation mode: "detect" (log only) or "prevent" (actively block/rate limit) +Mode = "prevent" + +# Default time window for metrics (milliseconds) +DefaultTimeWindow = 300000 # 5 minutes + +# Maximum request history to keep per IP +MaxHistoryPerIP = 1000 + +# Database cleanup interval (milliseconds) +CleanupInterval = 3600000 # 1 hour + +# 
============================================================================= +# EXAMPLE DETECTION RULES +# ============================================================================= + +[[Rules]] +Name = "404 Path Enumeration" +Type = "enumeration" +Severity = "medium" +Description = "Detects rapid 404 responses indicating directory/file scanning" + + [[Rules.Triggers]] + Metric = "status_code_count" + StatusCode = 404 + Threshold = 15 + TimeWindow = 60000 # 1 minute + + [[Rules.Triggers]] + Metric = "unique_paths_by_status" + StatusCode = 404 + Threshold = 10 + TimeWindow = 60000 + + [Rules.Action] + Score = 30 + Tags = ["scanning", "enumeration", "reconnaissance"] + RateLimit = { Requests = 10, Window = 60000 } + Alert = false + +# Authentication bruteforce rule removed - not applicable for this security system + +[[Rules]] +Name = "API Endpoint Enumeration" +Type = "enumeration" +Severity = "medium" +Description = "Scanning for API endpoints" + + [[Rules.Triggers]] + Metric = "unique_api_paths" + PathPrefix = "/api/" + Threshold = 20 + TimeWindow = 60000 + + [[Rules.Triggers]] + Metric = "mixed_http_methods" + PathPrefix = "/api/" + MinMethods = 3 # GET, POST, PUT, DELETE, etc. + TimeWindow = 60000 + + [Rules.Action] + Score = 25 + Tags = ["api_abuse", "enumeration"] + RateLimit = { Requests = 20, Window = 60000 } + +[[Rules]] +Name = "Velocity-Based Scanner" +Type = "scanning" +Severity = "medium" +Description = "High-speed request patterns typical of automated scanners" + + [[Rules.Triggers]] + Metric = "request_velocity" + RequestsPerSecond = 10 + Duration = 5000 # Sustained for 5 seconds + + [[Rules.Triggers]] + Metric = "request_regularity" + MaxVariance = 0.1 # Very regular timing + MinRequests = 20 + + [Rules.Action] + Score = 35 + Tags = ["automated_scanner", "bot"] + Challenge = true # Show CAPTCHA or similar + +[[Rules]] +Name = "Admin Interface Probing" +Type = "reconnaissance" +Severity = "medium" +Description = "Attempts to find admin interfaces" + + [[Rules.Triggers]] + Metric = "path_status_combo" + PathPattern = "^/(wp-)?admin|^/administrator|^/manage|^/cpanel|^/phpmyadmin" + StatusCodes = [200, 301, 302, 403, 404] + Threshold = 5 + TimeWindow = 300000 + + [Rules.Action] + Score = 25 + Tags = ["admin_probe", "reconnaissance"] + RateLimit = { Requests = 5, Window = 300000 } + +# ============================================================================= +# CORRELATION RULES EXAMPLES +# ============================================================================= + +[[Correlations]] +Name = "Rotating User-Agent Attack" +Description = "Same IP using multiple user agents rapidly" + + [Correlations.Conditions] + Metric = "unique_user_agents_per_ip" + Threshold = 5 + TimeWindow = 60000 + + [Correlations.Action] + Score = 20 + Tags = ["evasion", "user_agent_rotation"] + +# ============================================================================= +# BEHAVIORAL THRESHOLDS +# ============================================================================= + +[Thresholds] +# Minimum score to trigger any action +MinActionScore = 20 + +# Score thresholds for different severity levels +LowSeverityThreshold = 20 +MediumSeverityThreshold = 40 +HighSeverityThreshold = 60 +CriticalSeverityThreshold = 80 + +# ============================================================================= +# WHITELISTING +# ============================================================================= + +[Whitelist] +# IPs that should never be blocked by behavioral rules +TrustedIPs = [ + "127.0.0.1", + 
"::1" + # Add your monitoring service IPs here +] + +# User agents to treat with lower sensitivity +TrustedUserAgents = [ + "Googlebot", + "bingbot", + "Slackbot", + "monitoring-bot" +] + +# Paths where higher thresholds apply +MonitoringPaths = [ + "/health", + "/metrics", + "/api/status", + "/.well-known/", + "/robots.txt", + "/sitemap.xml" +] + +# ============================================================================= +# RESPONSE CUSTOMIZATION +# ============================================================================= + +[Responses] +# Custom block message (can include HTML) +BlockMessage = """ + +Access Denied + +

+Access Denied
+
+Your access has been restricted due to suspicious activity.
+
+If you believe this is an error, please contact support.

+ + +""" + +# Rate limit message +RateLimitMessage = "Rate limit exceeded. Please slow down your requests." + +# Challenge page URL (for CAPTCHA/verification) +ChallengePageURL = "/verify" \ No newline at end of file diff --git a/config/ipfilter.toml.example b/config/ipfilter.toml.example index 0ddc249..7736d9b 100644 --- a/config/ipfilter.toml.example +++ b/config/ipfilter.toml.example @@ -20,8 +20,8 @@ AccountID = "" # Can also be set via MAXMIND_LICENSE_KEY environment variable or .env file LicenseKey = "" -# How often to check for database updates (in hours) -DBUpdateIntervalHours = 12 +# How often to check for database updates (uses time.ts format: "24h", "5m", etc.) +DBUpdateInterval = "12h" # ----------------------------------------------------------------------------- # CACHING SETTINGS diff --git a/config/proxy.toml.example b/config/proxy.toml.example index 8ad2372..6b7e59d 100644 --- a/config/proxy.toml.example +++ b/config/proxy.toml.example @@ -12,6 +12,9 @@ # Enable or disable the proxy middleware Enabled = true +# Maximum body size in MB (default: 10MB if not specified) +MaxBodySizeMB = 10 + # ----------------------------------------------------------------------------- # TIMEOUT SETTINGS # ----------------------------------------------------------------------------- @@ -27,6 +30,8 @@ UpstreamTimeoutMs = 30000 # ----------------------------------------------------------------------------- # Map hostnames to backend service URLs # Format: "hostname" = "backend_url" +# Optional: AllowedMethods = ["GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH", "TRACE", "CONNECT"] +# If AllowedMethods is not specified, defaults to ["GET", "HEAD", "POST", "PUT", "OPTIONS"] # ----------------------------------------------------------------------------- [[Mapping]] @@ -44,12 +49,20 @@ Target = "http://192.168.1.100:4533" Host = "git.example.com" Target = "http://192.168.1.100:3000" -# [[Mapping]] -# API service -# Host = "api.example.com" -# Target = "http://localhost:3001" +[[Mapping]] +# Gallery service with DELETE method enabled +Host = "gallery.caileb.com" +Target = "http://192.168.1.100:8080" +AllowedMethods = ["GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS"] # [[Mapping]] -# Admin panel +# API service with specific methods +# Host = "api.example.com" +# Target = "http://localhost:3001" +# AllowedMethods = ["GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH"] + +# [[Mapping]] +# Admin panel (read-only) # Host = "admin.example.com" -# Target = "http://localhost:3002" \ No newline at end of file +# Target = "http://localhost:3002" +# AllowedMethods = ["GET", "HEAD", "OPTIONS"] \ No newline at end of file diff --git a/config/stats.toml.example b/config/stats.toml.example deleted file mode 100644 index b94fe3f..0000000 --- a/config/stats.toml.example +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# STATS CONFIGURATION -# ============================================================================= -# This configuration controls the statistics collection and visualization -# middleware that tracks events and provides a web UI for viewing metrics. 
-# ============================================================================= - -# ----------------------------------------------------------------------------- -# CORE SETTINGS -# ----------------------------------------------------------------------------- -[Core] -# Enable or disable the stats plugin -Enabled = true - -# ----------------------------------------------------------------------------- -# STORAGE SETTINGS -# ----------------------------------------------------------------------------- -[Storage] -# TTL for stats entries -# Format: "30d", "24h", "1h", etc. -StatsTTL = "30d" - -# ----------------------------------------------------------------------------- -# WEB UI SETTINGS -# ----------------------------------------------------------------------------- -[WebUI] -# Path for stats UI -StatsUIPath = "/stats" - -# Path for stats API -StatsAPIPath = "/stats/api" \ No newline at end of file diff --git a/config/threat-scoring.toml.example b/config/threat-scoring.toml.example new file mode 100644 index 0000000..bf850aa --- /dev/null +++ b/config/threat-scoring.toml.example @@ -0,0 +1,90 @@ +# ============================================================================= +# THREAT SCORING CONFIGURATION - EXAMPLE CONFIG +# ============================================================================= +# Copy this file to threat-scoring.toml and customize for your environment +# All included threat signals are fully implemented and tested + +[Core] +# Enable or disable threat scoring entirely +Enabled = true + +# Enable detailed logging of scoring decisions (for debugging) +LogDetailedScores = false + +[Thresholds] +# Score thresholds that determine the action taken for each request +# Scores are calculated from 0-100+ based on various threat signals + +# Requests with scores <= AllowThreshold are allowed through immediately +AllowThreshold = 15 # Conservative - allows more legitimate traffic + +# Requests with scores <= ChallengeThreshold receive a challenge (proof-of-work) +ChallengeThreshold = 80 # Much higher - blocking is absolute last resort + +# Requests with scores > ChallengeThreshold are blocked +BlockThreshold = 100 # Truly malicious content (javascript:, - - - - -
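A minimal sketch (not part of this diff) of how the three threat-scoring thresholds above are intended to combine, assuming a scorer that returns a 0-100+ total as described in the comments: scores at or below AllowThreshold pass through, scores above the challenge threshold are blocked, and everything in between receives the proof-of-work challenge. Names mirror config/threat-scoring.toml.example; the actual decision logic lives in the project's threat-scoring and checkpoint code.

// Illustrative only — a sketch of the threshold-to-action mapping, not the project's implementation.
type ThreatAction = 'allow' | 'challenge' | 'block';

interface Thresholds {
  AllowThreshold: number;     // e.g. 15
  ChallengeThreshold: number; // e.g. 80
  BlockThreshold: number;     // e.g. 100
}

function actionForScore(score: number, t: Thresholds): ThreatAction {
  if (score <= t.AllowThreshold) return 'allow';        // low-risk traffic is allowed immediately
  if (score <= t.ChallengeThreshold) return 'challenge'; // medium risk gets a proof-of-work challenge
  return 'block';                                        // only the highest scores are blocked outright
}

// Example: actionForScore(10, { AllowThreshold: 15, ChallengeThreshold: 80, BlockThreshold: 100 }) === 'allow'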
[Deleted: pages/stats/stats.html — the stats web UI page removed by this change. It rendered a "Checkpoint Service Statistics" dashboard with Total Hits, Total Passes, and Total Failures counters; charts for Checkpoint Hits, Checkpoint Passes, and Checkpoint Failures; a "Plugin stats below this line" marker; an "IPfilter Statistics" section with Top Blocked ASNs, Top Blocked IPs, and Top Triggered Rules panels (each showing "No data available yet"); and a "Data retention period: 30 days" note. The page markup and chart scripts are omitted here.]
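The config change above replaces the hour-based DBUpdateIntervalHours with a time.ts-style duration string ("12h", "5m", "30d"), the same format the stats and checkpoint code below feed to parseDuration. A minimal sketch of such a parser, assuming only the simple "<number><unit>" forms shown in the examples; the project's actual utils/time.ts may accept more.

// Hypothetical sketch of a duration parser for strings like "30s", "5m", "12h", "30d".
// The real parseDuration in utils/time.ts may differ in supported units and error handling.
const UNIT_MS: Record<string, number> = {
  ms: 1,
  s: 1000,
  m: 60 * 1000,
  h: 60 * 60 * 1000,
  d: 24 * 60 * 60 * 1000,
};

function parseDurationSketch(input: string): number {
  const match = /^(\d+(?:\.\d+)?)(ms|s|m|h|d)$/.exec(input.trim());
  if (!match) throw new Error(`Invalid duration: ${input}`);
  return Number(match[1]) * UNIT_MS[match[2]];
}

// Example: parseDurationSketch("12h") === 43_200_000 (milliseconds)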
- - - - diff --git a/plugins/ipfilter.js b/plugins/ipfilter.js deleted file mode 100644 index e22b4b2..0000000 --- a/plugins/ipfilter.js +++ /dev/null @@ -1,469 +0,0 @@ -import { registerPlugin, loadConfig, rootDir } from '../index.js'; -import fs from 'fs'; -import { dirname, join } from 'path'; -import { fileURLToPath } from 'url'; -import maxmind from 'maxmind'; -import { AhoCorasick } from 'string-dsa'; -import { getRealIP } from '../utils/network.js'; -import { createGunzip } from 'zlib'; -import tarStream from 'tar-stream'; -import { Buffer } from 'buffer'; -import * as logs from '../utils/logs.js'; -import { recordEvent } from './stats.js'; - -const cfg = {}; -await loadConfig('ipfilter', cfg); - -// Map configuration to internal structure -const enabled = cfg.Core.Enabled; -const accountId = cfg.Core.AccountID || process.env.MAXMIND_ACCOUNT_ID; -const licenseKey = cfg.Core.LicenseKey || process.env.MAXMIND_LICENSE_KEY; -const dbUpdateInterval = cfg.Core.DBUpdateIntervalHours; - -const ipBlockCacheTTL = cfg.Cache.IPBlockCacheTTLSec * 1000; -const ipBlockCacheMaxEntries = cfg.Cache.IPBlockCacheMaxEntries; - -const blockedCountryCodes = new Set(cfg.Blocking.CountryCodes); -const blockedContinentCodes = new Set(cfg.Blocking.ContinentCodes); -const defaultBlockPage = cfg.Blocking.DefaultBlockPage; - -// Process ASN blocks -const blockedASNs = {}; -const asnGroupBlockPages = {}; -for (const [group, config] of Object.entries(cfg.ASN || {})) { - blockedASNs[group] = config.Numbers || []; - asnGroupBlockPages[group] = config.BlockPage; -} - -// Process ASN name blocks -const blockedASNNames = {}; -for (const [group, config] of Object.entries(cfg.ASNNames || {})) { - blockedASNNames[group] = config.Patterns || []; - if (config.BlockPage) { - asnGroupBlockPages[group] = config.BlockPage; - } -} - -const countryBlockPages = cfg.CountryBlockPages || {}; -const continentBlockPages = cfg.ContinentBlockPages || {}; - -const ipBlockCache = new Map(); - -const blockPageCache = new Map(); -async function loadBlockPage(filePath) { - if (!blockPageCache.has(filePath)) { - try { - const txt = await fs.promises.readFile(filePath, 'utf8'); - blockPageCache.set(filePath, txt); - } catch { - blockPageCache.set(filePath, null); - } - } - return blockPageCache.get(filePath); -} - -const __dirname = dirname(fileURLToPath(import.meta.url)); - -const geoIPCountryDBPath = join(rootDir, 'data/GeoLite2-Country.mmdb'); -const geoIPASNDBPath = join(rootDir, 'data/GeoLite2-ASN.mmdb'); -const updateTimestampPath = join(rootDir, 'data/ipfilter_update.json'); - -let geoipCountryReader, geoipASNReader; - -let isReloading = false; -let reloadLock = Promise.resolve(); - -async function getLastUpdateTimestamp() { - try { - if (fs.existsSync(updateTimestampPath)) { - const data = await fs.promises.readFile(updateTimestampPath, 'utf8'); - const json = JSON.parse(data); - return json.lastUpdated || 0; - } - } catch (err) { - logs.warn('ipfilter', `Failed to read last update timestamp: ${err}`); - } - return 0; -} - -async function saveUpdateTimestamp() { - try { - const timestamp = Date.now(); - await fs.promises.writeFile( - updateTimestampPath, - JSON.stringify({ lastUpdated: timestamp }), - 'utf8', - ); - return timestamp; - } catch (err) { - logs.error('ipfilter', `Failed to save update timestamp: ${err}`); - return Date.now(); - } -} - -// Ensure the update timestamp file exists on first run -if (!fs.existsSync(updateTimestampPath)) { - try { - await saveUpdateTimestamp(); - } catch (err) { - logs.error('ipfilter', `Failed 
to initialize update timestamp file: ${err}`); - } -} - -// Download GeoIP databases if missing -async function downloadGeoIPDatabases() { - if (!licenseKey || !accountId) { - logs.warn( - 'ipfilter', - 'No MaxMind credentials found; skipping GeoIP database download. Please set MAXMIND_ACCOUNT_ID and MAXMIND_LICENSE_KEY environment variables or add AccountID and LicenseKey to config/ipfilter.toml', - ); - return; - } - const editions = [ - { id: 'GeoLite2-Country', filePath: geoIPCountryDBPath }, - { id: 'GeoLite2-ASN', filePath: geoIPASNDBPath }, - ]; - for (const { id, filePath } of editions) { - if (!fs.existsSync(filePath)) { - logs.plugin('ipfilter', `Downloading ${id} database...`); - const url = `https://download.maxmind.com/app/geoip_download?edition_id=${id}&license_key=${licenseKey}&suffix=tar.gz`; - const res = await fetch(url); - if (!res.ok) { - logs.error( - 'ipfilter', - `Failed to download ${id} database: ${res.status} ${res.statusText}`, - ); - continue; - } - const tempTar = join(rootDir, 'data', `${id}.tar.gz`); - // write response body into a .tar.gz file - const arrayBuf = await res.arrayBuffer(); - await fs.promises.writeFile(tempTar, Buffer.from(arrayBuf)); - // extract .mmdb files from the downloaded tar.gz - const extract = tarStream.extract(); - extract.on('entry', (header, stream, next) => { - if (header.name.endsWith('.mmdb')) { - const filename = header.name.split('/').pop(); - const outPath = join(rootDir, 'data', filename); - const ws = fs.createWriteStream(outPath); - stream - .pipe(ws) - .on('finish', next) - .on('error', (err) => { - logs.error('ipfilter', `Extraction error: ${err}`); - next(); - }); - } else { - stream.resume(); - next(); - } - }); - await new Promise((resolve, reject) => { - fs.createReadStream(tempTar) - .pipe(createGunzip()) - .pipe(extract) - .on('finish', resolve) - .on('error', reject); - }); - await fs.promises.unlink(tempTar); - logs.plugin('ipfilter', `${id} database downloaded and extracted.`); - } - } -} - -await downloadGeoIPDatabases(); - -async function loadGeoDatabases() { - if (isReloading) { - await reloadLock; - return true; - } - - isReloading = true; - let lockResolve; - reloadLock = new Promise((resolve) => { - lockResolve = resolve; - }); - - try { - const countryStats = fs.statSync(geoIPCountryDBPath); - const asnStats = fs.statSync(geoIPASNDBPath); - - if (countryStats.size > 1024 && asnStats.size > 1024) { - logs.plugin('ipfilter', 'Initializing GeoIP databases from disk...'); - const newCountryReader = await maxmind.open(geoIPCountryDBPath); - const newASNReader = await maxmind.open(geoIPASNDBPath); - - try { - const testIP = '8.8.8.8'; - const countryTest = newCountryReader.get(testIP); - const asnTest = newASNReader.get(testIP); - - if (!countryTest || !asnTest) { - throw new Error('Database validation failed: test lookups returned empty results'); - } - } catch (validationErr) { - logs.error('ipfilter', `GeoIP database validation failed: ${validationErr}`); - - try { - await newCountryReader.close(); - } catch (e) {} - try { - await newASNReader.close(); - } catch (e) {} - throw new Error('Database validation failed'); - } - - const oldCountryReader = geoipCountryReader; - const oldASNReader = geoipASNReader; - - geoipCountryReader = newCountryReader; - geoipASNReader = newASNReader; - if (oldCountryReader || oldASNReader) { - logs.plugin('ipfilter', 'GeoIP databases reloaded and active'); - } else { - logs.plugin('ipfilter', 'GeoIP databases loaded and active'); - } - - ipBlockCache.clear(); - - await 
saveUpdateTimestamp(); - - if (oldCountryReader || oldASNReader) { - setTimeout(async () => { - if (oldCountryReader) { - try { - await oldCountryReader.close(); - } catch (e) {} - } - if (oldASNReader) { - try { - await oldASNReader.close(); - } catch (e) {} - } - logs.plugin('ipfilter', 'Old GeoIP database instances closed successfully'); - }, 5000); - } - - return true; - } else { - logs.warn( - 'ipfilter', - 'GeoIP database files are empty or too small. IP filtering will be disabled.', - ); - return false; - } - } catch (err) { - logs.error('ipfilter', `Failed to load GeoIP databases: ${err}`); - return false; - } finally { - isReloading = false; - lockResolve(); - } -} - -async function checkAndUpdateDatabases() { - if (isReloading) return false; - - const lastUpdate = await getLastUpdateTimestamp(); - const now = Date.now(); - const hoursSinceUpdate = (now - lastUpdate) / (1000 * 60 * 60); - - if (hoursSinceUpdate >= dbUpdateInterval) { - logs.plugin( - 'ipfilter', - `GeoIP databases last updated ${hoursSinceUpdate.toFixed(1)} hours ago, reloading...`, - ); - return await loadGeoDatabases(); - } - - return false; -} - -function startPeriodicDatabaseUpdates() { - // Calculate interval in milliseconds - const intervalMs = dbUpdateInterval * 60 * 60 * 1000; - - // Schedule periodic updates - setInterval(async () => { - try { - await checkAndUpdateDatabases(); - } catch (err) { - logs.error('ipfilter', `Failed during periodic database update: ${err}`); - } - }, intervalMs); - - logs.plugin('ipfilter', `Scheduled GeoIP database updates every ${dbUpdateInterval} hours`); -} - -await loadGeoDatabases(); - -startPeriodicDatabaseUpdates(); - -const asnNameMatchers = new Map(); -for (const [group, names] of Object.entries(blockedASNNames)) { - asnNameMatchers.set(group, new AhoCorasick(names)); -} - -function cacheAndReturn(ip, blocked, blockType, blockValue, customPage, asnOrgName) { - const expiresAt = Date.now() + ipBlockCacheTTL; - ipBlockCache.set(ip, { blocked, blockType, blockValue, customPage, asnOrgName, expiresAt }); - // Enforce maximum cache size - if (ipBlockCacheMaxEntries > 0 && ipBlockCache.size > ipBlockCacheMaxEntries) { - // Remove the oldest entry (first key in insertion order) - const oldestKey = ipBlockCache.keys().next().value; - ipBlockCache.delete(oldestKey); - } - return [blocked, blockType, blockValue, customPage, asnOrgName]; -} - -function isBlockedIPExtended(ip) { - const now = Date.now(); - const entry = ipBlockCache.get(ip); - if (entry) { - if (entry.expiresAt > now) { - // Refresh recency by re-inserting entry - ipBlockCache.delete(ip); - ipBlockCache.set(ip, entry); - return [entry.blocked, entry.blockType, entry.blockValue, entry.customPage, entry.asnOrgName]; - } else { - // Entry expired, remove it - ipBlockCache.delete(ip); - } - } - - const countryReader = geoipCountryReader; - const asnReader = geoipASNReader; - - if (!countryReader || !asnReader) { - return [false, '', '', '', '']; - } - - let countryInfo; - try { - countryInfo = countryReader.get(ip); - } catch (e) {} - if (countryInfo?.country && blockedCountryCodes.has(countryInfo.country.iso_code)) { - const page = countryBlockPages[countryInfo.country.iso_code] || defaultBlockPage; - return cacheAndReturn(ip, true, 'country', countryInfo.country.iso_code, page, ''); - } - - if (countryInfo?.continent && blockedContinentCodes.has(countryInfo.continent.code)) { - const page = continentBlockPages[countryInfo.continent.code] || defaultBlockPage; - return cacheAndReturn(ip, true, 'continent', 
countryInfo.continent.code, page, ''); - } - - let asnInfo; - try { - asnInfo = asnReader.get(ip); - } catch (e) {} - if (asnInfo?.autonomous_system_number) { - const asn = asnInfo.autonomous_system_number; - const orgName = asnInfo.autonomous_system_organization || ''; - - for (const [group, arr] of Object.entries(blockedASNs)) { - if (arr.includes(asn)) { - const page = asnGroupBlockPages[group] || defaultBlockPage; - return cacheAndReturn(ip, true, 'asn', group, page, orgName); - } - } - - for (const [group, matcher] of asnNameMatchers.entries()) { - const matches = matcher.find(orgName); - if (matches.length) { - const page = asnGroupBlockPages[group] || defaultBlockPage; - return cacheAndReturn(ip, true, 'asn', group, page, orgName); - } - } - } - - return cacheAndReturn(ip, false, '', '', '', ''); -} - -function IPBlockMiddleware() { - return { - middleware: async (req, res, next) => { - // Convert Express request to the format expected by ipfilter logic - const request = { - url: `${req.protocol}://${req.get('host')}${req.originalUrl}`, - headers: { - get: (name) => req.get(name), - entries: () => Object.entries(req.headers).map(([k, v]) => [k, Array.isArray(v) ? v.join(', ') : v]) - } - }; - - const clientIP = getRealIP(request); - logs.plugin('ipfilter', `Incoming request from IP: ${clientIP}`); - const [blocked, blockType, blockValue, customPage, asnOrgName] = isBlockedIPExtended(clientIP); - - if (blocked) { - recordEvent('ipfilter.block', { - type: blockType, - value: blockValue, - asn_org: asnOrgName, - ip: clientIP, // Include the IP address for stats - }); - const url = new URL(request.url); - - if (url.pathname.startsWith('/api')) { - return res.status(403).json({ - error: 'Access denied from your location or network.', - reason: 'geoip', - type: blockType, - value: blockValue, - asn_org: asnOrgName, - }); - } - - // Normalize page paths by stripping leading slash - const cleanCustomPage = customPage.replace(/^\/+/, ''); - const cleanDefaultPage = defaultBlockPage.replace(/^\/+/, ''); - - let html = ''; - logs.plugin( - 'ipfilter', - `Block pages: custom="${cleanCustomPage}", default="${cleanDefaultPage}"`, - ); - logs.plugin('ipfilter', 'Searching for block page in the following locations:'); - const paths = [ - // allow absolute paths relative to project root first - join(rootDir, cleanCustomPage), - ]; - // Fallback to default block page if custom page isn't found - if (customPage !== defaultBlockPage) { - paths.push( - // check default page at root directory - join(rootDir, cleanDefaultPage), - ); - } - - for (const p of paths) { - logs.plugin('ipfilter', `Trying block page at: ${p}`); - const content = await loadBlockPage(p); - logs.plugin('ipfilter', `Load result for ${p}: ${content ? 
'FOUND' : 'NOT FOUND'}`); - if (content) { - html = content; - break; - } - } - - if (html) { - const output = html.replace('{{.ASNName}}', asnOrgName || 'Blocked Network'); - return res.status(403).type('html').send(output); - } else { - return res.status(403).type('text').send('Access denied from your location or network.'); - } - } - - return next(); - } - }; -} - -if (enabled) { - registerPlugin('ipfilter', IPBlockMiddleware()); -} else { - logs.plugin('ipfilter', 'IP filter plugin disabled via config'); -} - -export { checkAndUpdateDatabases, loadGeoDatabases }; diff --git a/plugins/proxy.js b/plugins/proxy.js deleted file mode 100644 index f842d29..0000000 --- a/plugins/proxy.js +++ /dev/null @@ -1,116 +0,0 @@ -import { registerPlugin, loadConfig } from '../index.js'; -import * as logs from '../utils/logs.js'; -import { createProxyMiddleware } from 'http-proxy-middleware'; -import express from 'express'; -import { fileURLToPath } from 'url'; -import { dirname } from 'path'; -import { createRequire } from 'module'; - -// Setup require for ESM modules -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); -const require = createRequire(import.meta.url); - -// Monkey patch the ws module to prevent "write after end" errors -// Based on https://stackoverflow.com/questions/27769842/write-after-end-error-in-node-js-webserver/33591429 -try { - const ws = require('ws'); - const originalClose = ws.Sender.prototype.close; - - // Override the close method to check if the socket is already closed - ws.Sender.prototype.close = function(code, data, mask, cb) { - if (this._socket && (this._socket.destroyed || !this._socket.writable)) { - logs.plugin('proxy', 'WebSocket close called on already closed socket - ignoring'); - if (typeof cb === 'function') cb(); - return; - } - return originalClose.call(this, code, data, mask, cb); - }; - logs.plugin('proxy', 'Monkey patched ws module to prevent write after end errors'); -} catch (err) { - logs.error('proxy', `Failed to monkey patch ws module: ${err.message}`); -} - -const proxyConfig = {}; -await loadConfig('proxy', proxyConfig); - -const enabled = proxyConfig.Core.Enabled; -const upstreamTimeout = proxyConfig.Timeouts.UpstreamTimeoutMs; - -const proxyMappings = {}; -proxyConfig.Mapping.forEach(mapping => { - proxyMappings[mapping.Host] = mapping.Target; -}); - -logs.plugin('proxy', `Proxy mappings loaded: ${JSON.stringify(proxyMappings)}`); - -// Store for http-proxy-middleware instances -const hpmInstances = {}; - -function createProxyForHost(target) { - const proxyOptions = { - target, - changeOrigin: true, - ws: true, - logLevel: 'info', - timeout: upstreamTimeout, - onError: (err, req, res, _target) => { - const targetInfo = _target && _target.href ? _target.href : (typeof _target === 'string' ? 
_target : 'N/A'); - logs.error('proxy', `[HPM onError] Proxy error for ${req.method} ${req.url} to ${targetInfo}: ${err.message} (Code: ${err.code || 'N/A'})`); - if (res && typeof res.writeHead === 'function') { - if (!res.headersSent) { - res.writeHead(502, { 'Content-Type': 'text/plain' }); - res.end('Bad Gateway'); - } else if (typeof res.destroy === 'function' && !res.destroyed) { - res.destroy(); - } - } else if (res && typeof res.end === 'function' && res.writable && !res.destroyed) { - logs.plugin('proxy', `[HPM onError] Client WebSocket socket for ${req.url} attempting to end due to proxy error: ${err.message}.`); - res.end(); - } - }, - followRedirects: false, - preserveHeaderKeyCase: true, - autoRewrite: true, - protocolRewrite: 'http', - cookieDomainRewrite: { "*": "" } - }; - - return createProxyMiddleware(proxyOptions); -} - -function proxyMiddleware() { - const router = express.Router(); - - router.use('/api/challenge', (req, res, next) => next('route')); - router.use('/api/verify', (req, res, next) => next('route')); - router.use('/webfont/', (req, res, next) => next('route')); - router.use('/js/', (req, res, next) => next('route')); - - Object.entries(proxyMappings).forEach(([host, target]) => { - hpmInstances[host] = createProxyForHost(target); - }); - - router.use((req, res, next) => { - const hostname = req.hostname || req.headers.host?.split(':')[0]; - const proxyInstance = hpmInstances[hostname]; - - if (proxyInstance) { - proxyInstance(req, res, next); - } else { - next(); - } - }); - - return { middleware: router }; -} - -export function getHpmInstance(hostname) { - return hpmInstances[hostname]; -} - -if (enabled) { - registerPlugin('proxy', proxyMiddleware()); -} else { - logs.plugin('proxy', 'Proxy plugin disabled via config'); -} diff --git a/plugins/stats.js b/plugins/stats.js deleted file mode 100644 index 62eec29..0000000 --- a/plugins/stats.js +++ /dev/null @@ -1,134 +0,0 @@ -import { registerPlugin, rootDir, loadConfig } from '../index.js'; -import { Level } from 'level'; -import ttl from 'level-ttl'; -import fs from 'fs/promises'; -import path from 'path'; -import { fileURLToPath } from 'url'; -import { Readable } from 'stream'; -import cookie from 'cookie'; -import { getRealIP } from '../utils/network.js'; -import { parseDuration } from '../utils/time.js'; - -// Load stats configuration -const statsConfig = {}; -await loadConfig('stats', statsConfig); - -// Map configuration to internal structure -const enabled = statsConfig.Core.Enabled; -const statsTTL = parseDuration(statsConfig.Storage.StatsTTL); -const statsUIPath = statsConfig.WebUI.StatsUIPath; -const statsAPIPath = statsConfig.WebUI.StatsAPIPath; - -// Determine __dirname for ES modules -const __dirname = path.dirname(fileURLToPath(import.meta.url)); - -/** - * Adds createReadStream support to LevelDB instances using async iterator. 
- */ -function addReadStreamSupport(dbInstance) { - if (!dbInstance.createReadStream) { - dbInstance.createReadStream = (opts) => - Readable.from( - (async function* () { - for await (const [key, value] of dbInstance.iterator(opts)) { - yield { key, value }; - } - })(), - ); - } - return dbInstance; -} - -// Initialize LevelDB for stats under db/stats with TTL and stream support -const statsDBPath = path.join(rootDir, 'db', 'stats'); -await fs.mkdir(statsDBPath, { recursive: true }); -let rawStatsDB = new Level(statsDBPath, { valueEncoding: 'json' }); -rawStatsDB = addReadStreamSupport(rawStatsDB); -const statsDB = ttl(rawStatsDB, { defaultTTL: statsTTL }); -addReadStreamSupport(statsDB); - -/** - * Record a stat event with a metric name and optional data. - * @param {string} metric - * @param {object} data - */ -function recordEvent(metric, data = {}) { - // Skip if statsDB is not initialized - if (typeof statsDB === 'undefined' || !statsDB || typeof statsDB.put !== 'function') { - console.warn(`stats: cannot record "${metric}", statsDB not available`); - return; - } - const timestamp = Date.now(); - // key includes metric and timestamp and a random suffix to avoid collisions - const key = `${metric}:${timestamp}:${Math.random().toString(36).slice(2, 8)}`; - try { - // Use callback form to avoid promise chaining - statsDB.put(key, { timestamp, metric, ...data }, (err) => { - if (err) console.error('stats: failed to record event', err); - }); - } catch (err) { - console.error('stats: failed to record event', err); - } -} - -// Handler for serving the stats HTML UI -async function handleStatsPage(req, res) { - const url = new URL(`${req.protocol}://${req.get('host')}${req.originalUrl}`); - if (url.pathname !== statsUIPath) return false; - try { - // Load the stats UI from pages/stats/stats.html in the project root - const statsHtmlPath = path.join(rootDir, 'pages', 'stats', 'stats.html'); - const html = await fs.readFile(statsHtmlPath, 'utf8'); - res.status(200).type('html').send(html); - return true; - } catch (e) { - res.status(404).send('Stats UI not found'); - return true; - } -} - -// Handler for stats API -async function handleStatsAPI(req, res) { - const url = new URL(`${req.protocol}://${req.get('host')}${req.originalUrl}`); - if (url.pathname !== statsAPIPath) return false; - const metric = url.searchParams.get('metric'); - const start = parseInt(url.searchParams.get('start') || '0', 10); - const end = parseInt(url.searchParams.get('end') || `${Date.now()}`, 10); - const result = []; - // Iterate over keys for this metric in the time range - for await (const [key, value] of statsDB.iterator({ - gte: `${metric}:${start}`, - lte: `${metric}:${end}\uffff`, - })) { - result.push(value); - } - res.status(200).json(result); - return true; -} - -// Middleware for stats plugin -function StatsMiddleware() { - return { - middleware: async (req, res, next) => { - // Always serve stats UI and API first, bypassing auth - const pageHandled = await handleStatsPage(req, res); - if (pageHandled) return; - - const apiHandled = await handleStatsAPI(req, res); - if (apiHandled) return; - - // For any other routes, do not handle - return next(); - } - }; -} - -// Register the stats plugin -if (enabled) { - registerPlugin('stats', StatsMiddleware()); -} else { - console.log('Stats plugin disabled via config'); -} - -// Export recordEvent for other plugins to use -export { recordEvent }; diff --git a/src/checkpoint.ts b/src/checkpoint.ts new file mode 100644 index 0000000..2221b17 --- /dev/null +++ 
b/src/checkpoint.ts @@ -0,0 +1,1452 @@ +import { registerPlugin, loadConfig, rootDir } from './index.js'; +import * as crypto from 'crypto'; +import * as path from 'path'; +import * as fs from 'fs'; +import { promises as fsPromises } from 'fs'; +import { dirname, join } from 'path'; +import { fileURLToPath } from 'url'; +import { Level } from 'level'; +import * as cookie from 'cookie'; +import { parseDuration } from './utils/time.js'; +import { getRealIP, getRequestURL, type NetworkRequest } from './utils/network.js'; +// @ts-ignore - level-ttl doesn't have TypeScript definitions +import ttl from 'level-ttl'; +import { Readable } from 'stream'; +import { + challengeStore, + generateRequestID as proofGenerateRequestID, + getChallengeParams, + deleteChallenge, + verifyPoW, + verifyPoS, +} from './utils/proof.js'; +import * as express from 'express'; +import { Request, Response as ExpressResponse, NextFunction, Router } from 'express'; +import { threatScorer } from './utils/threat-scoring.js'; +import * as logs from './utils/logs.js'; + +// Pre-computed durations for cleanup timers (avoid parsing overhead) +const CLEANUP_TIMER_INTERVAL = parseDuration('1h'); +const DEFAULT_CHALLENGE_INTERVAL = parseDuration('1m'); + +// ==================== TYPE DEFINITIONS ==================== + +interface CheckpointExclusionRule { + readonly Path: string; + readonly Hosts?: readonly string[]; + readonly UserAgents?: readonly string[]; +} + +interface CheckpointBypassKey { + readonly Type: 'query' | 'header'; + readonly Key: string; + readonly Value: string; + readonly Hosts?: readonly string[]; +} + +interface ProcessedCheckpointConfig extends Record { + Enabled: boolean; + CookieName: string; + CookieDomain?: string; + SanitizeURLs?: boolean; + ThreatScoringEnabled: boolean; + AllowThreshold: number; + ChallengeThreshold: number; + BlockThreshold: number; + Difficulty: number; + SaltLength: number; + ChallengeExpiration: number; + MaxAttemptsPerHour: number; + CheckPoSTimes: boolean; + PoSTimeConsistencyRatio?: number; + TokenExpiration: number; + MaxNonceAge: number; + SecretConfigPath?: string; + TokenStoreDBPath?: string; + InterstitialPaths: readonly string[]; + ExclusionRules: readonly CheckpointExclusionRule[]; + BypassQueryKeys: readonly CheckpointBypassQueryKey[]; + BypassHeaderKeys: readonly CheckpointBypassHeaderKey[]; + HTMLCheckpointIncludedExtensions: readonly string[]; + HTMLCheckpointExcludedExtensions: readonly string[]; + BypassKeys?: readonly CheckpointBypassKey[]; +} + +interface CheckpointBypassQueryKey { + readonly Key: string; + readonly Value: string; + readonly Domains: readonly string[]; +} + +interface CheckpointBypassHeaderKey { + readonly Name: string; + readonly Value: string; + readonly Domains: readonly string[]; +} + +interface CheckpointToken { + readonly Nonce: string; + readonly ExpiresAt: Date; + readonly ClientIP: string; + readonly UserAgent: string; + readonly BrowserHint: string; + readonly Entropy: string; + readonly Created: Date; + readonly LastVerified: Date; + readonly TokenFormat: number; + readonly Signature?: string; +} + +interface TokenValidationResult { + readonly valid: boolean; + readonly reason?: string; + readonly created?: Date; +} + +interface SecretConfig { + readonly hmac_secret: string; + readonly created_at: string; + readonly updated_at: string; +} + +interface TemplateData extends Record { + readonly TargetPath: string; + readonly RequestID: string; + readonly Host: string; + readonly FullURL: string; + readonly ThreatScore: number; + readonly 
ThreatLevel: string; + readonly ChallengeType: string; + readonly EstimatedTime: string; +} + +interface BypassKeyResult { + readonly type: 'query' | 'header'; + readonly key: string; +} + +// Extend Express types +declare global { + namespace Express { + interface Request { + geoData?: { + country?: string; + continent?: string; + latitude?: number; + longitude?: number; + } | null; + wafSignals?: Record; + } + + interface Locals { + geoData?: { + country?: string; + continent?: string; + latitude?: number; + longitude?: number; + } | null; + wafSignals?: Record; + } + } +} + +// ==================== UTILITY FUNCTIONS ==================== + +function sanitizePath(inputPath: unknown): string { + if (typeof inputPath !== 'string') { + return '/'; + } + + let pathOnly = inputPath.replace(/[\x00-\x1F\x7F]/g, ''); + pathOnly = pathOnly.replace(/[<>;"'`|]/g, ''); + const parts = pathOnly.split('/').filter((seg) => seg && seg !== '.' && seg !== '..'); + return '/' + parts.map((seg) => encodeURIComponent(seg)).join('/'); +} + +// ==================== LIMITED MAP CLASS ==================== + +class LimitedMap { + private readonly maxSize: number; + private readonly map = new Map(); + + constructor(maxSize = 10000) { + this.maxSize = Math.max(1, Math.floor(maxSize)); + } + + set(key: K, value: V): void { + // Delete oldest entries if at capacity + if (this.map.size >= this.maxSize) { + const firstKey = this.map.keys().next().value; + if (firstKey !== undefined) { + this.map.delete(firstKey); + } + } + // Delete and re-add to move to end (LRU) + this.map.delete(key); + this.map.set(key, value); + } + + get(key: K): V | undefined { + const value = this.map.get(key); + if (value !== undefined) { + // Move to end on access (LRU) + this.map.delete(key); + this.map.set(key, value); + } + return value; + } + + has(key: K): boolean { + return this.map.has(key); + } + + delete(key: K): boolean { + return this.map.delete(key); + } + + clear(): void { + this.map.clear(); + } + + get size(): number { + return this.map.size; + } + + entries(): IterableIterator<[K, V]> { + return this.map.entries(); + } + + [Symbol.iterator](): IterableIterator<[K, V]> { + return this.map[Symbol.iterator](); + } +} + +// ==================== GLOBAL STATE ==================== + +const checkpointConfig: ProcessedCheckpointConfig = {} as ProcessedCheckpointConfig; +let hmacSecret: Buffer | null = null; + +const usedNonces = new LimitedMap(5000); +const ipRateLimit = new LimitedMap(10000); +const tokenCache = new LimitedMap(20000); +let db: any; +const tokenExpirations = new LimitedMap(20000); +let interstitialTemplate: ((data: TemplateData) => string) | null = null; + +// @ts-ignore - __dirname equivalent for ES modules +const _dirname = dirname(fileURLToPath(import.meta.url)); + +// ==================== TEMPLATE FUNCTION ==================== + +function simpleTemplate>(str: string): (data: T) => string { + return function (data: T): string { + return str.replace(/\{\{\s*([^{}]+?)\s*\}\}/g, (_, key: string) => { + let value: unknown = data; + for (const part of key.trim().split('.')) { + if (value && typeof value === 'object' && part in value) { + value = (value as Record)[part]; + } else { + value = undefined; + break; + } + } + return value != null ? 
String(value) : ''; + }); + }; +} + +// ==================== CONFIGURATION INITIALIZATION ==================== + +async function initConfig(): Promise { + await loadConfig('checkpoint', checkpointConfig as Record); + + const config = checkpointConfig as any; + + // Handle new nested configuration structure + checkpointConfig.Enabled = config.Core?.Enabled ?? false; + checkpointConfig.CookieName = config.Core?.CookieName ?? '__checkpoint'; + checkpointConfig.CookieDomain = config.Core?.CookieDomain; + checkpointConfig.SanitizeURLs = config.Core?.SanitizeURLs ?? true; + + // Threat scoring settings - require configuration + checkpointConfig.ThreatScoringEnabled = config.ThreatScoring?.Enabled ?? true; + + // Threat thresholds must be configured - no defaults provided + if (checkpointConfig.ThreatScoringEnabled) { + if (config.ThreatScoring?.AllowThreshold === undefined || config.ThreatScoring?.ChallengeThreshold === undefined || config.ThreatScoring?.BlockThreshold === undefined) { + throw new Error('Threat scoring enabled but thresholds not configured. Please set AllowThreshold, ChallengeThreshold, and BlockThreshold in config file.'); + } + checkpointConfig.AllowThreshold = config.ThreatScoring.AllowThreshold; + checkpointConfig.ChallengeThreshold = config.ThreatScoring.ChallengeThreshold; + checkpointConfig.BlockThreshold = config.ThreatScoring.BlockThreshold; + } else { + // Safe defaults when threat scoring is disabled + checkpointConfig.AllowThreshold = 0; + checkpointConfig.ChallengeThreshold = 50; + checkpointConfig.BlockThreshold = 100; + } + + // Proof of Work settings + checkpointConfig.Difficulty = config.ProofOfWork?.Difficulty ?? 16; + checkpointConfig.SaltLength = config.ProofOfWork?.SaltLength ?? 32; + checkpointConfig.ChallengeExpiration = parseDuration( + config.ProofOfWork?.ChallengeExpiration ?? '5m', + ); + checkpointConfig.MaxAttemptsPerHour = config.ProofOfWork?.MaxAttemptsPerHour ?? 10; + + // Proof of Space-Time settings + checkpointConfig.CheckPoSTimes = config.ProofOfSpaceTime?.Enabled ?? false; + checkpointConfig.PoSTimeConsistencyRatio = config.ProofOfSpaceTime?.ConsistencyRatio; + + // Token settings + checkpointConfig.TokenExpiration = parseDuration(config.Token?.Expiration ?? '24h'); + checkpointConfig.MaxNonceAge = parseDuration(config.Token?.MaxNonceAge ?? '1h'); + + // Storage settings + checkpointConfig.SecretConfigPath = config.Storage?.SecretPath; + checkpointConfig.TokenStoreDBPath = config.Storage?.TokenDBPath; + checkpointConfig.InterstitialPaths = config.Storage?.InterstitialTemplates ?? ['pages/interstitial/index.html']; + + // Process exclusions + checkpointConfig.ExclusionRules = config.Exclusion || []; + + // Process bypass keys + checkpointConfig.BypassQueryKeys = []; + checkpointConfig.BypassHeaderKeys = []; + + if (Array.isArray(config.BypassKeys)) { + config.BypassKeys.forEach((key: any) => { + if (key && typeof key === 'object') { + if (key.Type === 'query') { + (checkpointConfig.BypassQueryKeys as CheckpointBypassQueryKey[]).push({ + Key: String(key.Key || ''), + Value: String(key.Value || ''), + Domains: Array.isArray(key.Hosts) ? key.Hosts.filter((h: unknown) => typeof h === 'string') : [], + }); + } else if (key.Type === 'header') { + (checkpointConfig.BypassHeaderKeys as CheckpointBypassHeaderKey[]).push({ + Name: String(key.Key || ''), + Value: String(key.Value || ''), + Domains: Array.isArray(key.Hosts) ? 
key.Hosts.filter((h: unknown) => typeof h === 'string') : [], + }); + } + } + }); + } + + // Extension handling + checkpointConfig.HTMLCheckpointIncludedExtensions = config.Extensions?.IncludeOnly || []; + checkpointConfig.HTMLCheckpointExcludedExtensions = config.Extensions?.Exclude || []; + + // Store original BypassKeys for compatibility + checkpointConfig.BypassKeys = config.BypassKeys || []; +} + +// ==================== DATABASE UTILITIES ==================== + +function addReadStreamSupport(dbInstance: any): any { + if (!dbInstance.createReadStream) { + dbInstance.createReadStream = (opts?: unknown) => + Readable.from( + (async function* () { + for await (const [key, value] of dbInstance.iterator(opts)) { + yield { key, value }; + } + })(), + ); + } + return dbInstance; +} + +async function initTokenStore(): Promise { + try { + const storePath = join(rootDir, checkpointConfig.TokenStoreDBPath || 'db/tokenstore'); + await fsPromises.mkdir(storePath, { recursive: true }); + + // Use optimized Level options for better performance + const levelOptions = { + valueEncoding: 'json', + cacheSize: 16 * 1024 * 1024, // 16MB cache + blockSize: 4096, + writeBufferSize: 4 * 1024 * 1024, // 4MB write buffer + compression: true, // Enable compression to save disk space + maxOpenFiles: 1000 + }; + + let rawDB = new Level(storePath, levelOptions); + addReadStreamSupport(rawDB); + + // Ensure database is opened before wrapping with TTL + await rawDB.open(); + + db = ttl(rawDB, { defaultTTL: checkpointConfig.TokenExpiration }); + addReadStreamSupport(db); + + logs.plugin('checkpoint', 'Token store initialized with TTL and optimized settings'); + return db; + } catch (err) { + const error = err as Error; + console.error('Failed to initialize token store:', error); + // CRITICAL: If token store fails to initialize, the system cannot track valid tokens + // This will cause all users to be repeatedly challenged. Ensure the db directory + // has proper write permissions and sufficient disk space. 
+ throw new Error(`Token store initialization failed: ${error.message}`); + } +} + +// ==================== CLIENT IDENTIFICATION ==================== + +function getFullClientIP(request: NetworkRequest): string { + const ip = getRealIP(request) || ''; + const h = crypto.createHash('sha256').update(ip).digest(); + return h.slice(0, 8).toString('hex'); +} + +function hashUserAgent(ua: unknown): string { + if (typeof ua !== 'string' || !ua) return ''; + const h = crypto.createHash('sha256').update(ua).digest(); + return h.slice(0, 8).toString('hex'); +} + +function extractBrowserFingerprint(request: NetworkRequest): string { + const headers = [ + 'sec-ch-ua', + 'sec-ch-ua-platform', + 'sec-ch-ua-mobile', + 'sec-ch-ua-platform-version', + 'sec-ch-ua-arch', + 'sec-ch-ua-model', + ]; + + // Handle both Express and fetch-style headers + const parts = headers.map((h) => { + if (request.headers && typeof request.headers.get === 'function') { + return request.headers.get(h); + } + if (request.headers && typeof request.headers === 'object') { + return (request.headers as Record)[h]; + } + return null; + }).filter((part): part is string => typeof part === 'string' && part.length > 0); + + if (!parts.length) return ''; + const buf = Buffer.from(parts.join('|')); + const h = crypto.createHash('sha256').update(buf).digest(); + return h.slice(0, 12).toString('hex'); +} + +// ==================== TEMPLATE MANAGEMENT ==================== + +async function getInterstitialTemplate(): Promise<(data: TemplateData) => string> { + if (!interstitialTemplate) { + for (const p of checkpointConfig.InterstitialPaths) { + try { + // Always use rootDir (project root) for template paths + let templatePath = join(rootDir, p); + if (fs.existsSync(templatePath)) { + const raw = await fsPromises.readFile(templatePath, 'utf8'); + interstitialTemplate = simpleTemplate(raw); + break; + } + } catch (e) { + const error = e as Error; + console.warn(`Failed to load interstitial template from path ${p}:`, error); + } + } + + if (!interstitialTemplate) { + console.warn('Could not find interstitial HTML template, using minimal fallback'); + interstitialTemplate = simpleTemplate(` + + + + Security Verification + + + + +

Security Verification Required

+

Please wait while we verify your request...

+
+
+ + + + `) as (data: TemplateData) => string; + } + } + return interstitialTemplate; +} + +// ==================== RESPONSE GENERATION ==================== + +async function serveInterstitial(request: NetworkRequest, threatScore = 0): Promise { + const ip = getRealIP(request); + const url = getRequestURL(request); + const requestPath = url?.pathname || (request as any).url || '/'; + console.log(`CHECKPOINT: Serving interstitial for ${ip} to ${requestPath} with threat score ${threatScore}`); + + let tpl: (data: TemplateData) => string; + try { + tpl = await getInterstitialTemplate(); + } catch (err) { + console.error('Interstitial template error:', err); + return new Response('Security verification required.', { + status: 200, + headers: { 'Content-Type': 'text/plain' }, + }); + } + + const requestID = proofGenerateRequestID(request, checkpointConfig); + + // Handle headers in a type-safe way + let host = 'localhost'; + if (request.headers) { + if (typeof request.headers.get === 'function') { + host = request.headers.get('host') || request.headers.get('x-forwarded-host') || host; + } else if (typeof request.headers === 'object') { + const headers = request.headers as Record; + host = String(headers.host || headers['x-forwarded-host'] || host); + } + } + + const targetPath = url?.pathname || (request as any).url || '/'; + const fullURL = url ? url.toString() : `http://${host}${(request as any).url || '/'}`; + + // Enhanced template data with threat information + const templateData: TemplateData = { + TargetPath: targetPath, + RequestID: requestID, + Host: host, + FullURL: fullURL, + ThreatScore: threatScore, + ThreatLevel: getThreatLevel(threatScore), + ChallengeType: threatScore > 60 ? 'advanced' : 'standard', + EstimatedTime: threatScore > 60 ? '10-15' : '5-10' + }; + + const html = tpl(templateData); + + return new Response(html, { + status: 200, + headers: { + 'Content-Type': 'text/html; charset=utf-8', + 'Cache-Control': 'no-cache, no-store, must-revalidate', + 'X-Checkpoint-Score': String(threatScore) + }, + }); +} + +async function serveBlockPage(request: NetworkRequest, threatScore = 100, signals: Record = {}): Promise { + const ip = getRealIP(request); + const url = getRequestURL(request); + const requestPath = url?.pathname || (request as any).url || '/'; + console.log(`CHECKPOINT: Serving block page for ${ip} to ${requestPath} with threat score ${threatScore}`); + + // Enhanced block page with more information + const blockHtml = ` + + + + Access Denied - Security System + + + + + +
+
+
+ + + +
+ +

Access Denied

+

Your request has been blocked by our security system.

+ +
+ Security Assessment +
${threatScore}/100
+
+ ${Object.keys(signals).length > 0 ? + Object.keys(signals).map(signal => + `${formatSignalName(signal)}` + ).join('') : + 'Multiple security violations detected' + } +
+
+ +
+

If you believe this is an error, please contact support with the following information:

+

Request ID: ${crypto.randomBytes(8).toString('hex').toUpperCase()}

+

Timestamp: ${new Date().toISOString()}

+
+
+
+ + + `; + + return new Response(blockHtml, { + status: 403, + headers: { + 'Content-Type': 'text/html; charset=utf-8', + 'X-Checkpoint-Block': 'true', + 'X-Checkpoint-Score': String(threatScore) + }, + }); +} + +// Helper function to format signal names for display +function formatSignalName(signal: string): string { + const formatMap: Record = { + 'sql_injection': 'SQL Injection Attempt', + 'xss_attempt': 'Cross-Site Scripting', + 'command_injection': 'Command Injection', + 'path_traversal': 'Path Traversal', + 'attack_tool_ua': 'Attack Tool Detected', + 'missing_ua': 'Missing User Agent' + }; + + return formatMap[signal] || signal.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase()); +} + +// Helper function to get threat level description +function getThreatLevel(score: number): string { + if (score >= 90) return 'Critical'; + if (score >= 75) return 'High'; + if (score >= 50) return 'Medium'; + if (score >= 25) return 'Low'; + return 'Minimal'; +} + +// ==================== CHALLENGE HANDLING ==================== + +async function handleGetCheckpointChallenge(request: Request | NetworkRequest): Promise { + const url = getRequestURL(request); + const requestID = url?.searchParams?.get('id'); + if (!requestID) { + return new Response(JSON.stringify({ error: 'Missing request ID' }), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }); + } + + const ip = getRealIP(request); + const attempts = (ipRateLimit.get(ip) || 0) + 1; + ipRateLimit.set(ip, attempts); + + if (attempts > checkpointConfig.MaxAttemptsPerHour) { + return new Response( + JSON.stringify({ error: 'Too many challenge requests. Try again later.' }), + { + status: 429, + headers: { 'Content-Type': 'application/json' }, + }, + ); + } + + const params = getChallengeParams(requestID); + if (!params) { + return new Response(JSON.stringify({ error: 'Challenge not found or expired' }), { + status: 404, + headers: { 'Content-Type': 'application/json' }, + }); + } + + if (ip !== params.ClientIP) { + return new Response(JSON.stringify({ error: 'IP address mismatch for challenge' }), { + status: 403, + headers: { 'Content-Type': 'application/json' }, + }); + } + + const payload = { + a: params.Challenge, + b: params.Salt, + c: params.Difficulty, + d: params.PoSSeed, + }; + return new Response(JSON.stringify(payload), { + status: 200, + headers: { 'Content-Type': 'application/json' }, + }); +} + +// ==================== TOKEN MANAGEMENT ==================== + +function computeTokenSignature(token: Omit): string { + const copy = { ...token, Signature: '' }; + const serialized = JSON.stringify(copy); + if (!hmacSecret) { + throw new Error('HMAC secret not initialized'); + } + return crypto.createHmac('sha256', hmacSecret).update(serialized).digest('hex'); +} + +function verifyTokenSignature(token: CheckpointToken): boolean { + if (!token.Signature) return false; + try { + const expected = computeTokenSignature(token); + return crypto.timingSafeEqual( + Buffer.from(token.Signature, 'hex'), + Buffer.from(expected, 'hex'), + ); + } catch (e) { + return false; + } +} + +// ==================== VERIFICATION HANDLING ==================== + +async function handleVerifyCheckpoint(request: Request | NetworkRequest): Promise { + let body: any; + if (typeof (request as any).json === 'function') { + // Fetch-style Request + try { + body = await (request as any).json(); + } catch (e) { + return new Response(JSON.stringify({ error: 'Invalid JSON' }), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }); + } 
+ } else if ((request as any).body) { + // Express Request with parsed JSON body + body = (request as any).body; + } else { + return new Response(JSON.stringify({ error: 'Invalid JSON' }), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }); + } + + const ip = getRealIP(request); + const params = getChallengeParams(body.request_id); + + if (!params) { + return new Response(JSON.stringify({ error: 'Invalid or expired request ID' }), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }); + } + + if (ip !== params.ClientIP) { + return new Response(JSON.stringify({ error: 'IP address mismatch' }), { + status: 403, + headers: { 'Content-Type': 'application/json' }, + }); + } + + const challenge = params.Challenge; + const salt = params.Salt; + + if (!body.g || !verifyPoW(challenge, salt, body.g, params.Difficulty)) { + return new Response(JSON.stringify({ error: 'Invalid proof-of-work solution' }), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }); + } + + const nonceKey = body.g + challenge; + usedNonces.set(nonceKey, Date.now()); + + if (body.h?.length === 3 && body.i?.length === 3) { + try { + verifyPoS(body.h, body.i, checkpointConfig); + } catch (e) { + const error = e as Error; + return new Response(JSON.stringify({ error: error.message }), { + status: 400, + headers: { 'Content-Type': 'application/json' }, + }); + } + } + + deleteChallenge(body.request_id); + + const now = new Date(); + const expiresAt = new Date(now.getTime() + checkpointConfig.TokenExpiration); + + // Get user agent safely + let userAgent = ''; + if (request.headers) { + if (typeof request.headers.get === 'function') { + userAgent = request.headers.get('user-agent') || ''; + } else if (typeof request.headers === 'object') { + userAgent = String((request.headers as Record)['user-agent'] || ''); + } + } + + const token: Omit = { + Nonce: body.g, + ExpiresAt: expiresAt, + ClientIP: getFullClientIP(request), + UserAgent: hashUserAgent(userAgent), + BrowserHint: extractBrowserFingerprint(request), + Entropy: crypto.randomBytes(8).toString('hex'), + Created: now, + LastVerified: now, + TokenFormat: 2, + }; + + const signature = computeTokenSignature(token); + const signedToken: CheckpointToken = { ...token, Signature: signature }; + const tokenStr = Buffer.from(JSON.stringify(signedToken)).toString('base64'); + + const tokenKey = crypto.createHash('sha256').update(tokenStr).digest('hex'); + try { + await db.put(tokenKey, true); + tokenCache.set(tokenKey, true); + tokenExpirations.set(tokenKey, new Date(signedToken.ExpiresAt).getTime()); + console.log(`checkpoint: token stored in DB and cache key=${tokenKey}`); + } catch (e) { + console.error('checkpoint: failed to store token in DB:', e); + } + + return new Response(JSON.stringify({ token: tokenStr, expires_at: signedToken.ExpiresAt }), { + status: 200, + headers: { 'Content-Type': 'application/json' }, + }); +} + +// ==================== TOKEN VALIDATION ==================== + +async function validateToken(tokenStr: string, _request: NetworkRequest | Request): Promise { + if (!tokenStr) return { valid: false, reason: 'missing_token' }; + + let token: CheckpointToken; + try { + token = JSON.parse(Buffer.from(tokenStr, 'base64').toString()); + } catch { + console.log('checkpoint: invalid token format'); + return { valid: false, reason: 'invalid_format' }; + } + + if (Date.now() > new Date(token.ExpiresAt).getTime()) { + console.log('checkpoint: token expired'); + return { valid: false, reason: 'expired' }; + } + + 
if (!verifyTokenSignature(token)) { + console.log('checkpoint: invalid token signature'); + return { valid: false, reason: 'invalid_signature' }; + } + + const tokenKey = crypto.createHash('sha256').update(tokenStr).digest('hex'); + + if (tokenCache.has(tokenKey)) return { valid: true, created: token.Created }; + + // Check if database is open before attempting to access + if (!db || !db.isOpen()) { + console.warn('checkpoint: database not open, using cache only'); + return { valid: false, reason: 'db_not_ready' }; + } + + try { + await db.get(tokenKey); + tokenCache.set(tokenKey, true); + tokenExpirations.set(tokenKey, new Date(token.ExpiresAt).getTime()); + return { valid: true, created: token.Created }; + } catch (err: any) { + if (err.code === 'LEVEL_DATABASE_NOT_OPEN') { + console.warn('checkpoint: database not open during token validation'); + return { valid: false, reason: 'db_not_ready' }; + } + console.log('checkpoint: token not found in DB'); + return { valid: false, reason: 'not_found' }; + } +} + +// ==================== TOKEN REDIRECT HANDLING ==================== + +async function handleTokenRedirect(request: Request): Promise { + let tokenStr: string | undefined; + if ((request as any).query) { + // Express request + tokenStr = (request as any).query.token; + } else { + // Fetch-style request + const url = getRequestURL(request); + tokenStr = url?.searchParams?.get('token') || undefined; + } + + if (!tokenStr) return undefined; + + let token: CheckpointToken; + try { + token = JSON.parse(Buffer.from(tokenStr, 'base64').toString()); + + if (Date.now() > new Date(token.ExpiresAt).getTime()) { + console.log('checkpoint: token in URL parameter expired'); + return undefined; + } + + if (!verifyTokenSignature(token)) { + console.log('checkpoint: invalid token signature in URL parameter'); + return undefined; + } + + const tokenKey = crypto.createHash('sha256').update(tokenStr).digest('hex'); + try { + await db.get(tokenKey); + } catch { + console.log('checkpoint: token in URL parameter not found in DB'); + return undefined; + } + } catch (e) { + console.log('checkpoint: invalid token format in URL parameter', e); + return undefined; + } + + const expires = new Date(token.ExpiresAt).toUTCString(); + const cookieDomain = checkpointConfig.CookieDomain || ''; + const sameSite = cookieDomain ? 'Lax' : 'Strict'; + const protocol = (request as any).protocol || 'http'; + const securePart = protocol === 'https' ? '; Secure' : ''; + const domainPart = cookieDomain ? `; Domain=${cookieDomain}` : ''; + const cookieStr = + `${checkpointConfig.CookieName}=${tokenStr}; Path=/` + + `${domainPart}; Expires=${expires}; HttpOnly; SameSite=${sameSite}${securePart}`; + + // Build clean URL without token parameter + let cleanUrl: string; + if ((request as any).query) { + // Express request + const cleanQuery = { ...(request as any).query }; + delete cleanQuery.token; + const queryString = new URLSearchParams(cleanQuery).toString(); + const basePath = checkpointConfig.SanitizeURLs ? sanitizePath((request as any).path) : (request as any).path; + cleanUrl = basePath + (queryString ? `?${queryString}` : ''); + } else { + // Fetch-style request + const url = getRequestURL(request); + if (url) { + url.searchParams.delete('token'); + } + const pathname = checkpointConfig.SanitizeURLs ? 
sanitizePath(url?.pathname || '/') : (url?.pathname || '/'); + cleanUrl = pathname + (url?.search || ''); + } + + return new Response(null, { + status: 302, + headers: { + 'Set-Cookie': cookieStr, + Location: cleanUrl, + }, + }); +} + +// ==================== MIDDLEWARE CREATION ==================== + +function CheckpointMiddleware(): Router { + const router = express.Router(); + // Ensure JSON bodies are parsed for /api/verify and legacy verify endpoints + router.use(express.json({ limit: '1mb' })); + // Ensure query parameters are parsed for token redirects + router.use(express.urlencoded({ extended: true })); + const checkpointChallengeRoute = '/__checkpoint/challenge'; + const checkpointVerifyRoute = '/__checkpoint/verify'; + const apiChallengeRoute = '/api/challenge'; + const apiVerifyRoute = '/api/verify'; + + // Handle both legacy and /api routes + router.get(checkpointChallengeRoute, async (req: Request, res: ExpressResponse): Promise => { + if (!checkpointConfig.Enabled) { + res.status(404).send('Not Found'); + return; + } + const response = await handleGetCheckpointChallenge(req); + res.status(response.status); + response.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + const body = await response.text(); + res.send(body); + }); + + router.get(apiChallengeRoute, async (req: Request, res: ExpressResponse): Promise => { + if (!checkpointConfig.Enabled) { + res.status(404).send('Not Found'); + return; + } + const response = await handleGetCheckpointChallenge(req); + res.status(response.status); + response.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + const body = await response.text(); + res.send(body); + }); + + router.post(checkpointVerifyRoute, async (req: Request, res: ExpressResponse): Promise => { + if (!checkpointConfig.Enabled) { + res.status(404).send('Not Found'); + return; + } + const response = await handleVerifyCheckpoint(req); + res.status(response.status); + response.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + const body = await response.text(); + res.send(body); + }); + + router.post(apiVerifyRoute, async (req: Request, res: ExpressResponse): Promise => { + if (!checkpointConfig.Enabled) { + res.status(404).send('Not Found'); + return; + } + const response = await handleVerifyCheckpoint(req); + res.status(response.status); + response.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + const body = await response.text(); + res.send(body); + }); + + router.use(async (req: Request, res: ExpressResponse, next: NextFunction) => { + if (!checkpointConfig.Enabled) { + return next(); + } + + // Skip if request is pre-excluded + if (req._excluded || res.locals._excluded) { + return next(); + } + + // Handle token redirects first + const tokenRedirectResponse = await handleTokenRedirect(req); + if (tokenRedirectResponse) { + res.status(tokenRedirectResponse.status); + tokenRedirectResponse.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + return res.end(); + } + + const clientIP = getRealIP(req); + const userAgent = req.headers['user-agent'] || ''; + const url = new URL(req.url, `http://${req.headers.host || 'localhost'}`); + const pathname = url.pathname; + + console.log(`CHECKPOINT: Processing request from ${clientIP} to ${pathname}`); + + // Skip checkpoint for specific routes + if (pathname === checkpointChallengeRoute || pathname === checkpointVerifyRoute || pathname === apiChallengeRoute || pathname === apiVerifyRoute) { + return next(); + } + + + 
const shouldExclude = checkpointConfig.ExclusionRules?.some(rule => { + const pathMatch = pathname.startsWith(rule.Path); + const hostMatch = !rule.Hosts || rule.Hosts.length === 0 || rule.Hosts.includes(req.hostname); + const uaMatch = !rule.UserAgents || rule.UserAgents.length === 0 || + rule.UserAgents.some(pattern => { + try { + return new RegExp(pattern, 'i').test(userAgent); + } catch { + return false; + } + }); + + return pathMatch && hostMatch && uaMatch; + }); + + if (shouldExclude) { + console.log(`CHECKPOINT: Request excluded by rules for ${pathname}`); + return next(); + } + + // Check bypass keys + const bypassKey = checkBypassKeys(req); + if (bypassKey) { + console.log(`CHECKPOINT: Request bypassed via ${bypassKey.type} key`); + return next(); + } + + // Check if request has valid token + const cookies = req.headers.cookie || ''; + const parsedCookies = cookie.parse(cookies); + const token = parsedCookies[checkpointConfig.CookieName]; + + if (token) { + const tokenData = await validateToken(token, req); + if (tokenData.valid) { + console.log(`CHECKPOINT: Valid token for ${clientIP}`); + + // Set checkpoint headers + res.setHeader('X-Checkpoint-Status', 'passed'); + res.setHeader('X-Checkpoint-Token-Age', String(Date.now() - new Date(tokenData.created!).getTime())); + + return next(); + } else { + console.log(`CHECKPOINT: Invalid token for ${clientIP}: ${tokenData.reason}`); + } + } + + // Enhanced threat scoring integration + try { + const threatScore = await threatScorer.scoreRequest(req); + + console.log(`CHECKPOINT: Threat assessment for ${clientIP}: score=${threatScore.totalScore}, action=${threatScore.riskLevel}, confidence=${threatScore.confidence}`); + + // Add threat data to response headers for debugging + res.setHeader('X-Checkpoint-Score', String(threatScore.totalScore)); + res.setHeader('X-Checkpoint-Action', threatScore.riskLevel); + + // Take action based on threat assessment + if (threatScore.riskLevel === 'allow') { + console.log(`CHECKPOINT: Allowing request from ${clientIP} (score: ${threatScore.totalScore})`); + return next(); + } else if (threatScore.riskLevel === 'block') { + console.log(`CHECKPOINT: Blocking request from ${clientIP} (score: ${threatScore.totalScore})`); + const response = await serveBlockPage(req, threatScore.totalScore, { signals: threatScore.signalsTriggered }); + res.status(response.status); + response.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + const body = await response.text(); + return res.send(body); + } else { + // Challenge + console.log(`CHECKPOINT: Challenging request from ${clientIP} (score: ${threatScore.totalScore})`); + const response = await serveInterstitial(req, threatScore.totalScore); + res.status(response.status); + response.headers.forEach((value, name) => { + res.setHeader(name, value); + }); + const body = await response.text(); + return res.send(body); + } + } catch (err) { + console.error(`CHECKPOINT: Error in threat assessment: ${err}`); + + // On error, fail open but log the issue + return next(); + } + }); + + return router; +} + +// ==================== BYPASS KEY CHECKING ==================== + +function checkBypassKeys(req: Request): BypassKeyResult | null { + const url = new URL(req.url, `http://${req.headers.host || 'localhost'}`); + const bypassKeys = checkpointConfig.BypassKeys || []; + + for (const key of bypassKeys) { + if (key.Type === 'query') { + // Check query bypass keys + if (url.searchParams.get(key.Key) === key.Value) { + const hostMatch = !key.Hosts || 
key.Hosts.length === 0 || key.Hosts.includes(req.hostname); + if (hostMatch) { + return { type: 'query', key: key.Key }; + } + } + } else if (key.Type === 'header') { + // Check header bypass keys + if (req.headers[key.Key.toLowerCase()] === key.Value) { + const hostMatch = !key.Hosts || key.Hosts.length === 0 || key.Hosts.includes(req.hostname); + if (hostMatch) { + return { type: 'header', key: key.Key }; + } + } + } + } + + return null; +} + +// ==================== DATABASE MANAGEMENT ==================== + + + +// ==================== CLEANUP TIMERS ==================== + +function startCleanupTimer(): void { + setInterval(() => { + cleanupExpiredData(); + }, CLEANUP_TIMER_INTERVAL); + const challengeInterval = checkpointConfig.ChallengeExpiration || DEFAULT_CHALLENGE_INTERVAL; + setInterval(() => { + cleanupExpiredChallenges(); + }, challengeInterval); +} + +function cleanupExpiredData(): void { + const now = Date.now(); + let cleanedItems = 0; + + // CRITICAL MEMORY MANAGEMENT: These cleanup operations prevent memory leaks + // If these fail or are disabled, the system will eventually run out of memory + // under high load. Always ensure these run regularly and complete successfully. + + try { + // Clean expired nonces + Array.from(usedNonces.entries()).forEach(([nonce, timestamp]) => { + if (now - timestamp > checkpointConfig.MaxNonceAge) { + usedNonces.delete(nonce); + cleanedItems++; + } + }); + + // Clean expired rate limit entries + Array.from(ipRateLimit.entries()).forEach(([ip, data]) => { + if (typeof data === 'object' && (data as any).lastReset && now - (data as any).lastReset > 3600000) { // 1 hour + ipRateLimit.delete(ip); + cleanedItems++; + } else if (typeof data === 'number') { // Legacy format + ipRateLimit.delete(ip); + cleanedItems++; + } + }); + + // Clean expired token cache entries + Array.from(tokenCache.entries()).forEach(([token, data]) => { + if (data && (data as any).expiresAt && now > (data as any).expiresAt) { + tokenCache.delete(token); + cleanedItems++; + } + }); + + // Clean expired token expiration tracking + Array.from(tokenExpirations.entries()).forEach(([token, expiration]) => { + if (now > expiration) { + tokenExpirations.delete(token); + cleanedItems++; + } + }); + + if (cleanedItems > 0) { + console.log(`Cleaned up ${cleanedItems} expired cache entries`); + } + } catch (err) { + console.error('Error during cache cleanup:', err); + // Don't throw here - cache cleanup errors shouldn't crash the system + } + + // Note: LimitedMap already prevents unbounded growth, but we can still log if approaching limits + if (usedNonces.size > 4500) { // 90% of max size + console.warn(`Nonce cache approaching limit: ${usedNonces.size}/5000`); + } + + if (tokenCache.size > 18000) { // 90% of max size + console.warn(`Token cache approaching limit: ${tokenCache.size}/20000`); + } +} + +function cleanupExpiredChallenges(): void { + const now = Date.now(); + let count = 0; + Array.from(challengeStore.entries()).forEach(([id, params]) => { + if ((params as any).ExpiresAt && (params as any).ExpiresAt < now) { + challengeStore.delete(id); + count++; + } + }); + if (count) console.log(`Checkpoint: cleaned up ${count} expired challenges.`); +} + +// ==================== SECRET MANAGEMENT ==================== + +async function initSecret(): Promise { + try { + if (!checkpointConfig.SecretConfigPath) { + checkpointConfig.SecretConfigPath = join(rootDir, 'data', 'checkpoint_secret.json'); + } + + const secretPath = checkpointConfig.SecretConfigPath; + const exists = 
fs.existsSync(secretPath); + + if (exists) { + const loaded = loadSecretFromFile(); + if (loaded) { + hmacSecret = loaded; + console.log(`Loaded existing HMAC secret from ${secretPath}`); + return; + } + } + + hmacSecret = crypto.randomBytes(32); + fs.mkdirSync(path.dirname(secretPath), { recursive: true }); + + const secretCfg: SecretConfig = { + hmac_secret: hmacSecret.toString('base64'), + created_at: new Date().toISOString(), + updated_at: new Date().toISOString(), + }; + + fs.writeFileSync(secretPath, JSON.stringify(secretCfg), { mode: 0o600 }); + console.log(`Created and saved new HMAC secret to ${secretPath}`); + } catch (err) { + console.error('Error initializing secret:', err); + hmacSecret = crypto.randomBytes(32); + } +} + +function loadSecretFromFile(): Buffer | null { + try { + if (!checkpointConfig.SecretConfigPath) return null; + + const data = fs.readFileSync(checkpointConfig.SecretConfigPath, 'utf8'); + const cfg = JSON.parse(data) as SecretConfig; + const buf = Buffer.from(cfg.hmac_secret, 'base64'); + if (buf.length < 16) return null; + + const updatedCfg: SecretConfig = { + ...cfg, + updated_at: new Date().toISOString() + }; + fs.writeFileSync(checkpointConfig.SecretConfigPath, JSON.stringify(updatedCfg), { mode: 0o600 }); + return buf; + } catch (e) { + console.warn('Could not load HMAC secret from file:', e); + return null; + } +} + +// ==================== INITIALIZATION ==================== + +// Initialize async components +let isInitialized = false; +let initPromise: Promise | null = null; + +// Start initialization immediately (skip during tests) +if (process.env.NODE_ENV !== 'test' && process.env.JEST_WORKER_ID === undefined) { + initPromise = (async function initialize(): Promise { + await initConfig(); + await initSecret(); + await initTokenStore(); + startCleanupTimer(); + isInitialized = true; + + if (checkpointConfig.Enabled === false) { + logs.plugin('checkpoint', 'Disabled via configuration'); + } else { + logs.plugin('checkpoint', 'Security verification system ready'); + } + })(); +} else { + // Test environment - create a mock promise + initPromise = Promise.resolve(); +} + +// Create middleware that waits for initialization +let checkpointMiddleware: Router | null = null; +const deferredMiddleware = express.Router(); + +deferredMiddleware.use(async (req: Request, res: ExpressResponse, next: NextFunction) => { + if (!isInitialized) { + try { + await initPromise; + } catch (err) { + console.error('Checkpoint initialization failed:', err); + return next(); + } + } + + if (!checkpointConfig.Enabled) return next(); + + if (!checkpointMiddleware) { + checkpointMiddleware = CheckpointMiddleware(); + } + + checkpointMiddleware(req, res, next); +}); + +registerPlugin('checkpoint', { middleware: deferredMiddleware }); + +// Export initialization promise so index.js can wait for it +export const initializationComplete = initPromise; + +export { + checkpointConfig +}; \ No newline at end of file diff --git a/src/index.ts b/src/index.ts new file mode 100644 index 0000000..4bc398e --- /dev/null +++ b/src/index.ts @@ -0,0 +1,782 @@ +import { mkdir, readFile } from 'fs/promises'; +import { existsSync, readdirSync } from 'fs'; +import { join, dirname, basename } from 'path'; +import { fileURLToPath } from 'url'; +import { secureImportModule } from './utils/plugins.js'; +import * as logs from './utils/logs.js'; +import express, { Request, Response, NextFunction, Router } from 'express'; +import { createServer, Server } from 'http'; +import { Socket } from 'net'; + +// 
Load environment variables from .env file +import * as dotenv from 'dotenv'; +dotenv.config(); + +// Order of critical plugins that must load before others +// Proxy is registered dynamically (see PROXY section in main()) +const PLUGIN_LOAD_ORDER: readonly string[] = ['ipfilter', 'waf'] as const; + +// Type definitions for the system +interface PluginRegistration { + readonly name: string; + readonly handler: PluginHandler; +} + +interface PluginHandler { + readonly middleware?: PluginMiddleware | PluginMiddleware[]; + readonly initializationComplete?: Promise; + readonly handleUpgrade?: (req: Request, socket: Socket, head: Buffer) => void; +} + +type PluginMiddleware = (req: Request, res: Response, next: NextFunction) => void; + +interface PluginInfo { + readonly name: string; + readonly path: string; +} + +interface ExclusionRule { + readonly Path: string; + readonly Hosts?: readonly string[]; + readonly UserAgents?: readonly string[]; +} + +interface CompiledExclusionRule extends ExclusionRule { + readonly pathStartsWith: string; + readonly hostsSet: Set | null; + readonly userAgentPatterns: readonly RegExp[]; +} + +interface CheckpointConfig { + readonly Core?: { + readonly Enabled?: boolean; + }; + readonly Exclusion?: readonly ExclusionRule[]; +} + +interface AppConfigs { + checkpoint?: CheckpointConfig; + [configName: string]: unknown; +} + +// Type-safe interfaces for threat scoring TOML configuration +interface ThreatScoringTomlConfig { + readonly Core?: { + readonly Enabled?: boolean; + readonly LogDetailedScores?: boolean; + }; + readonly Thresholds?: { + readonly AllowThreshold?: number; + readonly ChallengeThreshold?: number; + readonly BlockThreshold?: number; + }; + readonly SignalWeights?: { + readonly ATTACK_TOOL_UA?: { + readonly weight?: number; + readonly confidence?: number; + }; + readonly MISSING_UA?: { + readonly weight?: number; + readonly confidence?: number; + }; + readonly SQL_INJECTION?: { + readonly weight?: number; + readonly confidence?: number; + }; + readonly XSS_ATTEMPT?: { + readonly weight?: number; + readonly confidence?: number; + }; + readonly COMMAND_INJECTION?: { + readonly weight?: number; + readonly confidence?: number; + }; + readonly PATH_TRAVERSAL?: { + readonly weight?: number; + readonly confidence?: number; + }; + }; + readonly Features?: { + readonly EnableBotVerification?: boolean; + readonly EnableGeoAnalysis?: boolean; + readonly EnableBehaviorAnalysis?: boolean; + readonly EnableContentAnalysis?: boolean; + }; +} + +// Type-safe configuration transformation +function transformThreatScoringConfig(tomlConfig: ThreatScoringTomlConfig): { + enabled: boolean; + thresholds: { + ALLOW: number; + CHALLENGE: number; + BLOCK: number; + }; + signalWeights: { + ATTACK_TOOL_UA: { weight: number; confidence: number }; + MISSING_UA: { weight: number; confidence: number }; + SQL_INJECTION: { weight: number; confidence: number }; + XSS_ATTEMPT: { weight: number; confidence: number }; + COMMAND_INJECTION: { weight: number; confidence: number }; + PATH_TRAVERSAL: { weight: number; confidence: number }; + }; + enableBotVerification: boolean; + enableGeoAnalysis: boolean; + enableBehaviorAnalysis: boolean; + enableContentAnalysis: boolean; + logDetailedScores: boolean; +} { + return { + enabled: tomlConfig.Core?.Enabled ?? false, + thresholds: { + ALLOW: tomlConfig.Thresholds?.AllowThreshold ?? 20, + CHALLENGE: tomlConfig.Thresholds?.ChallengeThreshold ?? 60, + BLOCK: tomlConfig.Thresholds?.BlockThreshold ?? 
100 + }, + signalWeights: { + ATTACK_TOOL_UA: { + weight: tomlConfig.SignalWeights?.ATTACK_TOOL_UA?.weight ?? 30, + confidence: tomlConfig.SignalWeights?.ATTACK_TOOL_UA?.confidence ?? 0.75 + }, + MISSING_UA: { + weight: tomlConfig.SignalWeights?.MISSING_UA?.weight ?? 10, + confidence: tomlConfig.SignalWeights?.MISSING_UA?.confidence ?? 0.60 + }, + SQL_INJECTION: { + weight: tomlConfig.SignalWeights?.SQL_INJECTION?.weight ?? 60, + confidence: tomlConfig.SignalWeights?.SQL_INJECTION?.confidence ?? 0.92 + }, + XSS_ATTEMPT: { + weight: tomlConfig.SignalWeights?.XSS_ATTEMPT?.weight ?? 50, + confidence: tomlConfig.SignalWeights?.XSS_ATTEMPT?.confidence ?? 0.88 + }, + COMMAND_INJECTION: { + weight: tomlConfig.SignalWeights?.COMMAND_INJECTION?.weight ?? 65, + confidence: tomlConfig.SignalWeights?.COMMAND_INJECTION?.confidence ?? 0.95 + }, + PATH_TRAVERSAL: { + weight: tomlConfig.SignalWeights?.PATH_TRAVERSAL?.weight ?? 45, + confidence: tomlConfig.SignalWeights?.PATH_TRAVERSAL?.confidence ?? 0.85 + } + }, + enableBotVerification: tomlConfig.Features?.EnableBotVerification ?? false, + enableGeoAnalysis: tomlConfig.Features?.EnableGeoAnalysis ?? false, + enableBehaviorAnalysis: tomlConfig.Features?.EnableBehaviorAnalysis ?? false, + enableContentAnalysis: tomlConfig.Features?.EnableContentAnalysis ?? false, + logDetailedScores: tomlConfig.Core?.LogDetailedScores ?? false + }; +} + +// Extend Express Request to include our custom properties +declare global { + namespace Express { + interface Request { + isWebSocketRequest?: boolean; + _excluded?: boolean; + } + + interface Locals { + _excluded?: boolean; + } + } +} + +// Command-line argument handling - use pm2 for process management +if (process.argv.includes('-k') || process.argv.includes('-d')) { + console.error('Command-line daemonization is deprecated. Use pm2 instead:'); + console.error(' npm run daemon # Start as daemon'); + console.error(' npm run stop # Stop daemon'); + console.error(' npm run restart # Restart daemon'); + console.error(' npm run logs # View logs'); + process.exit(1); +} + +// Disable console.log in production to suppress output in daemon mode +if (process.env.NODE_ENV === 'production') { + console.log = (): void => {}; +} + +const pluginRegistry: PluginRegistration[] = []; + +export function registerPlugin(pluginName: string, handler: PluginHandler): void { + if (typeof pluginName !== 'string' || !pluginName.trim()) { + throw new Error('Plugin name must be a non-empty string'); + } + + if (!handler || typeof handler !== 'object') { + throw new Error('Plugin handler must be an object'); + } + + // Check for duplicate registration + if (pluginRegistry.some(p => p.name === pluginName)) { + throw new Error(`Plugin '${pluginName}' is already registered`); + } + + pluginRegistry.push({ name: pluginName, handler }); +} + +/** + * Return the array of middleware handlers in registration order. + */ +export function loadPlugins(): readonly PluginHandler[] { + return pluginRegistry.map((item) => item.handler); +} + +/** + * Return the names of all registered plugins. + */ +export function getRegisteredPluginNames(): readonly string[] { + return pluginRegistry.map((item) => item.name); +} + +/** + * Freeze plugin registry to prevent further registration and log the final set. 
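+ *
+ * Hypothetical usage sketch (the plugin name is illustrative; real plugins register
+ * themselves as a side effect of being imported in main()):
+ *
+ *   registerPlugin('audit', { middleware: (_req, _res, next) => next() });
+ *   freezePlugins();
+ *   // The frozen registry now rejects further registerPlugin() calls.
+ *   getRegisteredPluginNames(); // => ['audit', ...]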
+ */ +export function freezePlugins(): void { + Object.freeze(pluginRegistry); + pluginRegistry.forEach((item) => Object.freeze(item)); + logs.msg('Plugin registration frozen'); +} + +// Determine root directory for config loading +let _dirname: string; +try { + _dirname = dirname(fileURLToPath(import.meta.url)); +} catch (error) { + // Fallback for test environments or cases where import.meta.url isn't available + _dirname = process.cwd(); +} + +// Ensure _dirname is valid +if (!_dirname) { + _dirname = process.cwd(); +} + +export const rootDir: string = _dirname.endsWith('/dist') || _dirname.endsWith('\\dist') ? + dirname(_dirname) : + (_dirname.endsWith('/src') || _dirname.endsWith('\\src') ? dirname(_dirname) : _dirname); + +export async function loadConfig>( + name: string, + target: T +): Promise { + if (typeof name !== 'string' || !name.trim()) { + throw new Error('Config name must be a non-empty string'); + } + + if (!target || typeof target !== 'object') { + throw new Error('Config target must be an object'); + } + + const configPath = join(rootDir, 'config', `${name}.toml`); + + try { + const txt = await readFile(configPath, 'utf8'); + const toml = await import('@iarna/toml'); + const parsed = toml.parse(txt) as Partial; + Object.assign(target, parsed); + } catch (error) { + const err = error as Error; + throw new Error(`Failed to load config '${name}': ${err.message}`); + } +} + +// Discover all config files in the config directory +function discoverConfigs(): string[] { + try { + const configDir = join(rootDir, 'config'); + if (!existsSync(configDir)) { + return []; + } + + return readdirSync(configDir) + .filter(file => file.endsWith('.toml') && !file.includes('.example')) + .map(file => basename(file, '.toml')) + .sort(); + } catch { + return []; + } +} + +// Discover all plugin files in the plugins directory +function discoverPlugins(): PluginInfo[] { + try { + // Look for plugins in the correct directory based on execution context + const isCompiledMode = _dirname.endsWith('/dist') || _dirname.endsWith('\\dist'); + const pluginsDir = isCompiledMode ? + join(_dirname, 'plugins') : // dist/plugins when running compiled + join(rootDir, 'src', 'plugins'); // src/plugins when running source + + if (!existsSync(pluginsDir)) { + return []; + } + + const fileExt = isCompiledMode ? '.js' : '.ts'; + const relativePathPrefix = isCompiledMode ? 
'dist/plugins' : 'src/plugins'; + + const allPlugins: PluginInfo[] = readdirSync(pluginsDir) + .filter(file => file.endsWith(fileExt)) + .map(file => ({ + name: basename(file, fileExt), + path: join(relativePathPrefix, file) + })); + + // Sort by load order, then alphabetically + const ordered: PluginInfo[] = []; + const remaining = [...allPlugins]; + + PLUGIN_LOAD_ORDER.forEach(name => { + const idx = remaining.findIndex(p => p.name === name); + if (idx >= 0) { + ordered.push(...remaining.splice(idx, 1)); + } + }); + + return [...ordered, ...remaining.sort((a, b) => a.name.localeCompare(b.name))]; + } catch { + return []; + } +} + +async function initDataDirectories(): Promise { + logs.section('INIT'); + const directories = [ + join(rootDir, 'data'), + join(rootDir, 'db'), + join(rootDir, 'config') + ]; + + for (const dirPath of directories) { + try { + await mkdir(dirPath, { recursive: true }); + } catch { + // Ignore errors if directory already exists + } + } + logs.init('Data directories are now in place'); +} + +function staticFileMiddleware(): Router { + const router = express.Router(); + + // Validate static directories exist before serving + const webfontPath = join(rootDir, 'pages/interstitial/webfont'); + const jsPath = join(rootDir, 'pages/interstitial/js'); + + if (existsSync(webfontPath)) { + router.use('/webfont', express.static(webfontPath, { + maxAge: '7d' + })); + } + + if (existsSync(jsPath)) { + router.use('/js', express.static(jsPath, { + maxAge: '7d' + })); + } + + return router; +} + +async function main(): Promise { + await initDataDirectories(); + + logs.section('CONFIG'); + + // Dynamically discover and load all config files + const configNames = discoverConfigs(); + const configs: AppConfigs = {}; + + for (const configName of configNames) { + configs[configName] = {}; + try { + await loadConfig(configName, configs[configName] as Record); + logs.config(configName, 'loaded'); + } catch (err) { + const error = err as Error; + logs.error('config', `Failed to load ${configName} config: ${error.message}`); + // Don't exit on config error - plugin might work without config + } + } + + const earlyCheckpointConfig = configs.checkpoint as CheckpointConfig || {}; + + // Initialize threat scoring system if threat-scoring config exists + logs.section('THREAT SCORING'); + if (configs['threat-scoring']) { + try { + const { configureDefaultThreatScorer } = await import('./utils/threat-scoring.js'); + const threatConfig = configs['threat-scoring'] as ThreatScoringTomlConfig; + + // Transform config structure to match ThreatScoringConfig interface + const scoringConfig = transformThreatScoringConfig(threatConfig); + + configureDefaultThreatScorer(scoringConfig); + logs.msg('Threat scoring system initialized'); + } catch (e) { + const error = e as Error; + logs.error('threat-scoring', `Failed to initialize threat scoring: ${error.message}`); + } + } else { + logs.msg('Threat scoring disabled - no config file found'); + } + + const app = express(); + + // Disable Express default header so our headers plugin can set its own value + app.disable('x-powered-by'); + + // Global header applied to all responses handled by Express + app.use((_req: Request, res: Response, next: NextFunction) => { + // Only set if not already set + if (!res.headersSent) { + res.setHeader('X-Powered-By', 'Checkpoint (https://git.caileb.com/Caileb/Checkpoint)'); + } + next(); + }); + + // Hold proxy plugin module for WebSocket upgrade forwarding + let proxyPluginModule: PluginHandler | undefined; + + // Trust 
proxy headers (important for proper protocol detection) + app.set('trust proxy', true); + + // WebSocket requests bypass body parsing + app.use((req: Request, _res: Response, next: NextFunction) => { + const upgradeHeader = req.headers.upgrade; + const connectionHeader = req.headers.connection; + + if (upgradeHeader === 'websocket' || + (connectionHeader && connectionHeader.toLowerCase().includes('upgrade'))) { + req.isWebSocketRequest = true; + return next(); + } + next(); + }); + + const bodyLimit = process.env.MAX_BODY_SIZE || '10mb'; + app.use((req: Request, res: Response, next: NextFunction) => { + if (req.isWebSocketRequest) return next(); + express.json({ limit: bodyLimit })(req, res, next); + }); + + app.use((req: Request, res: Response, next: NextFunction) => { + if (req.isWebSocketRequest) return next(); + express.urlencoded({ extended: true, limit: bodyLimit })(req, res, next); + }); + + // Load plugins + + // Load behavioral detection middleware + logs.section('BEHAVIORAL DETECTION'); + try { + await import('./utils/behavioral-middleware.js'); + logs.msg('Behavioral detection middleware loaded'); + } catch (e) { + const error = e as Error; + logs.error('behavioral', `Failed to load behavioral detection: ${error.message}`); + } + + // CRITICAL: Load checkpoint middleware directly (since it's not in plugins directory) + logs.section('CHECKPOINT'); + try { + await import('./checkpoint.js'); + logs.msg('Checkpoint middleware loaded'); + } catch (e) { + const error = e as Error; + logs.error('checkpoint', `Failed to load checkpoint middleware: ${error.message}`); + } + + // --------------------------------------------------------------------------- + // PROXY (dynamic registration) + // --------------------------------------------------------------------------- + + logs.section('PROXY'); + try { + const { + getProxyMiddleware, + handleUpgrade: proxyHandleUpgrade, + isProxyEnabled + } = await import('./proxy.js'); + + if (typeof isProxyEnabled === 'function' && isProxyEnabled()) { + const proxyMw = getProxyMiddleware(); + if (proxyMw) { + registerPlugin('proxy', { + middleware: proxyMw, + handleUpgrade: proxyHandleUpgrade + }); + proxyPluginModule = { + middleware: proxyMw, + handleUpgrade: proxyHandleUpgrade + }; + logs.msg('Proxy middleware enabled and registered'); + } else { + logs.msg('Proxy middleware disabled via configuration'); + } + } else { + logs.msg('Proxy disabled via configuration'); + } + } catch (err) { + const error = err as Error; + logs.error('proxy', `Failed to initialize proxy: ${error.message}`); + } + + // --------------------------------------------------------------------------- + // Discover and load all plugins from the plugins directory + // --------------------------------------------------------------------------- + + const plugins = discoverPlugins(); + + for (const plugin of plugins) { + // Create section header based on plugin name + const sectionName = plugin.name.toUpperCase().replace(/-/g, ' '); + logs.section(sectionName); + + try { + const module = await secureImportModule(plugin.path) as PluginHandler; + + // Wait for plugin initialization if it exports an init promise + if (module.initializationComplete) { + await module.initializationComplete; + } + } catch (e) { + const error = e as Error; + logs.error(plugin.name, `Failed to load ${plugin.name} plugin: ${error.message}`); + } + } + + // Register static middleware + app.use(staticFileMiddleware()); + + logs.section('PLUGINS'); + // Display all registered plugins + const registeredPluginNames 
= getRegisteredPluginNames(); + registeredPluginNames.forEach(name => logs.msg(name)); + + logs.section('SYSTEM'); + freezePlugins(); + + // Use pre-loaded checkpoint config for exclusion rules + const checkpointConfig = earlyCheckpointConfig; + const exclusionRules = checkpointConfig.Exclusion || []; + + // Pre-compile patterns once at startup for better performance + const compiledExclusionPatterns: CompiledExclusionRule[] = exclusionRules.map(rule => ({ + ...rule, + pathStartsWith: rule.Path, // Cache for faster comparison + hostsSet: rule.Hosts ? new Set(rule.Hosts) : null, // Use Set for O(1) lookup + userAgentPatterns: (rule.UserAgents || []).map(pattern => { + try { + return new RegExp(pattern, 'i'); + } catch { + logs.error('config', `Invalid UserAgent regex pattern: ${pattern}`); + // Return a pattern that never matches if the regex is invalid + return /(?!)/; + } + }) + })); + + // Create exclusion pre-check middleware that runs BEFORE all plugins + // CRITICAL: This middleware determines which requests bypass security processing + // Breaking this logic will either block legitimate traffic or let malicious traffic through + app.use((req: Request, res: Response, next: NextFunction) => { + // Skip exclusion check if checkpoint is disabled + if (!checkpointConfig.Core?.Enabled) { + return next(); + } + + const pathname = req.path; + const hostname = req.hostname; + const userAgent = req.headers['user-agent'] || ''; + + // Validate inputs to prevent bypasses through malformed data + if (typeof pathname !== 'string' || typeof hostname !== 'string') { + logs.error('server', 'Invalid pathname or hostname in request'); + return next(); + } + + // Process exclusion rules with optimized data structures for better performance + const shouldExclude = compiledExclusionPatterns.some(rule => { + // Check path match first (most likely to fail, so fail fast) + if (!pathname.startsWith(rule.pathStartsWith)) return false; + + // Check host match using Set for O(1) lookup + if (rule.hostsSet && !rule.hostsSet.has(hostname)) { + return false; + } + + // Check user agent match using pre-compiled patterns + if (rule.userAgentPatterns.length > 0) { + return rule.userAgentPatterns.some(pattern => { + try { + return pattern.test(userAgent); + } catch { + // If regex test fails, don't exclude (fail secure) + return false; + } + }); + } + + return true; // No UA restrictions, so it matches + }); + + if (shouldExclude) { + // Mark request as excluded so plugins can skip processing + req._excluded = true; + res.locals._excluded = true; + logs.server(`Pre-excluded request from ${req.ip} to ${pathname}`); + } + + next(); + }); + + // Apply all plugin middlewares to Express + const middlewareHandlers = loadPlugins(); + middlewareHandlers.forEach(handler => { + // Validate plugin interface + if (!handler || typeof handler !== 'object') { + logs.error('server', 'Invalid plugin: must export an object with middleware property'); + return; + } + + if (handler.middleware) { + // If plugin exports an object with middleware property + if (Array.isArray(handler.middleware)) { + // If middleware is an array, apply each one + handler.middleware.forEach(mw => { + if (typeof mw === 'function') { + app.use(mw); + } else { + logs.error('server', 'Invalid middleware function in array'); + } + }); + } else if (typeof handler.middleware === 'function') { + // Single middleware + app.use(handler.middleware); + } else { + logs.error('server', 'Middleware must be a function or array of functions'); + } + } else { + 
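+        // Reached when a registered handler has no `middleware` property at all.
+        // For illustration, both shapes below would be accepted by the checks above
+        // (all identifiers are hypothetical):
+        //   registerPlugin('noop',  { middleware: (_req, _res, next) => next() });
+        //   registerPlugin('chain', { middleware: [logRequest, enforceQuota] });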
logs.error('server', 'Plugin missing required middleware property'); + } + }); + + // Basic test route for middleware testing + app.get('/', (req: Request, res: Response) => { + res.json({ + message: 'Checkpoint Security Gateway', + timestamp: new Date().toISOString(), + ip: req.ip || 'unknown', + userAgent: req.headers['user-agent'] || 'unknown' + }); + }); + + // 404 handler + app.use((_req: Request, res: Response) => { + res.status(404).send('Not Found'); + }); + + // Error handler + app.use((err: Error, _req: Request, res: Response, _next: NextFunction) => { + logs.error('server', `Server error: ${err.message}`); + res.status(500).send(`Server Error: ${err.message}`); + }); + + logs.section('SERVER'); + const portNumber = Number(process.env.PORT || 3000); + + // Validate port number + if (isNaN(portNumber) || portNumber < 1 || portNumber > 65535) { + throw new Error(`Invalid port number: ${process.env.PORT}`); + } + + const server: Server = createServer(app); + + // Track active sockets for proper shutdown handling + const activeSockets = new Set(); + let isShuttingDown = false; + + // Extend socket timeout to prevent premature disconnections + server.on('connection', (socket: Socket) => { + // Track this socket + activeSockets.add(socket); + socket.on('close', () => activeSockets.delete(socket)); + + // Set longer socket timeouts to avoid connection issues + socket.setTimeout(120000); // 2 minutes timeout + socket.setKeepAlive(true, 60000); // Keep-alive every 60 seconds + + socket.on('error', (err: Error) => { + logs.error('server', `Socket error: ${err.message}`); + // Don't destroy socket on error, just let it handle itself + }); + }); + + // Better WebSocket upgrade handling + server.on('upgrade', (req: Request, socket: Socket, head: Buffer) => { + // Mark this as a WebSocket request + req.isWebSocketRequest = true; + + // WebSocket upgrade events for diagnostic purposes + logs.server(`WebSocket upgrade request to ${req.url || 'unknown'}`); + + // Add keep-alive to prevent socket timeouts + socket.setKeepAlive(true, 30000); + + // Socket error handling for upgrades + socket.on('error', (err: Error) => { + logs.error('server', `WebSocket upgrade socket error: ${err.message}`); + if (!socket.destroyed) { + socket.destroy(); + } + }); + + // Forward upgrade to proxy plugin + if (proxyPluginModule && typeof proxyPluginModule.handleUpgrade === 'function') { + proxyPluginModule.handleUpgrade(req, socket, head); + } else { + socket.destroy(); + } + }); + + server.listen(portNumber, () => { + logs.server(`🚀 Server is up and running on port ${portNumber}...`); + logs.section('REQ LOGS'); + }); + + // Graceful shutdown handling + const shutdownHandler = (signal: string): void => { + if (isShuttingDown) { + console.log('Shutdown already in progress, please wait...'); + return; + } + isShuttingDown = true; + console.log(`\n📡 Received ${signal}, shutting down gracefully...`); + + // Destroy all active sockets to ensure server.close completes + activeSockets.forEach((sock) => sock.destroy()); + + server.close(() => { + console.log('✅ HTTP server closed'); + process.exit(0); + }); + + // Force exit if still hanging + setTimeout(() => { + console.error('Forcing shutdown after timeout'); + process.exit(1); + }, 10000); + }; + + process.on('SIGINT', () => shutdownHandler('SIGINT')); + process.on('SIGTERM', () => shutdownHandler('SIGTERM')); +} + +// Skip auto-execution during tests +if (process.env.NODE_ENV !== 'test' && process.env.JEST_WORKER_ID === undefined) { + main().catch((error: Error) => { 
+ console.error('Fatal error during startup:', error.message); + process.exit(1); + }); +} \ No newline at end of file diff --git a/src/plugins/ipfilter.ts b/src/plugins/ipfilter.ts new file mode 100644 index 0000000..f26c16b --- /dev/null +++ b/src/plugins/ipfilter.ts @@ -0,0 +1,1078 @@ +import { registerPlugin, loadConfig, rootDir } from '../index.js'; +import fs, { promises as fsPromises } from 'fs'; +import { join } from 'path'; +import maxmind from 'maxmind'; +import { getRealIP, type NetworkRequest } from '../utils/network.js'; +import { createGunzip } from 'zlib'; +// @ts-ignore - tar-stream doesn't have TypeScript definitions +import tarStream from 'tar-stream'; +import { Buffer } from 'buffer'; +import { parseDuration, type DurationInput } from '../utils/time.js'; +import * as logs from '../utils/logs.js'; +import { Request, Response, NextFunction } from 'express'; + +// @ts-ignore - string-dsa doesn't have TypeScript definitions +import { AhoCorasick } from 'string-dsa'; + +// ==================== SECURITY-HARDENED TYPE DEFINITIONS ==================== + +type BlockType = 'country' | 'continent' | 'asn' | 'reputation' | ''; +type SuspiciousActivity = + | 'rate_limit_hit' | 'waf_trigger' | 'geo_velocity' + | 'suspicious_ua' | 'enumeration' | 'attack_pattern' | 'geo_block'; + +interface IPFilterCoreConfig { + Enabled: boolean; + AccountID: string; + LicenseKey: string; + DBUpdateInterval: DurationInput; // Uses time.ts format: "24h", "5m", etc. +} + +interface IPFilterCacheConfig { + IPBlockCacheTTLSec: number; + IPBlockCacheMaxEntries: number; +} + +interface IPFilterBlockingConfig { + CountryCodes: string[]; + ContinentCodes: string[]; + DefaultBlockPage: string; +} + +interface IPFilterAdvancedConfig { + SuspiciousActivityThreshold: number; + ReputationTTL: number; +} + +interface ASNGroupConfig { + Numbers: number[]; + BlockPage?: string; +} + +interface ASNNameGroupConfig { + Patterns: string[]; + BlockPage?: string; +} + +interface IPFilterConfiguration { + Core: IPFilterCoreConfig; + Cache: IPFilterCacheConfig; + Blocking: IPFilterBlockingConfig; + Advanced: IPFilterAdvancedConfig; + ASN?: Record; + ASNNames?: Record; + CountryBlockPages?: Record; + ContinentBlockPages?: Record; +} + +interface GeoData { + country?: string; + continent?: string; + latitude?: number; + longitude?: number; +} + +interface CountryInfo { + country?: { + iso_code: string; + names?: Record; + }; + continent?: { + code: string; + names?: Record; + }; + location?: { + latitude: number; + longitude: number; + }; +} + +interface ASNInfo { + autonomous_system_number: number; + autonomous_system_organization?: string; +} + +interface ReputationData { + score: number; + activities: Record; + lastUpdate: number; + blocked: boolean; +} + +interface CacheEntry { + blocked: boolean; + blockType: BlockType; + blockValue: string; + customPage: string; + asnOrgName: string; + geoData: GeoData | null; + expiresAt: number; +} + +interface AhoCorasickMatcher { + find(text: string): string[]; +} + +interface MaxMindReader { + get(ip: string): CountryInfo | ASNInfo | null; +} + + + +// ==================== SECURITY CONSTANTS ==================== + +const SECURITY_LIMITS = { + MAX_REPUTATION_CACHE_SIZE: 25000, + MAX_KNOWN_IPS_SIZE: 10000, + MAX_BLOCK_PAGE_CACHE_SIZE: 100, + MAX_ASN_GROUPS: 50, + MAX_COUNTRY_CODES: 300, + MAX_CONTINENT_CODES: 10, + CLEANUP_INTERVAL: parseDuration('5m'), // 5 minutes + DATABASE_VALIDATION_TIMEOUT: parseDuration('10s'), // 10 seconds + MIN_DATABASE_SIZE: 1024, // 1KB minimum + 
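+  // parseDuration (imported from ../utils/time.js) appears to accept shorthand strings
+  // such as '10s', '5m', '24h' and return milliseconds, which is why these limits can
+  // be passed directly to setInterval/setTimeout later in this module.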
OLD_READER_CLEANUP_DELAY: parseDuration('5s'), // 5 seconds +} as const; + +const ACTIVITY_WEIGHTS: Record = { + rate_limit_hit: 1, + waf_trigger: 3, + geo_velocity: 2, + suspicious_ua: 1, + enumeration: 3, + attack_pattern: 4, + geo_block: 1 +} as const; + +// IP filter configuration - loaded during initialization to avoid race conditions +let cfg: IPFilterConfiguration = { + Core: { + Enabled: false, + AccountID: '', + LicenseKey: '', + DBUpdateInterval: '24h' + }, + Cache: { + IPBlockCacheTTLSec: 3600, + IPBlockCacheMaxEntries: 10000 + }, + Blocking: { + CountryCodes: [], + ContinentCodes: [], + DefaultBlockPage: '' + }, + Advanced: { + SuspiciousActivityThreshold: 5, + ReputationTTL: 3600000 + } +}; + +/** + * SECURITY VALIDATION: Initialize IP filter configuration with comprehensive error handling + * Prevents configuration tampering and ensures safe defaults + */ +async function initializeIPFilter(): Promise { + try { + const loadedConfig: any = {}; + await loadConfig('ipfilter', loadedConfig); + + // Validate and sanitize loaded configuration + cfg = { + Core: { + Enabled: Boolean(loadedConfig.Core?.Enabled), + AccountID: String(loadedConfig.Core?.AccountID || '').slice(0, 100), + LicenseKey: String(loadedConfig.Core?.LicenseKey || '').slice(0, 100), + DBUpdateInterval: String(loadedConfig.Core?.DBUpdateInterval || '24h') + }, + Cache: { + IPBlockCacheTTLSec: Math.max(60, Math.min(86400, Number(loadedConfig.Cache?.IPBlockCacheTTLSec) || 3600)), + IPBlockCacheMaxEntries: Math.max(100, Math.min(100000, Number(loadedConfig.Cache?.IPBlockCacheMaxEntries) || 10000)) + }, + Blocking: { + CountryCodes: Array.isArray(loadedConfig.Blocking?.CountryCodes) + ? loadedConfig.Blocking.CountryCodes.filter((c: any) => typeof c === 'string' && c.length === 2).slice(0, SECURITY_LIMITS.MAX_COUNTRY_CODES) + : [], + ContinentCodes: Array.isArray(loadedConfig.Blocking?.ContinentCodes) + ? loadedConfig.Blocking.ContinentCodes.filter((c: any) => typeof c === 'string' && c.length === 2).slice(0, SECURITY_LIMITS.MAX_CONTINENT_CODES) + : [], + DefaultBlockPage: String(loadedConfig.Blocking?.DefaultBlockPage || '').slice(0, 500) + }, + Advanced: { + SuspiciousActivityThreshold: Math.max(1, Math.min(100, Number(loadedConfig.Advanced?.SuspiciousActivityThreshold) || 5)), + ReputationTTL: Math.max(60000, Math.min(604800000, Number(loadedConfig.Advanced?.ReputationTTL) || 3600000)) // 1 minute to 1 week + }, + ASN: {}, + ASNNames: {}, + CountryBlockPages: {}, + ContinentBlockPages: {} + }; + + // Safely process ASN configuration + if (loadedConfig.ASN && typeof loadedConfig.ASN === 'object') { + const asnEntries = Object.entries(loadedConfig.ASN).slice(0, SECURITY_LIMITS.MAX_ASN_GROUPS); + for (const [group, config] of asnEntries) { + if (typeof group === 'string' && config && typeof config === 'object') { + cfg.ASN![group] = { + Numbers: Array.isArray((config as any).Numbers) + ? (config as any).Numbers.filter((n: any) => Number.isInteger(n) && n > 0).slice(0, 1000) + : [], + BlockPage: typeof (config as any).BlockPage === 'string' ? 
(config as any).BlockPage.slice(0, 500) : undefined + }; + } + } + } + + // Safely process ASN names configuration + if (loadedConfig.ASNNames && typeof loadedConfig.ASNNames === 'object') { + const asnNameEntries = Object.entries(loadedConfig.ASNNames).slice(0, SECURITY_LIMITS.MAX_ASN_GROUPS); + for (const [group, config] of asnNameEntries) { + if (typeof group === 'string' && config && typeof config === 'object') { + cfg.ASNNames![group] = { + Patterns: Array.isArray((config as any).Patterns) + ? (config as any).Patterns.filter((p: any) => typeof p === 'string' && p.length <= 100).slice(0, 100) + : [], + BlockPage: typeof (config as any).BlockPage === 'string' ? (config as any).BlockPage.slice(0, 500) : undefined + }; + } + } + } + + // Safely process country block pages + if (loadedConfig.CountryBlockPages && typeof loadedConfig.CountryBlockPages === 'object') { + const countryEntries = Object.entries(loadedConfig.CountryBlockPages).slice(0, SECURITY_LIMITS.MAX_COUNTRY_CODES); + for (const [country, page] of countryEntries) { + if (typeof country === 'string' && country.length === 2 && typeof page === 'string') { + cfg.CountryBlockPages![country] = page.slice(0, 500); + } + } + } + + // Safely process continent block pages + if (loadedConfig.ContinentBlockPages && typeof loadedConfig.ContinentBlockPages === 'object') { + const continentEntries = Object.entries(loadedConfig.ContinentBlockPages).slice(0, SECURITY_LIMITS.MAX_CONTINENT_CODES); + for (const [continent, page] of continentEntries) { + if (typeof continent === 'string' && continent.length === 2 && typeof page === 'string') { + cfg.ContinentBlockPages![continent] = page.slice(0, 500); + } + } + } + + logs.plugin('ipfilter', 'IP filter configuration loaded and validated successfully'); + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; + logs.error('ipfilter', `Failed to load IP filter config: ${errorMessage}`); + // cfg already has safe defaults + } +} + +// Initialize configuration on module load +await initializeIPFilter(); + +const enabled = cfg.Core.Enabled; +const accountId = cfg.Core.AccountID || process.env.MAXMIND_ACCOUNT_ID || ''; +const licenseKey = cfg.Core.LicenseKey || process.env.MAXMIND_LICENSE_KEY || ''; +const dbUpdateIntervalMs = parseDuration(cfg.Core.DBUpdateInterval); + +const ipBlockCacheTTL = cfg.Cache.IPBlockCacheTTLSec * 1000; +const ipBlockCacheMaxEntries = cfg.Cache.IPBlockCacheMaxEntries; + +const blockedCountryCodes = new Set(cfg.Blocking.CountryCodes); +const blockedContinentCodes = new Set(cfg.Blocking.ContinentCodes); +const defaultBlockPage = cfg.Blocking.DefaultBlockPage; + +// Enhanced reputation tracking +const ipReputationCache = new Map(); +const suspiciousActivityThreshold = cfg.Advanced.SuspiciousActivityThreshold; +const reputationTTL = cfg.Advanced.ReputationTTL; + +// Process ASN blocks with error handling +const blockedASNs: Record = {}; +const asnGroupBlockPages: Record = {}; + +for (const [group, config] of Object.entries(cfg.ASN || {})) { + blockedASNs[group] = config.Numbers; + if (config.BlockPage) { + asnGroupBlockPages[group] = config.BlockPage; + } +} + +// Process ASN name blocks with error handling +const blockedASNNames: Record = {}; + +for (const [group, config] of Object.entries(cfg.ASNNames || {})) { + blockedASNNames[group] = config.Patterns; + if (config.BlockPage) { + asnGroupBlockPages[group] = config.BlockPage; + } +} + +const countryBlockPages = cfg.CountryBlockPages || {}; +const continentBlockPages = cfg.ContinentBlockPages || {}; + +// Cache with size limits to prevent memory leaks +const ipBlockCache = new Map(); +const blockPageCache = new Map(); + +const geoIPCountryDBPath = join(rootDir, 'data/GeoLite2-Country.mmdb'); +const geoIPASNDBPath = join(rootDir, 'data/GeoLite2-ASN.mmdb'); +const updateTimestampPath = join(rootDir, 'data/ipfilter_update.json'); + +let geoipCountryReader: MaxMindReader | null = null; +let geoipASNReader: MaxMindReader | null = null; +let isReloading = false; +let reloadLock: Promise = Promise.resolve(); + +/** + * SECURITY ENGINE: Enhanced IP reputation tracking with comprehensive threat analysis + * Tracks suspicious activities and maintains IP reputation scores for threat detection + */ +export class IPReputationTracker { + private readonly suspiciousActivities = new Map(); + private readonly knownGoodIPs = new Set(); + private readonly knownBadIPs = new Set(); + private cleanupTimer?: NodeJS.Timeout; + + constructor() { + this.startCleanupTimer(); + } + + // CRITICAL: Prevents memory leaks from unbounded reputation cache growth + private startCleanupTimer(): void { + this.cleanupTimer = setInterval(() => { + this.cleanupReputationCache(); + }, SECURITY_LIMITS.CLEANUP_INTERVAL); + } + + recordSuspiciousActivity(ip: string, activity: SuspiciousActivity): void { + try { + // Input validation + if (!ip || typeof ip !== 'string' || ip.length > 45) return; // Max IPv6 length + if (!activity || !Object.keys(ACTIVITY_WEIGHTS).includes(activity)) return; + + const key = `${ip}:${activity}`; + const current = this.suspiciousActivities.get(key) || 0; + this.suspiciousActivities.set(key, Math.min(current + 1, 1000)); // Cap activity count + + this.updateReputation(ip, activity); + } catch (error) { + logs.error('ipfilter', `Failed to record suspicious activity: ${error}`); + } + } + + private 
updateReputation(ip: string, activity: SuspiciousActivity): void { + try { + let reputation = ipReputationCache.get(ip) || { + score: 0, + activities: {} as Record, + lastUpdate: Date.now(), + blocked: false + }; + + reputation.activities[activity] = Math.min((reputation.activities[activity] || 0) + 1, 100); + reputation.lastUpdate = Date.now(); + + // Calculate reputation score + reputation.score = this.calculateReputationScore(reputation.activities); + + // Mark as suspicious if threshold exceeded + if (reputation.score >= suspiciousActivityThreshold) { + reputation.blocked = true; + this.knownBadIPs.add(ip); + } + + ipReputationCache.set(ip, reputation); + + // Trigger cleanup if cache is getting large + if (ipReputationCache.size > SECURITY_LIMITS.MAX_REPUTATION_CACHE_SIZE * 2) { + this.cleanupReputationCache(); + } + } catch (error) { + logs.error('ipfilter', `Failed to update reputation: ${error}`); + } + } + + private calculateReputationScore(activities: Record): number { + let score = 0; + for (const [activity, count] of Object.entries(activities)) { + const weight = ACTIVITY_WEIGHTS[activity as SuspiciousActivity] || 1; + score += count * weight; + } + return Math.min(score, 1000); // Cap total score + } + + getReputation(ip: string): ReputationData | null { + if (!ip || typeof ip !== 'string') return null; + + const reputation = ipReputationCache.get(ip); + if (!reputation) return null; + + // Check if reputation data is stale + if (Date.now() - reputation.lastUpdate > reputationTTL) { + ipReputationCache.delete(ip); + this.knownBadIPs.delete(ip); + return null; + } + + return reputation; + } + + isKnownBad(ip: string): boolean { + const reputation = this.getReputation(ip); + return reputation ? reputation.blocked : this.knownBadIPs.has(ip); + } + + isKnownGood(ip: string): boolean { + return this.knownGoodIPs.has(ip); + } + + markAsGood(ip: string): void { + if (!ip || typeof ip !== 'string') return; + + this.knownGoodIPs.add(ip); + this.knownBadIPs.delete(ip); + ipReputationCache.delete(ip); + + // Enforce size limits + if (this.knownGoodIPs.size > SECURITY_LIMITS.MAX_KNOWN_IPS_SIZE) { + const excess = this.knownGoodIPs.size - SECURITY_LIMITS.MAX_KNOWN_IPS_SIZE; + const entries = Array.from(this.knownGoodIPs).slice(0, excess); + entries.forEach(ipAddr => this.knownGoodIPs.delete(ipAddr)); + } + } + + private cleanupReputationCache(): void { + const now = Date.now(); + let cleanedItems = 0; + + try { + // Clean expired reputation entries + for (const [ip, reputation] of ipReputationCache.entries()) { + if (now - reputation.lastUpdate > reputationTTL) { + ipReputationCache.delete(ip); + this.knownBadIPs.delete(ip); + cleanedItems++; + } + } + + // Clean suspicious activities cache + for (const [key] of this.suspiciousActivities.entries()) { + const [ip] = key.split(':'); + if (ip && !ipReputationCache.has(ip)) { + this.suspiciousActivities.delete(key); + cleanedItems++; + } + } + + // Enforce maximum cache sizes to prevent memory exhaustion + if (ipReputationCache.size > SECURITY_LIMITS.MAX_REPUTATION_CACHE_SIZE) { + const excess = ipReputationCache.size - SECURITY_LIMITS.MAX_REPUTATION_CACHE_SIZE; + const entries = Array.from(ipReputationCache.entries()).slice(0, excess); + entries.forEach(([ip]) => { + ipReputationCache.delete(ip); + this.knownBadIPs.delete(ip); + }); + cleanedItems += excess; + } + + if (this.knownGoodIPs.size > SECURITY_LIMITS.MAX_KNOWN_IPS_SIZE) { + const excess = this.knownGoodIPs.size - SECURITY_LIMITS.MAX_KNOWN_IPS_SIZE; + const entries = 
Array.from(this.knownGoodIPs).slice(0, excess); + entries.forEach(ip => this.knownGoodIPs.delete(ip)); + cleanedItems += excess; + } + + if (cleanedItems > 0) { + logs.plugin('ipfilter', `Cleaned ${cleanedItems} reputation cache entries`); + } + } catch (error) { + logs.error('ipfilter', `Error during reputation cache cleanup: ${error}`); + } + } + + // Cleanup method for graceful shutdown + destroy(): void { + if (this.cleanupTimer) { + clearInterval(this.cleanupTimer); + this.cleanupTimer = undefined; + } + } +} + +const reputationTracker = new IPReputationTracker(); + +async function getLastUpdateTimestamp(): Promise { + try { + if (fs.existsSync(updateTimestampPath)) { + const data = await fsPromises.readFile(updateTimestampPath, 'utf8'); + const json = JSON.parse(data); + return Number(json.lastUpdated) || 0; + } + } catch (error) { + logs.warn('ipfilter', `Failed to read last update timestamp: ${error}`); + } + return 0; +} + +async function saveUpdateTimestamp(): Promise { + try { + const timestamp = Date.now(); + await fsPromises.writeFile( + updateTimestampPath, + JSON.stringify({ lastUpdated: timestamp }), + 'utf8', + ); + return timestamp; + } catch (error) { + logs.error('ipfilter', `Failed to save update timestamp: ${error}`); + return Date.now(); + } +} + +// Ensure the update timestamp file exists on first run +if (!fs.existsSync(updateTimestampPath)) { + try { + await saveUpdateTimestamp(); + } catch (error) { + logs.error('ipfilter', `Failed to initialize update timestamp file: ${error}`); + } +} + +/** + * SECURITY DOWNLOAD: Download GeoIP databases with comprehensive validation + * Prevents malicious database injection and ensures data integrity + */ +async function downloadGeoIPDatabases(): Promise { + if (!licenseKey || !accountId) { + logs.warn( + 'ipfilter', + 'No MaxMind credentials found; skipping GeoIP database download. 
Please set MAXMIND_ACCOUNT_ID and MAXMIND_LICENSE_KEY environment variables or add AccountID and LicenseKey to config/ipfilter.toml', + ); + return; + } + + const editions = [ + { id: 'GeoLite2-Country', filePath: geoIPCountryDBPath }, + { id: 'GeoLite2-ASN', filePath: geoIPASNDBPath }, + ]; + + for (const { id, filePath } of editions) { + if (!fs.existsSync(filePath)) { + logs.plugin('ipfilter', `Downloading ${id} database...`); + + try { + const url = `https://download.maxmind.com/app/geoip_download?edition_id=${encodeURIComponent(id)}&license_key=${encodeURIComponent(licenseKey)}&suffix=tar.gz`; + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), 300000); // 5 minute timeout + + const res = await fetch(url, { + signal: controller.signal, + headers: { 'User-Agent': 'Checkpoint-Security-Gateway/1.0' } + }); + + clearTimeout(timeoutId); + + if (!res.ok) { + logs.error('ipfilter', `Failed to download ${id} database: ${res.status} ${res.statusText}`); + continue; + } + + const contentLength = res.headers.get('content-length'); + if (contentLength && parseInt(contentLength) > 100 * 1024 * 1024) { // 100MB limit + logs.error('ipfilter', `Database ${id} too large: ${contentLength} bytes`); + continue; + } + + const tempTar = join(rootDir, 'data', `${id}.tar.gz`); + const arrayBuf = await res.arrayBuffer(); + + if (arrayBuf.byteLength < SECURITY_LIMITS.MIN_DATABASE_SIZE) { + logs.error('ipfilter', `Downloaded database ${id} too small: ${arrayBuf.byteLength} bytes`); + continue; + } + + await fsPromises.writeFile(tempTar, Buffer.from(arrayBuf)); + + // Extract .mmdb files from the downloaded tar.gz with security validation + const extract = tarStream.extract(); + let extractedFiles = 0; + + extract.on('entry', (header: any, stream: any, next: any) => { + if (header.name.endsWith('.mmdb') && extractedFiles < 5) { // Limit extracted files + const filename = header.name.split('/').pop(); + if (filename && filename.length < 100) { // Filename length validation + const outPath = join(rootDir, 'data', filename); + const ws = fs.createWriteStream(outPath); + extractedFiles++; + + stream + .pipe(ws) + .on('finish', next) + .on('error', (error: any) => { + logs.error('ipfilter', `Extraction error: ${error}`); + next(); + }); + } else { + stream.resume(); + next(); + } + } else { + stream.resume(); + next(); + } + }); + + await new Promise((resolve, reject) => { + fs.createReadStream(tempTar) + .pipe(createGunzip()) + .pipe(extract) + .on('finish', resolve) + .on('error', reject); + }); + + await fsPromises.unlink(tempTar); + logs.plugin('ipfilter', `${id} database downloaded and extracted.`); + } catch (error) { + logs.error('ipfilter', `Error downloading ${id} database: ${error}`); + } + } + } +} + +await downloadGeoIPDatabases(); + +/** + * Load block page with cache management to prevent memory leaks + */ +async function loadBlockPage(filePath: string): Promise { + if (!blockPageCache.has(filePath)) { + try { + // Security: Validate file path + if (!filePath || filePath.length > 500) { + blockPageCache.set(filePath, null); + return null; + } + + const txt = await fsPromises.readFile(filePath, 'utf8'); + + // Security: Limit file size + if (txt.length > 1024 * 1024) { // 1MB limit + logs.warn('ipfilter', `Block page ${filePath} too large, truncating`); + blockPageCache.set(filePath, txt.slice(0, 1024 * 1024)); + } else { + blockPageCache.set(filePath, txt); + } + + // Enforce cache size limit + if (blockPageCache.size > 
SECURITY_LIMITS.MAX_BLOCK_PAGE_CACHE_SIZE) { + const firstKey = blockPageCache.keys().next().value; + if (firstKey) { + blockPageCache.delete(firstKey); + } + } + } catch (error) { + logs.warn('ipfilter', `Failed to load block page ${filePath}: ${error}`); + blockPageCache.set(filePath, null); + } + } + return blockPageCache.get(filePath) || null; +} + +/** + * SECURITY CRITICAL: Load GeoIP databases with comprehensive validation and error handling + */ +async function loadGeoDatabases(): Promise { + if (isReloading) { + await reloadLock; + return true; + } + + isReloading = true; + let lockResolve: (() => void) | undefined; + reloadLock = new Promise((resolve) => { + lockResolve = resolve; + }); + + try { + // Check if database files exist and have reasonable sizes + if (!fs.existsSync(geoIPCountryDBPath) || !fs.existsSync(geoIPASNDBPath)) { + logs.warn('ipfilter', 'GeoIP database files not found. IP filtering will be disabled.'); + return false; + } + + const countryStats = fs.statSync(geoIPCountryDBPath); + const asnStats = fs.statSync(geoIPASNDBPath); + + if (countryStats.size > SECURITY_LIMITS.MIN_DATABASE_SIZE && asnStats.size > SECURITY_LIMITS.MIN_DATABASE_SIZE) { + logs.plugin('ipfilter', 'Initializing GeoIP databases from disk...'); + + let newCountryReader: any; + let newASNReader: any; + + try { + // Use timeout to prevent hanging + const timeoutPromise = new Promise((_, reject) => { + setTimeout(() => reject(new Error('Database opening timeout')), SECURITY_LIMITS.DATABASE_VALIDATION_TIMEOUT); + }); + + newCountryReader = await Promise.race([ + maxmind.open(geoIPCountryDBPath), + timeoutPromise + ]); + + newASNReader = await Promise.race([ + maxmind.open(geoIPASNDBPath), + timeoutPromise + ]); + } catch (dbOpenErr) { + const errorMessage = dbOpenErr instanceof Error ? dbOpenErr.message : 'Unknown error'; + logs.error('ipfilter', `Failed to open GeoIP databases: ${errorMessage}`); + return false; + } + + // Validate databases with test lookup + try { + const testIP = '8.8.8.8'; + const countryTest = newCountryReader.get(testIP); + const asnTest = newASNReader.get(testIP); + + if (!countryTest || !asnTest) { + throw new Error('Database validation failed: test lookups returned empty results'); + } + } catch (validationErr) { + const errorMessage = validationErr instanceof Error ? validationErr.message : 'Unknown error'; + logs.error('ipfilter', `GeoIP database validation failed: ${errorMessage}`); + + return false; + } + + // Safely replace old readers + const oldCountryReader = geoipCountryReader; + const oldASNReader = geoipASNReader; + + geoipCountryReader = newCountryReader as MaxMindReader; + geoipASNReader = newASNReader as MaxMindReader; + + if (oldCountryReader || oldASNReader) { + logs.plugin('ipfilter', 'GeoIP databases reloaded and active'); + } else { + logs.plugin('ipfilter', 'GeoIP databases loaded and active'); + } + + // Clear IP block cache since geo data may have changed + ipBlockCache.clear(); + + await saveUpdateTimestamp(); + + return true; + } else { + logs.warn( + 'ipfilter', + 'GeoIP database files are empty or too small. IP filtering will be disabled.', + ); + return false; + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : 'Unknown error'; + logs.error('ipfilter', `Failed to load GeoIP databases: ${errorMessage}`); + return false; + } finally { + isReloading = false; + if (lockResolve) lockResolve(); + } +} + +async function checkAndUpdateDatabases(): Promise { + if (isReloading) return false; + + const lastUpdate = await getLastUpdateTimestamp(); + const now = Date.now(); + const timeSinceUpdate = now - lastUpdate; + + if (timeSinceUpdate >= dbUpdateIntervalMs) { + const hoursSinceUpdate = timeSinceUpdate / (1000 * 60 * 60); + logs.plugin( + 'ipfilter', + `GeoIP databases last updated ${hoursSinceUpdate.toFixed(1)} hours ago, reloading...`, + ); + return await loadGeoDatabases(); + } + + return false; +} + +function startPeriodicDatabaseUpdates(): void { + // Schedule periodic updates using pre-computed interval + setInterval(async () => { + try { + await checkAndUpdateDatabases(); + } catch (error) { + logs.error('ipfilter', `Failed during periodic database update: ${error}`); + } + }, dbUpdateIntervalMs); + + logs.plugin('ipfilter', `Scheduled GeoIP database updates every ${cfg.Core.DBUpdateInterval}`); +} + +await loadGeoDatabases(); +startPeriodicDatabaseUpdates(); + +const asnNameMatchers = new Map(); +for (const [group, names] of Object.entries(blockedASNNames)) { + if (names.length > 0) { + try { + asnNameMatchers.set(group, new AhoCorasick(names) as AhoCorasickMatcher); + } catch (error) { + logs.error('ipfilter', `Failed to create ASN name matcher for ${group}: ${error}`); + } + } +} + +function cacheAndReturn( + ip: string, + blocked: boolean, + blockType: BlockType, + blockValue: string, + customPage: string, + asnOrgName: string, + geoData: GeoData | null = null +): [boolean, BlockType, string, string, string, GeoData | null] { + const expiresAt = Date.now() + ipBlockCacheTTL; + ipBlockCache.set(ip, { blocked, blockType, blockValue, customPage, asnOrgName, geoData, expiresAt }); + + // Enforce maximum cache size + if (ipBlockCacheMaxEntries > 0 && ipBlockCache.size > ipBlockCacheMaxEntries) { + const oldestKey = ipBlockCache.keys().next().value; + if (oldestKey) { + ipBlockCache.delete(oldestKey); + } + } + return [blocked, blockType, blockValue, customPage, asnOrgName, geoData]; +} + +/** + * SECURITY CORE: Extended IP blocking with comprehensive threat analysis + * Analyzes IP addresses against geographic, ASN, and reputation-based filters + */ +function isBlockedIPExtended(ip: string): [boolean, BlockType, string, string, string, GeoData | null] { + // Input validation + if (!ip || typeof ip !== 'string' || ip.length > 45) { + return [false, '', '', '', '', null]; + } + + const now = Date.now(); + const entry = ipBlockCache.get(ip); + if (entry) { + if (entry.expiresAt > now) { + // Refresh recency by re-inserting entry + ipBlockCache.delete(ip); + ipBlockCache.set(ip, entry); + return [entry.blocked, entry.blockType, entry.blockValue, entry.customPage, entry.asnOrgName, entry.geoData]; + } else { + // Entry expired, remove it + ipBlockCache.delete(ip); + } + } + + // Check reputation first + if (reputationTracker.isKnownBad(ip)) { + return cacheAndReturn(ip, true, 'reputation', 'bad_reputation', defaultBlockPage, '', null); + } + + if (reputationTracker.isKnownGood(ip)) { + return cacheAndReturn(ip, false, '', '', '', '', null); + } + + const countryReader = geoipCountryReader; + const asnReader = geoipASNReader; + + if (!countryReader || !asnReader) { + return [false, '', '', '', '', null]; + } + + let countryInfo: CountryInfo | null = null; + let geoData: GeoData | null = 
null; + + try { + const result = countryReader.get(ip); + if (result && 'country' in result) { + countryInfo = result as CountryInfo; + if (countryInfo) { + geoData = { + country: countryInfo.country?.iso_code, + continent: countryInfo.continent?.code, + latitude: countryInfo.location?.latitude, + longitude: countryInfo.location?.longitude + }; + } + } + } catch (error) { + // Silently handle lookup errors + } + + if (countryInfo?.country && blockedCountryCodes.has(countryInfo.country.iso_code)) { + const page = countryBlockPages[countryInfo.country.iso_code] || defaultBlockPage; + return cacheAndReturn(ip, true, 'country', countryInfo.country.iso_code, page, '', geoData); + } + + if (countryInfo?.continent && blockedContinentCodes.has(countryInfo.continent.code)) { + const page = continentBlockPages[countryInfo.continent.code] || defaultBlockPage; + return cacheAndReturn(ip, true, 'continent', countryInfo.continent.code, page, '', geoData); + } + + let asnInfo: ASNInfo | null = null; + try { + const result = asnReader.get(ip); + if (result && 'autonomous_system_number' in result) { + asnInfo = result as ASNInfo; + } + } catch (error) { + // Silently handle lookup errors + } + + if (asnInfo?.autonomous_system_number) { + const asn = asnInfo.autonomous_system_number; + const orgName = asnInfo.autonomous_system_organization || ''; + + for (const [group, arr] of Object.entries(blockedASNs)) { + if (arr.includes(asn)) { + const page = asnGroupBlockPages[group] || defaultBlockPage; + return cacheAndReturn(ip, true, 'asn', group, page, orgName, geoData); + } + } + + for (const [group, matcher] of asnNameMatchers.entries()) { + try { + const matches = matcher.find(orgName); + if (matches.length) { + const page = asnGroupBlockPages[group] || defaultBlockPage; + return cacheAndReturn(ip, true, 'asn', group, page, orgName, geoData); + } + } catch (error) { + logs.warn('ipfilter', `Error in ASN name matching for ${group}: ${error}`); + } + } + } + + return cacheAndReturn(ip, false, '', '', '', '', geoData); +} + +// Extend Express types to include our custom properties +declare global { + namespace Express { + interface Request { + geoData?: GeoData | null; + } + } +} + +function IPBlockMiddleware() { + return { + middleware: (req: Request, res: Response, next: NextFunction) => { + // Skip if request is pre-excluded + if (req._excluded || res.locals._excluded) { + return next(); + } + + // Convert Express request to the format expected by ipfilter logic + const requestUrl = `${req.protocol}://${req.get('host') || 'localhost'}${req.originalUrl}`; + const request: NetworkRequest = { + url: requestUrl, + headers: { + get: (name: string) => req.get(name) || null + } + }; + + const clientIP = getRealIP(request); + logs.plugin('ipfilter', `Incoming request from IP: ${clientIP}`); + const [blocked, blockType, blockValue, customPage, asnOrgName, geoData] = isBlockedIPExtended(clientIP); + + // Store geo data in request for other middleware + req.geoData = geoData; + + if (blocked) { + // Record in reputation tracker + reputationTracker.recordSuspiciousActivity(clientIP, 'geo_block'); + + const url = new URL(requestUrl); + + if (url.pathname.startsWith('/api')) { + return res.status(403).json({ + error: 'Access denied from your location or network.', + reason: 'geoip', + type: blockType, + value: blockValue, + asn_org: asnOrgName, + }); + } + + // Process block page asynchronously + (async () => { + // Normalize page paths by stripping leading slash + const cleanCustomPage = customPage.replace(/^\/+/, ''); + 
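// Illustrative (hypothetical value): a customPage of '/pages/asn_block.html' becomes +
// 'pages/asn_block.html' here, so it is looked up via join(rootDir, cleanCustomPage) below. +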
const cleanDefaultPage = defaultBlockPage.replace(/^\/+/, ''); + + let html = ''; + logs.plugin( + 'ipfilter', + `Block pages: custom="${cleanCustomPage}", default="${cleanDefaultPage}"`, + ); + + const paths = [ + // allow absolute paths relative to project root first + join(rootDir, cleanCustomPage), + ]; + + // Fallback to default block page if custom page isn't found + if (customPage !== defaultBlockPage) { + paths.push( + // check default page at root directory + join(rootDir, cleanDefaultPage), + ); + } + + for (const p of paths) { + logs.plugin('ipfilter', `Trying block page at: ${p}`); + const content = await loadBlockPage(p); + logs.plugin('ipfilter', `Load result for ${p}: ${content ? 'FOUND' : 'NOT FOUND'}`); + if (content) { + html = content; + break; + } + } + + if (html) { + const output = html.replace('{{.ASNName}}', asnOrgName || 'Blocked Network'); + res.status(403).type('html').send(output); + } else { + res.status(403).type('text').send('Access denied from your location or network.'); + } + })().catch(err => { + logs.error('ipfilter', `Error processing block page: ${err}`); + res.status(403).type('text').send('Access denied from your location or network.'); + }); + + return; // End the middleware chain + } + + return next(); + } + }; +} + +if (enabled) { + registerPlugin('ipfilter', IPBlockMiddleware()); +} else { + logs.plugin('ipfilter', 'IP filter plugin disabled via config'); +} + +export { checkAndUpdateDatabases, loadGeoDatabases, reputationTracker }; + +/** + * Checks if a given IP address is currently in the blacklist according to the reputation tracker. + * @param ip - The IP address to check. + * @returns True if the IP is known bad, false otherwise. + */ +export function isIPBlacklisted(ip: string): boolean { + return reputationTracker.isKnownBad(ip); +} \ No newline at end of file diff --git a/src/plugins/waf.ts b/src/plugins/waf.ts new file mode 100644 index 0000000..62a8fc0 --- /dev/null +++ b/src/plugins/waf.ts @@ -0,0 +1,1270 @@ +import { registerPlugin, loadConfig } from '../index.js'; +import { getRealIP } from '../utils/network.js'; +import { parseDuration } from '../utils/time.js'; +import { PatternMatcherFactory, CommonPatterns } from '../utils/pattern-matching.js'; +import { TimestampCacheCleaner } from '../utils/cache-utils.js'; +import { BotVerificationEngine, type BotVerificationConfig, type BotVerificationResult } from '../utils/bot-verification.js'; +import * as logs from '../utils/logs.js'; + +// @ts-ignore - string-dsa doesn't have TypeScript definitions +import { AhoCorasick } from 'string-dsa'; + +// ==================== SECURITY-HARDENED TYPE DEFINITIONS ==================== + +type AttackCategory = + | 'SQL_INJECTION' | 'XSS' | 'COMMAND_INJECTION' | 'PATH_TRAVERSAL' + | 'LFI_RFI' | 'NOSQL_INJECTION' | 'XXE' | 'LDAP_INJECTION'; + +type RuleField = 'uri' | 'uri_path' | 'uri_query' | 'user_agent' | 'referer'; +type RuleAction = 'block' | 'monitor' | 'log' | 'rate_limit'; +type WAFMode = 'prevent' | 'monitor' | 'disabled'; + +interface WAFRuleCondition { + Field: RuleField; + Pattern: string; +} + +interface CustomWAFRule { + Name: string; + Category: string; + Score: number; + Action: RuleAction; + Enabled: boolean; + Hosts: string[]; + Conditions?: WAFRuleCondition[]; + Pattern?: string; + Field?: RuleField; +} + +interface CompiledWAFRule { + name: string; + category: string; + score: number; + action: RuleAction; + hosts: string[]; + conditions: Array<{ + field: RuleField; + regex: RegExp; + }>; +} + +interface WAFConfiguration { + Core: 
{ Enabled: boolean; Mode: WAFMode }; + Exceptions: { ExcludedPaths: string[]; TrustedUserAgents: string[] }; + CustomRules?: CustomWAFRule[]; + BotVerification?: { + Enabled: boolean; + AllowVerifiedBots: boolean; + BlockUnverifiedBots: boolean; + EnableDNSVerification: boolean; + EnableIPRangeVerification: boolean; + DNSTimeout: string; + MinimumConfidence: number; + BotSources?: Array<{ + name: string; + userAgentPattern: string; + ipRangeURL: string; + dnsVerificationDomain?: string; + updateInterval: string; + enabled: boolean; + }>; + }; +} + +interface WAFAnalysisResult { + score: number; + attacks: string[]; + signals: Record; +} + +interface RequestHeaders { + get?(name: string): string | undefined; + entries?(): IterableIterator<[string, string]>; + [key: string]: string | string[] | undefined | Function; +} + +interface AnalyzableRequest { + url: string; + method: string; + headers: RequestHeaders; + body?: unknown; +} + +interface RateLimitData { + count: number; + lastReset: number; +} + +// ==================== SECURITY CONSTANTS ==================== + +const SECURITY_LIMITS = { + MAX_CACHE_SIZE: 10000, + MAX_URL_LENGTH: 8192, + MAX_HEADER_LENGTH: 32768, + MAX_BODY_SIZE: (process.env.MAX_BODY_SIZE_MB ? parseInt(process.env.MAX_BODY_SIZE_MB) : 10) * 1024 * 1024, + MAX_DECODE_ATTEMPTS: 3, + MAX_PATTERN_MATCHES: 100, + CLEANUP_INTERVAL: parseDuration('5m'), // 5 minutes + RATE_LIMIT_WINDOW: parseDuration('10m'), // 10 minutes +} as const; + +// WAF configuration - loaded during initialization to avoid race conditions +let wafConfig: WAFConfiguration = { + Core: { Enabled: false, Mode: 'disabled' }, + Exceptions: { ExcludedPaths: [], TrustedUserAgents: [] } +}; + +// Bot verification engine +let botVerificationEngine: BotVerificationEngine | null = null; + +// Pre-compiled regex patterns for better performance +const COMPILED_WAF_RULES: Record = {}; +const COMPILED_CUSTOM_RULES: CompiledWAFRule[] = []; + +// Centralized pattern matchers using the new utility +const WAF_PATTERN_MATCHERS = { + SQL_INJECTION: PatternMatcherFactory.getAhoCorasickMatcher('waf_sql_injection', CommonPatterns.SQL_INJECTION), + XSS: PatternMatcherFactory.getAhoCorasickMatcher('waf_xss', CommonPatterns.XSS), + COMMAND_INJECTION: PatternMatcherFactory.getAhoCorasickMatcher('waf_command_injection', CommonPatterns.COMMAND_INJECTION), + PATH_TRAVERSAL: PatternMatcherFactory.getAhoCorasickMatcher('waf_path_traversal', CommonPatterns.PATH_TRAVERSAL), + LFI_RFI: PatternMatcherFactory.getAhoCorasickMatcher('waf_lfi_rfi', [ + 'php://filter', 'php://input', 'data://text', 'file:///', 'expect://', + 'zip://', 'phar://', 'ftp://', 'gopher://', 'dict://', 'sftp://' + ]), + NOSQL_INJECTION: PatternMatcherFactory.getAhoCorasickMatcher('waf_nosql_injection', [ + '$where', '$ne', '$gt', '$lt', '$regex', '$exists', '$in', '$nin', + 'this.', 'sleep(', 'constructor', 'prototype' + ]), + XXE: PatternMatcherFactory.getAhoCorasickMatcher('waf_xxe', [ + ' = { + SQL_INJECTION: [ + /union\s+select\s+.*\s+from/i, + /insert\s+into\s+.*\s+values/i, + /delete\s+from\s+.*\s+where/i, + /update\s+.*\s+set\s+.*\s+where/i, + /exec\s*\(\s*'[^']*'\s*\)/i, + /'\s*(union|select|insert|delete|update|drop)/i, + /--\s*(union|select|insert|delete)/i, + /0x[0-9a-f]+/i, + /char\([0-9,\s]+\)/i, + /concat\s*\(/i + ], + XSS: [ + // Script tag detection - multiple variations + /]*>.*<\/script>/i, + /]*>/i, + /<\/script[^>]*>/i, + /\bscript\s*>/i, + /%3Cscript/i, + /\\x3cscript/i, + /\\u003cscript/i, + + // javascript: protocol - CRITICAL + 
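// For example, 'javascript:alert(1)' and 'JAVASCRIPT :alert(1)' both match the +
// case-insensitive pattern below, which tolerates whitespace before the colon. +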
/javascript\s*:/i, + /javascript\s*&#[x0-9]+3a/i, // HTML entity encoded colon + /javascript\s*%3a/i, // URL encoded colon + /\\x6A\\x61\\x76\\x61\\x73\\x63\\x72\\x69\\x70\\x74\\x3A/i, // Hex encoded + + // Event handlers - comprehensive list + /on(load|error|click|mouseover|mouseout|mousemove|mousedown|mouseup|keydown|keypress|keyup|submit|change|focus|blur|dblclick|scroll|select|contextmenu|wheel|drag|drop|copy|cut|paste|input|invalid|reset|search|toggle|touchstart|touchend|touchmove|pointerdown|pointerup|pointermove|animationstart|animationend|animationiteration|transitionend|message|online|offline|popstate|storage|hashchange|pagehide|pageshow|unload|beforeunload|resize|orientationchange|devicemotion|deviceorientation)\s*=/i, + + // Data URLs with dangerous content + /data:\s*text\/html/i, + /data:\s*text\/javascript/i, + /data:\s*application\/javascript/i, + /data:\s*image\/svg\+xml[^,]*', 'javascript:', 'document.cookie', 'document.write', + 'alert(', 'prompt(', 'confirm(', 'onload=', 'onerror=', 'onclick=', + '', 'javascript:', 'data:text/html', 'data:application', + 'ondblclick=', 'onmouseenter=', 'onmouseleave=', 'onmousemove=', 'onkeydown=', + 'onkeypress=', 'onkeyup=', 'onsubmit=', 'onreset=', 'onblur=', 'onchange=', + 'onsearch=', 'onselect=', 'ontoggle=', 'ondrag=', 'ondrop=', 'oninput=', + 'oninvalid=', 'onpaste=', 'oncopy=', 'oncut=', 'onwheel=', 'ontouchstart=', + 'ontouchend=', 'ontouchmove=', 'onpointerdown=', 'onpointerup=', 'onpointermove=', + 'srcdoc=', ' c.patterns); + const uniquePatterns = Array.from(new Set(allPatterns)); + const names = collections.map(c => c.name).join('+'); + + return { + name: names, + patterns: uniquePatterns, + description: `Merged collection: ${names}` + }; + }, + + /** + * Validates pattern array + */ + validatePatterns(patterns: readonly string[]): { valid: readonly string[]; invalid: readonly string[] } { + const valid: string[] = []; + const invalid: string[] = []; + + for (const pattern of patterns) { + if (typeof pattern === 'string' && pattern.length > 0 && pattern.length <= 200) { + valid.push(pattern); + } else { + invalid.push(pattern); + } + } + + return { valid, invalid }; + } +}; \ No newline at end of file diff --git a/src/utils/performance.ts b/src/utils/performance.ts new file mode 100644 index 0000000..f419b51 --- /dev/null +++ b/src/utils/performance.ts @@ -0,0 +1,510 @@ +// Performance optimization utilities shared across plugin + +import { parseDuration } from './time.js'; + +// Performance utilities for plugin development - provide sensible defaults +// These are internal utilities, not user-configurable + +// Default values for performance utilities +const DEFAULT_RATE_LIMITER_WINDOW = parseDuration('1m'); +const DEFAULT_RATE_LIMITER_CLEANUP = parseDuration('1m'); +const DEFAULT_BATCH_FLUSH_INTERVAL = parseDuration('1s'); +const DEFAULT_CONNECTION_TIMEOUT = parseDuration('30s'); + +// Type definitions for performance utilities +export interface CacheOptions { + maxSize?: number; + ttl?: number | null; +} + +export interface RateLimiterOptions { + windowMs?: number; + maxRequests?: number; +} + +export interface BatchProcessorOptions { + batchSize?: number; + flushInterval?: number; +} + +export interface MemoizeOptions { + maxSize?: number; + ttl?: number; +} + +export interface ConnectionPoolOptions { + maxConnections?: number; + timeout?: number; +} + +export interface PoolStats { + available: number; + inUse: number; + total: number; +} + +export interface PoolData { + connections: T[]; + inUse: Set; +} + +export 
interface Connection { + host: string; + created: number; +} + +// Type aliases for function types +export type ObjectFactory = () => T; +export type ObjectReset = (obj: T) => void; +export type BatchProcessorFunction = (batch: T[]) => Promise; +export type DebouncedFunction = (...args: T) => void; +export type ThrottledFunction = (...args: T) => void; +export type MemoizedFunction = (...args: T) => R; + +/** + * LRU (Least Recently Used) cache implementation with size limits + * Prevents memory leaks by automatically evicting oldest entries + */ +export class LRUCache { + private readonly maxSize: number; + private readonly ttl: number | null; + private readonly cache = new Map(); + private readonly accessOrder = new Map(); // Track access times + + constructor(maxSize: number = 10000, ttl: number | null = null) { + this.maxSize = maxSize; + this.ttl = ttl; // Time to live in milliseconds + } + + set(key: K, value: V): void { + // Delete if at capacity + if (this.cache.size >= this.maxSize) { + const oldestKey = this.cache.keys().next().value; + if (oldestKey !== undefined) { + this.cache.delete(oldestKey); + this.accessOrder.delete(oldestKey); + } + } + + // Add/update entry + this.cache.delete(key); + this.cache.set(key, value); + this.accessOrder.set(key, Date.now()); + } + + get(key: K): V | undefined { + if (!this.cache.has(key)) return undefined; + + // Check TTL if configured + if (this.ttl) { + const accessTime = this.accessOrder.get(key); + if (accessTime && Date.now() - accessTime > this.ttl) { + this.delete(key); + return undefined; + } + } + + // Move to end (most recently used) + const value = this.cache.get(key); + if (value !== undefined) { + this.cache.delete(key); + this.cache.set(key, value); + this.accessOrder.set(key, Date.now()); + } + + return value; + } + + has(key: K): boolean { + if (this.ttl) { + const accessTime = this.accessOrder.get(key) || 0; + const age = Date.now() - accessTime; + if (age > this.ttl) { + this.delete(key); + return false; + } + } + return this.cache.has(key); + } + + delete(key: K): boolean { + this.accessOrder.delete(key); + return this.cache.delete(key); + } + + clear(): void { + this.cache.clear(); + this.accessOrder.clear(); + } + + get size(): number { + return this.cache.size; + } + + // Clean expired entries + cleanup(): number { + if (!this.ttl) return 0; + + const now = Date.now(); + let cleaned = 0; + + for (const [key, timestamp] of this.accessOrder.entries()) { + if (now - timestamp > this.ttl) { + this.delete(key); + cleaned++; + } + } + + return cleaned; + } +} + +/** + * Rate limiter with sliding window and automatic cleanup + */ +export class RateLimiter { + private readonly windowMs: number; + private readonly maxRequests: number; + private readonly requests = new Map(); + private cleanupInterval: NodeJS.Timeout | null = null; + + constructor(windowMs: number = DEFAULT_RATE_LIMITER_WINDOW, maxRequests: number = 100, cleanupIntervalMs: number = DEFAULT_RATE_LIMITER_CLEANUP) { + this.windowMs = windowMs; + this.maxRequests = maxRequests; + + // Automatic cleanup with configured interval + this.cleanupInterval = setInterval(() => { + this.cleanup(); + }, cleanupIntervalMs); + } + + isAllowed(identifier: string): boolean { + const now = Date.now(); + const userRequests = this.requests.get(identifier) || []; + + // Remove old requests outside the window + const validRequests = userRequests.filter(timestamp => now - timestamp < this.windowMs); + + if (validRequests.length >= this.maxRequests) { + this.requests.set(identifier, 
validRequests); + return false; + } + + // Add new request + validRequests.push(now); + this.requests.set(identifier, validRequests); + + return true; + } + + cleanup(): number { + const now = Date.now(); + let cleaned = 0; + + for (const [identifier, timestamps] of this.requests.entries()) { + const validRequests = timestamps.filter(t => now - t < this.windowMs); + + if (validRequests.length === 0) { + this.requests.delete(identifier); + cleaned++; + } else { + this.requests.set(identifier, validRequests); + } + } + + return cleaned; + } + + destroy(): void { + if (this.cleanupInterval) { + clearInterval(this.cleanupInterval); + this.cleanupInterval = null; + } + this.requests.clear(); + } +} + +/** + * Object pool for reusing expensive objects + */ +export class ObjectPool { + private readonly factory: ObjectFactory; + private readonly reset: ObjectReset; + private readonly maxSize: number; + private available: T[] = []; + private readonly inUse = new Set(); + + constructor(factory: ObjectFactory, reset: ObjectReset, maxSize: number = 100) { + this.factory = factory; + this.reset = reset; + this.maxSize = maxSize; + } + + acquire(): T { + let obj: T; + + if (this.available.length > 0) { + obj = this.available.pop()!; + } else { + obj = this.factory(); + } + + this.inUse.add(obj); + return obj; + } + + release(obj: T): void { + if (!this.inUse.has(obj)) return; + + this.inUse.delete(obj); + + if (this.available.length < this.maxSize) { + this.reset(obj); + this.available.push(obj); + } + } + + clear(): void { + this.available = []; + this.inUse.clear(); + } + + get size(): PoolStats { + return { + available: this.available.length, + inUse: this.inUse.size, + total: this.available.length + this.inUse.size + }; + } +} + +/** + * Batch processor for aggregating operations + */ +export class BatchProcessor { + private readonly processor: BatchProcessorFunction; + private readonly batchSize: number; + private readonly flushInterval: number; + private queue: T[] = []; + private processing = false; + private intervalId: NodeJS.Timeout | null = null; + + constructor(processor: BatchProcessorFunction, options: BatchProcessorOptions = {}) { + this.processor = processor; + this.batchSize = options.batchSize || 100; + this.flushInterval = options.flushInterval || DEFAULT_BATCH_FLUSH_INTERVAL; + + // Auto-flush on interval + this.intervalId = setInterval(() => { + this.flush(); + }, this.flushInterval); + } + + add(item: T): void { + this.queue.push(item); + + if (this.queue.length >= this.batchSize) { + this.flush(); + } + } + + async flush(): Promise { + if (this.processing || this.queue.length === 0) return; + + this.processing = true; + const batch = this.queue.splice(0, this.batchSize); + + try { + await this.processor(batch); + } catch (err) { + console.error('Batch processing error:', err); + } finally { + this.processing = false; + } + } + + destroy(): void { + if (this.intervalId) { + clearInterval(this.intervalId); + this.intervalId = null; + } + this.flush(); + } +} + +/** + * Debounce function for reducing function call frequency + */ +export function debounce( + func: (...args: T) => void, + wait: number +): DebouncedFunction { + let timeout: NodeJS.Timeout | undefined; + + return function executedFunction(...args: T): void { + const later = (): void => { + timeout = undefined; + func(...args); + }; + + if (timeout) { + clearTimeout(timeout); + } + timeout = setTimeout(later, wait); + }; +} + +/** + * Throttle function for limiting function execution rate + */ +export function throttle( + 
func: (...args: T) => void, + limit: number +): ThrottledFunction { + let inThrottle = false; + + return function(...args: T): void { + if (!inThrottle) { + func(...args); + inThrottle = true; + setTimeout(() => { + inThrottle = false; + }, limit); + } + }; +} + +/** + * Memoize function results with optional TTL + */ +export function memoize( + func: (...args: T) => R, + options: MemoizeOptions = {} +): MemoizedFunction { + const cache = new LRUCache(options.maxSize || 1000, options.ttl); + + return function(...args: T): R { + const key = JSON.stringify(args); + + if (cache.has(key)) { + const cached = cache.get(key); + if (cached !== undefined) { + return cached; + } + } + + const result = func(...args); + cache.set(key, result); + + return result; + }; +} + +/** + * Efficient string search using Set for O(1) lookups + */ +export class StringMatcher { + private readonly patterns: Set; + + constructor(patterns: string[]) { + this.patterns = new Set(patterns.map(p => p.toLowerCase())); + } + + contains(text: string): boolean { + return this.patterns.has(text.toLowerCase()); + } + + containsAny(texts: string[]): boolean { + return texts.some(text => this.contains(text)); + } + + add(pattern: string): void { + this.patterns.add(pattern.toLowerCase()); + } + + remove(pattern: string): boolean { + return this.patterns.delete(pattern.toLowerCase()); + } + + get size(): number { + return this.patterns.size; + } +} + +/** + * Connection pool for reusing network connections + */ +export class ConnectionPool { + private readonly maxConnections: number; + private readonly connectionTimeoutMs: number; + private readonly pools = new Map>(); // host -> connections + + constructor(options: ConnectionPoolOptions = {}) { + this.maxConnections = options.maxConnections || 50; + this.connectionTimeoutMs = options.timeout || DEFAULT_CONNECTION_TIMEOUT; + } + + // Getter for subclasses to access connection timeout + protected get connectionTimeout(): number { + return this.connectionTimeoutMs; + } + + getConnection(host: string): T | null { + if (!this.pools.has(host)) { + this.pools.set(host, { + connections: [], + inUse: new Set() + }); + } + + const pool = this.pools.get(host)!; + + // Reuse existing connection + if (pool.connections.length > 0) { + const conn = pool.connections.pop()!; + pool.inUse.add(conn); + return conn; + } + + // Create new connection if under limit + if (pool.inUse.size < this.maxConnections) { + const conn = this.createConnection(host); + pool.inUse.add(conn); + return conn; + } + + return null; // Pool exhausted + } + + releaseConnection(host: string, conn: T): void { + const pool = this.pools.get(host); + if (!pool || !pool.inUse.has(conn)) return; + + pool.inUse.delete(conn); + + if (pool.connections.length < this.maxConnections / 2) { + pool.connections.push(conn); + } else { + this.closeConnection(conn); + } + } + + protected createConnection(host: string): T { + // Override in subclass + return { host, created: Date.now() } as T; + } + + protected closeConnection(_conn: T): void { + // Override in subclass + } + + destroy(): void { + for (const [_host, pool] of this.pools.entries()) { + pool.connections.forEach(conn => this.closeConnection(conn)); + pool.inUse.forEach(conn => this.closeConnection(conn)); + } + this.pools.clear(); + } +} + +// Note: All types are already exported above \ No newline at end of file diff --git a/src/utils/plugins.ts b/src/utils/plugins.ts new file mode 100644 index 0000000..b05e66b --- /dev/null +++ b/src/utils/plugins.ts @@ -0,0 +1,182 @@ +// 
============================================================================= +// SECURE PLUGIN SYSTEM - TYPESCRIPT VERSION +// ============================================================================= +// Enhanced security for module imports with comprehensive path validation +// Prevents path traversal, validates file extensions, and enforces application boundaries + +import { resolve, extname, sep, isAbsolute, normalize } from 'path'; +import { pathToFileURL } from 'url'; +import { rootDir } from '../index.js'; + +// Type definitions for secure plugin system +export interface PluginModule { + readonly [key: string]: unknown; +} + +// Security constants for module validation +const ALLOWED_EXTENSIONS = new Set(['.js', '.mjs']); +const MAX_PATH_LENGTH = 1024; // Reasonable path length limit +const MAX_PATH_DEPTH = 20; // Maximum directory depth +const BLOCKED_PATTERNS = [ + /\.\./, // Directory traversal + /\/\/+/, // Double slashes + /\0/, // Null bytes + /[\x00-\x1f]/, // Control characters + /node_modules/i, // Prevent node_modules access + /package\.json/i, // Prevent package.json access + /\.env/i, // Prevent environment file access +] as const; + +// Input validation with zero trust approach +function validateModulePath(relPath: unknown): string { + // Type validation + if (typeof relPath !== 'string') { + throw new Error('Module path must be a string'); + } + + // Length validation + if (relPath.length === 0) { + throw new Error('Module path cannot be empty'); + } + + if (relPath.length > MAX_PATH_LENGTH) { + throw new Error(`Module path too long: ${relPath.length} > ${MAX_PATH_LENGTH}`); + } + + // Security pattern validation + for (const pattern of BLOCKED_PATTERNS) { + if (pattern.test(relPath)) { + throw new Error(`Module path contains blocked pattern: ${relPath}`); + } + } + + // Normalize path to prevent encoding bypasses + const normalizedPath = normalize(relPath); + + // Validate path depth + const pathSegments = normalizedPath.split(sep).filter(segment => segment !== ''); + if (pathSegments.length > MAX_PATH_DEPTH) { + throw new Error(`Module path too deep: ${pathSegments.length} > ${MAX_PATH_DEPTH}`); + } + + return normalizedPath; +} + +function validateFileExtension(filePath: string): void { + const ext = extname(filePath).toLowerCase(); + + if (!ALLOWED_EXTENSIONS.has(ext as any)) { + throw new Error(`Only ${Array.from(ALLOWED_EXTENSIONS).join(', ')} files can be imported: ${filePath}`); + } +} + +function validateRootDirectory(): string { + if (typeof rootDir !== 'string' || rootDir.length === 0) { + throw new Error('Invalid application root directory'); + } + + return normalize(rootDir); +} + +function validateResolvedPath(absPath: string, rootDir: string): void { + const normalizedAbsPath = normalize(absPath); + const normalizedRootDir = normalize(rootDir); + + // Ensure the resolved path is within the application root + if (!normalizedAbsPath.startsWith(normalizedRootDir + sep) && normalizedAbsPath !== normalizedRootDir) { + throw new Error(`Module path outside of application root: ${normalizedAbsPath}`); + } + + // Additional security check for symbolic link traversal + try { + const relativePath = normalizedAbsPath.substring(normalizedRootDir.length); + if (relativePath.includes('..')) { + throw new Error(`Path traversal detected in resolved path: ${normalizedAbsPath}`); + } + } catch (error) { + throw new Error(`Path validation failed: ${error instanceof Error ? 
error.message : 'unknown'}`); + } +} + +/** + * Securely import a JavaScript module from within the application root. + * Enhanced with comprehensive security validation and TypeScript safety. + * Prevents path traversal, validates extensions, and enforces application boundaries. + * + * @param relPath - The relative path to the module from the application root + * @returns Promise that resolves to the imported module + * @throws Error if the path is invalid, unsafe, or outside application boundaries + */ +export async function secureImportModule(relPath: unknown): Promise { + try { + // Validate and normalize the input path + const validatedPath = validateModulePath(relPath); + + // Security check: reject absolute paths + if (isAbsolute(validatedPath)) { + throw new Error('Absolute paths are not allowed for module imports'); + } + + // Validate file extension + validateFileExtension(validatedPath); + + // Validate root directory + const validatedRootDir = validateRootDirectory(); + + // Resolve the absolute path + const absPath = resolve(validatedRootDir, validatedPath); + + // Validate the resolved path is within application boundaries + validateResolvedPath(absPath, validatedRootDir); + + // Convert to file URL for secure import + const url = pathToFileURL(absPath).href; + + // Perform the actual import with error handling + try { + const importedModule = await import(url); + + // Validate the imported module + if (!importedModule || typeof importedModule !== 'object') { + throw new Error(`Invalid module structure: ${validatedPath}`); + } + + return importedModule as PluginModule; + + } catch (importError) { + // Provide more context for import failures + throw new Error(`Failed to import module ${validatedPath}: ${importError instanceof Error ? 
importError.message : 'unknown error'}`); + } + + } catch (error) { + // Re-throw with additional context while preventing information leakage + if (error instanceof Error) { + throw new Error(`Module import failed: ${error.message}`); + } else { + throw new Error('Module import failed due to unknown error'); + } + } +} + +/** + * Type guard to check if an imported module has a specific export + * @param module - The imported module + * @param exportName - The name of the export to check + * @returns True if the export exists + */ +export function hasExport(module: PluginModule, exportName: string): boolean { + return exportName in module && module[exportName] !== undefined; +} + +/** + * Safely extract a specific export from a module with type checking + * @param module - The imported module + * @param exportName - The name of the export to extract + * @returns The export value or undefined if not found + */ +export function getExport(module: PluginModule, exportName: string): T | undefined { + if (hasExport(module, exportName)) { + return module[exportName] as T; + } + return undefined; +} \ No newline at end of file diff --git a/src/utils/proof.ts b/src/utils/proof.ts new file mode 100644 index 0000000..7f65bc5 --- /dev/null +++ b/src/utils/proof.ts @@ -0,0 +1,306 @@ +import * as crypto from 'crypto'; +import { getRealIP, type NetworkRequest } from './network.js'; +import { parseDuration } from './time.js'; + +// Type definitions for secure proof operations +export interface ChallengeData { + readonly challenge: string; + readonly salt: string; +} + +export interface ChallengeParams { + readonly Challenge: string; + readonly Salt: string; + readonly Difficulty: number; + readonly ExpiresAt: number; + readonly CreatedAt: number; + readonly ClientIP: string; + readonly PoSSeed: string; +} + +export interface CheckpointConfig { + readonly SaltLength: number; + readonly Difficulty: number; + readonly ChallengeExpiration: number; + readonly CheckPoSTimes: boolean; + readonly PoSTimeConsistencyRatio: number; +} + +// Security constants - prevent DoS attacks while respecting user config +const ABSOLUTE_MAX_SALT_LENGTH = 1024; // 1KB - prevents memory exhaustion +const ABSOLUTE_MAX_DIFFICULTY = 64; // Reasonable upper bound for crypto safety +const ABSOLUTE_MIN_DIFFICULTY = 1; // Must be at least 1 +const ABSOLUTE_MAX_DURATION = parseDuration('365d'); // 1 year - prevents overflow +const EXPECTED_POS_TIMES_LENGTH = 3; // Protocol requirement +const EXPECTED_POS_HASHES_LENGTH = 3; // Protocol requirement +const EXPECTED_HASH_LENGTH = 64; // SHA-256 hex length +const ABSOLUTE_MAX_INPUT_LENGTH = 100000; // 100KB - prevents DoS +const ABSOLUTE_MAX_REQUEST_ID_LENGTH = 64; // Reasonable hex string limit + +// Input validation functions - zero trust approach +function validateHexString(value: unknown, paramName: string, maxLength: number): string { + if (typeof value !== 'string') { + throw new Error(`${paramName} must be a string`); + } + if (value.length === 0) { + throw new Error(`${paramName} cannot be empty`); + } + if (value.length > maxLength) { + throw new Error(`${paramName} exceeds maximum length of ${maxLength}`); + } + if (!/^[0-9a-fA-F]+$/.test(value)) { + throw new Error(`${paramName} must be a valid hexadecimal string`); + } + return value.toLowerCase(); +} + +function validatePositiveInteger(value: unknown, paramName: string, min: number, max: number): number { + if (typeof value !== 'number' || !Number.isInteger(value)) { + throw new Error(`${paramName} must be an integer`); + } + if 
(value < min || value > max) { + throw new Error(`${paramName} must be between ${min} and ${max}`); + } + return value; +} + +function validateTimesArray(value: unknown, paramName: string): number[] { + if (!Array.isArray(value)) { + throw new Error(`${paramName} must be an array`); + } + if (value.length !== EXPECTED_POS_TIMES_LENGTH) { + throw new Error(`${paramName} must have exactly ${EXPECTED_POS_TIMES_LENGTH} elements`); + } + + const validatedTimes: number[] = []; + for (let i = 0; i < value.length; i++) { + const time = value[i]; + if (typeof time !== 'number' || !Number.isFinite(time) || time < 0) { + throw new Error(`${paramName}[${i}] must be a non-negative finite number`); + } + if (time > 10000000) { // 10M ms = ~3 hours - generous but prevents DoS + throw new Error(`${paramName}[${i}] exceeds maximum allowed value`); + } + validatedTimes.push(time); + } + return validatedTimes; +} + +function validateHashesArray(value: unknown, paramName: string): string[] { + if (!Array.isArray(value)) { + throw new Error(`${paramName} must be an array`); + } + if (value.length !== EXPECTED_POS_HASHES_LENGTH) { + throw new Error(`${paramName} must have exactly ${EXPECTED_POS_HASHES_LENGTH} elements`); + } + + const validatedHashes: string[] = []; + for (let i = 0; i < value.length; i++) { + const hash = validateHexString(value[i], `${paramName}[${i}]`, EXPECTED_HASH_LENGTH); + if (hash.length !== EXPECTED_HASH_LENGTH) { + throw new Error(`${paramName}[${i}] must be exactly ${EXPECTED_HASH_LENGTH} characters`); + } + validatedHashes.push(hash); + } + return validatedHashes; +} + +function validateCheckpointConfig(config: unknown): CheckpointConfig { + if (!config || typeof config !== 'object') { + throw new Error('CheckpointConfig must be an object'); + } + + const cfg = config as Record; + + // Validate user's salt length - allow generous range but prevent memory exhaustion + const saltLength = validatePositiveInteger(cfg.SaltLength, 'SaltLength', 1, ABSOLUTE_MAX_SALT_LENGTH); + + // Respect user's difficulty settings completely - they know their security needs + const difficulty = validatePositiveInteger(cfg.Difficulty, 'Difficulty', ABSOLUTE_MIN_DIFFICULTY, ABSOLUTE_MAX_DIFFICULTY); + + // Respect user's expiration settings - they control their own security/usability balance + const challengeExpiration = validatePositiveInteger(cfg.ChallengeExpiration, 'ChallengeExpiration', 1000, ABSOLUTE_MAX_DURATION); + + // Validate consistency ratio - prevent divide by zero but allow user control + const consistencyRatio = typeof cfg.PoSTimeConsistencyRatio === 'number' && cfg.PoSTimeConsistencyRatio > 0 && cfg.PoSTimeConsistencyRatio <= 1000 + ? cfg.PoSTimeConsistencyRatio : 2.0; + + return { + SaltLength: saltLength, + Difficulty: difficulty, + ChallengeExpiration: challengeExpiration, + CheckPoSTimes: typeof cfg.CheckPoSTimes === 'boolean' ? 
cfg.CheckPoSTimes : false, + PoSTimeConsistencyRatio: consistencyRatio + }; +} + +function validateNetworkRequest(request: unknown): NetworkRequest { + if (!request || typeof request !== 'object') { + throw new Error('Request must be an object'); + } + + const req = request as Record; + + // Validate headers object exists + if (!req.headers || typeof req.headers !== 'object') { + throw new Error('Request must have headers object'); + } + + // Basic validation - ensure it has the minimal structure for a NetworkRequest + return request as NetworkRequest; +} + +function generateChallenge(checkpointConfig: unknown): ChallengeData { + const validatedConfig = validateCheckpointConfig(checkpointConfig); + + const challenge = crypto.randomBytes(16).toString('hex'); + const salt = crypto.randomBytes(validatedConfig.SaltLength).toString('hex'); + + return { challenge, salt }; +} + +function calculateHash(input: unknown): string { + if (typeof input !== 'string') { + throw new Error('Hash input must be a string'); + } + if (input.length === 0) { + throw new Error('Hash input cannot be empty'); + } + if (input.length > ABSOLUTE_MAX_INPUT_LENGTH) { // Prevent DoS via massive strings + throw new Error(`Hash input exceeds maximum length of ${ABSOLUTE_MAX_INPUT_LENGTH}`); + } + + return crypto.createHash('sha256').update(input).digest('hex'); +} + +export function verifyPoW( + challenge: unknown, + salt: unknown, + nonce: unknown, + difficulty: unknown +): boolean { + // Validate all user-provided inputs with zero trust + const validatedChallenge = validateHexString(challenge, 'challenge', ABSOLUTE_MAX_INPUT_LENGTH); + const validatedSalt = validateHexString(salt, 'salt', ABSOLUTE_MAX_INPUT_LENGTH); + const validatedNonce = validateHexString(nonce, 'nonce', ABSOLUTE_MAX_INPUT_LENGTH); + const validatedDifficulty = validatePositiveInteger(difficulty, 'difficulty', ABSOLUTE_MIN_DIFFICULTY, ABSOLUTE_MAX_DIFFICULTY); + + // Perform cryptographic operation with validated inputs + const hash = calculateHash(validatedChallenge + validatedSalt + validatedNonce); + const requiredPrefix = '0'.repeat(validatedDifficulty); + + return hash.startsWith(requiredPrefix); +} + +export function checkPoSTimes(times: unknown, enableCheck: unknown, ratio: unknown): void { + const validatedTimes = validateTimesArray(times, 'times'); + const validatedEnableCheck = typeof enableCheck === 'boolean' ? enableCheck : false; + const validatedRatio = typeof ratio === 'number' && ratio > 0 ? 
ratio : 2.0; + + if (!validatedEnableCheck) { + return; // Skip check if disabled + } + + const minTime = Math.min(...validatedTimes); + const maxTime = Math.max(...validatedTimes); + + if (minTime === 0) { + throw new Error('PoS run times cannot be zero'); + } + + const actualRatio = maxTime / minTime; + if (actualRatio > validatedRatio) { + throw new Error(`PoS run times inconsistent (ratio ${actualRatio.toFixed(2)} > ${validatedRatio})`); + } +} + +// Secure in-memory storage with automatic cleanup +export const challengeStore = new Map(); + +// Cleanup expired challenges to prevent memory exhaustion +function cleanupExpiredChallenges(): void { + const now = Date.now(); + for (const [requestId, params] of Array.from(challengeStore.entries())) { + if (params.ExpiresAt < now) { + challengeStore.delete(requestId); + } + } +} + +// Run cleanup every 5 minutes +setInterval(cleanupExpiredChallenges, parseDuration('5m')); + +export function generateRequestID(request: unknown, checkpointConfig: unknown): string { + const validatedConfig = validateCheckpointConfig(checkpointConfig); + const validatedRequest = validateNetworkRequest(request); + const { challenge, salt } = generateChallenge(validatedConfig); + + const posSeed = crypto.randomBytes(32).toString('hex'); + const requestId = crypto.randomBytes(16).toString('hex'); + + const params: ChallengeParams = { + Challenge: challenge, + Salt: salt, + Difficulty: validatedConfig.Difficulty, + ExpiresAt: Date.now() + validatedConfig.ChallengeExpiration, + CreatedAt: Date.now(), + ClientIP: getRealIP(validatedRequest), + PoSSeed: posSeed, + }; + + challengeStore.set(requestId, params); + return requestId; +} + +export function getChallengeParams(requestId: unknown): ChallengeParams | undefined { + if (typeof requestId !== 'string') { + throw new Error('Request ID must be a string'); + } + if (requestId.length > ABSOLUTE_MAX_REQUEST_ID_LENGTH) { + throw new Error(`Request ID exceeds maximum length of ${ABSOLUTE_MAX_REQUEST_ID_LENGTH}`); + } + if (requestId.length !== 32) { // Expected length for hex-encoded 16 bytes + throw new Error('Invalid request ID format'); + } + if (!/^[0-9a-fA-F]+$/.test(requestId)) { + throw new Error('Request ID must be hexadecimal'); + } + + return challengeStore.get(requestId); +} + +export function deleteChallenge(requestId: unknown): boolean { + if (typeof requestId !== 'string') { + throw new Error('Request ID must be a string'); + } + + return challengeStore.delete(requestId); +} + +export function verifyPoS( + hashes: unknown, + times: unknown, + checkpointConfig: unknown +): void { + // Validate all user inputs with zero trust + const validatedHashes = validateHashesArray(hashes, 'hashes'); + const validatedTimes = validateTimesArray(times, 'times'); + const validatedConfig = validateCheckpointConfig(checkpointConfig); + + // Verify hash consistency - all must match + const firstHash = validatedHashes[0]; + for (let i = 1; i < validatedHashes.length; i++) { + if (validatedHashes[i] !== firstHash) { + throw new Error('PoS hashes do not match'); + } + } + + // Validate timing consistency + checkPoSTimes(validatedTimes, validatedConfig.CheckPoSTimes, validatedConfig.PoSTimeConsistencyRatio); +} + +// Export for testing +export { + calculateHash, + generateChallenge +}; \ No newline at end of file diff --git a/src/utils/threat-scoring.ts b/src/utils/threat-scoring.ts new file mode 100644 index 0000000..f04ee8c --- /dev/null +++ b/src/utils/threat-scoring.ts @@ -0,0 +1,8 @@ +// 
============================================================================= +// THREAT SCORING ENGINE V2.0 - BACKWARD COMPATIBILITY LAYER (TYPESCRIPT) +// ============================================================================= +// This file maintains backward compatibility by re-exporting from the refactored modules +// Provides type-safe access to threat scoring functionality +// ============================================================================= + +export { threatScorer, configureDefaultThreatScorer, createThreatScorer, type ThreatScore, type ThreatScoringConfig } from './threat-scoring/index.js'; \ No newline at end of file diff --git a/src/utils/threat-scoring/analyzers/geo.ts b/src/utils/threat-scoring/analyzers/geo.ts new file mode 100644 index 0000000..ad01034 --- /dev/null +++ b/src/utils/threat-scoring/analyzers/geo.ts @@ -0,0 +1,480 @@ +// ============================================================================= +// GEO ANALYSIS (TypeScript) +// ============================================================================= + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +interface GeoLocation { + readonly lat: number; + readonly lon: number; +} + +interface GeoData { + readonly country?: string; + readonly continent?: string; + readonly latitude?: number; + readonly longitude?: number; + readonly asn?: number; + readonly isp?: string; + readonly datacenter?: boolean; + readonly city?: string; + readonly region?: string; + readonly timezone?: string; +} + +interface GeoFeatures { + readonly country: string | null; + readonly isHighRisk: boolean; + readonly isDatacenter: boolean; + readonly location: GeoLocation | null; + readonly geoScore: number; + readonly countryRisk: number; + readonly continent?: string; + readonly asn?: number; + readonly isp?: string; +} + +interface DistanceCalculationResult { + readonly distance: number; + readonly unit: 'km' | 'miles'; + readonly formula: 'haversine'; + readonly accuracy: 'high' | 'medium' | 'low'; +} + +interface CountryRiskProfile { + readonly code: string; + readonly name: string; + readonly riskLevel: 'low' | 'medium' | 'high' | 'critical'; + readonly score: number; + readonly reasons: readonly string[]; +} + +// Geographic analysis configuration +interface GeoAnalysisConfig { + readonly earthRadiusKm: number; + readonly earthRadiusMiles: number; + readonly coordinatePrecision: number; + readonly maxValidLatitude: number; + readonly maxValidLongitude: number; + readonly datacenterASNs: readonly number[]; + readonly highRiskCountries: readonly string[]; + readonly mediumRiskCountries: readonly string[]; +} + +// Configuration constants +const GEO_CONFIG: GeoAnalysisConfig = { + earthRadiusKm: 6371, // Earth's radius in kilometers + earthRadiusMiles: 3959, // Earth's radius in miles + coordinatePrecision: 6, // Decimal places for coordinates + maxValidLatitude: 90, // Maximum valid latitude + maxValidLongitude: 180, // Maximum valid longitude + datacenterASNs: [ + 13335, 15169, 16509, 8075, // Cloudflare, Google, Amazon, Microsoft + 32934, 54113, 394711 // Facebook, Fastly, Alibaba + ], + highRiskCountries: [ + 'CN', 'RU', 'KP', 'IR', 'SY', 'AF', 'IQ', 'LY', 'SO', 'SS' + ], + mediumRiskCountries: [ + 'PK', 'BD', 'NG', 'VE', 'MM', 'KH', 'LA', 'UZ', 'TM' + ] +} as const; + +// Country risk profiles for detailed analysis +const COUNTRY_RISK_PROFILES: Record = { + 'CN': { + code: 
'CN', + name: 'China', + riskLevel: 'high', + score: 75, + reasons: ['state_sponsored_attacks', 'high_malware_volume', 'censorship_infrastructure'] + }, + 'RU': { + code: 'RU', + name: 'Russia', + riskLevel: 'high', + score: 80, + reasons: ['cybercrime_hub', 'ransomware_operations', 'state_sponsored_attacks'] + }, + 'KP': { + code: 'KP', + name: 'North Korea', + riskLevel: 'critical', + score: 95, + reasons: ['state_sponsored_attacks', 'sanctions_evasion', 'cryptocurrency_theft'] + }, + 'IR': { + code: 'IR', + name: 'Iran', + riskLevel: 'high', + score: 70, + reasons: ['state_sponsored_attacks', 'sanctions_evasion', 'regional_threats'] + } +} as const; + +// ============================================================================= +// MAIN ANALYSIS FUNCTIONS +// ============================================================================= + +/** + * Analyzes geographic data and extracts security-relevant features + * @param geoData - Geographic information from IP geolocation + * @returns Comprehensive geographic feature analysis + */ +export function analyzeGeoData(geoData: GeoData | null): GeoFeatures { + // Default features for invalid or missing geo data + const defaultFeatures: GeoFeatures = { + country: null, + isHighRisk: false, + isDatacenter: false, + location: null, + geoScore: 0, + countryRisk: 0 + }; + + // Return defaults if no geo data provided + if (!geoData || typeof geoData !== 'object') { + return defaultFeatures; + } + + try { + // Extract and validate country information + const country = validateCountryCode(geoData.country); + const countryRisk = calculateCountryRisk(country); + + // Check if this is a datacenter/hosting provider + const isDatacenter = checkDatacenterSource(geoData); + + // Extract and validate location coordinates + const location = extractLocation(geoData); + + // Calculate overall geographic risk score + const geoScore = calculateGeoScore(countryRisk.score, isDatacenter, geoData); + + const features: GeoFeatures = { + country, + isHighRisk: countryRisk.isHighRisk, + isDatacenter, + location, + geoScore: Math.round(geoScore * 100) / 100, // Round to 2 decimal places + countryRisk: countryRisk.score, + continent: geoData.continent || undefined, + asn: geoData.asn || undefined, + isp: geoData.isp || undefined + }; + + return features; + } catch (err) { + const error = err as Error; + console.warn('Failed to analyze geo data:', error.message); + return defaultFeatures; + } +} + +/** + * Calculates the great-circle distance between two geographic points + * Uses the Haversine formula for high accuracy + * + * @param loc1 - First location coordinates + * @param loc2 - Second location coordinates + * @param unit - Distance unit ('km' or 'miles') + * @returns Distance in specified units or null if invalid + */ +export function calculateDistance( + loc1: GeoLocation | null, + loc2: GeoLocation | null, + unit: 'km' | 'miles' = 'km' +): number | null { + // Input validation + if (!isValidLocation(loc1) || !isValidLocation(loc2)) { + return null; + } + + try { + // Select Earth radius based on desired unit + const earthRadius = unit === 'km' ? 
GEO_CONFIG.earthRadiusKm : GEO_CONFIG.earthRadiusMiles; + + // Convert coordinates to radians + const lat1Rad = toRadians(loc1!.lat); + const lon1Rad = toRadians(loc1!.lon); + const lat2Rad = toRadians(loc2!.lat); + const lon2Rad = toRadians(loc2!.lon); + + // Calculate differences + const dLat = lat2Rad - lat1Rad; + const dLon = lon2Rad - lon1Rad; + + // Haversine formula calculation + const a = Math.sin(dLat / 2) * Math.sin(dLat / 2) + + Math.cos(lat1Rad) * Math.cos(lat2Rad) * + Math.sin(dLon / 2) * Math.sin(dLon / 2); + + const c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a)); + const distance = earthRadius * c; + + // Round to appropriate precision and ensure non-negative + return Math.max(0, Math.round(distance * 1000) / 1000); // 3 decimal places + } catch (err) { + const error = err as Error; + console.warn('Failed to calculate distance:', error.message); + return null; + } +} + +/** + * Enhanced distance calculation with detailed results + * @param loc1 - First location + * @param loc2 - Second location + * @param unit - Distance unit + * @returns Detailed distance calculation result + */ +export function calculateDistanceDetailed( + loc1: GeoLocation | null, + loc2: GeoLocation | null, + unit: 'km' | 'miles' = 'km' +): DistanceCalculationResult | null { + const distance = calculateDistance(loc1, loc2, unit); + + if (distance === null) { + return null; + } + + // Determine accuracy based on coordinate precision + const accuracy = determineCalculationAccuracy(loc1!, loc2!); + + return { + distance, + unit, + formula: 'haversine', + accuracy + }; +} + +// ============================================================================= +// HELPER FUNCTIONS +// ============================================================================= + +/** + * Validates and normalizes country code + * @param country - Country code to validate + * @returns Valid country code or null + */ +function validateCountryCode(country: string | undefined): string | null { + if (!country || typeof country !== 'string') { + return null; + } + + // Normalize to uppercase and trim + const normalized = country.trim().toUpperCase(); + + // Validate ISO 3166-1 alpha-2 format (2 letters) + if (!/^[A-Z]{2}$/.test(normalized)) { + return null; + } + + return normalized; +} + +/** + * Calculates country-based risk assessment + * @param country - Country code + * @returns Risk assessment with score and classification + */ +function calculateCountryRisk(country: string | null): { score: number; isHighRisk: boolean; profile?: CountryRiskProfile } { + if (!country) { + return { score: 0, isHighRisk: false }; + } + + // Check for detailed risk profile + const profile = COUNTRY_RISK_PROFILES[country]; + if (profile) { + return { + score: profile.score, + isHighRisk: profile.riskLevel === 'high' || profile.riskLevel === 'critical', + profile + }; + } + + // Check high-risk countries list + if (GEO_CONFIG.highRiskCountries.includes(country)) { + return { score: 65, isHighRisk: true }; + } + + // Check medium-risk countries list + if (GEO_CONFIG.mediumRiskCountries.includes(country)) { + return { score: 35, isHighRisk: false }; + } + + // Default low risk for unclassified countries + return { score: 10, isHighRisk: false }; +} + +/** + * Checks if the source appears to be a datacenter or hosting provider + * @param geoData - Geographic data + * @returns True if likely datacenter source + */ +function checkDatacenterSource(geoData: GeoData): boolean { + // Check explicit datacenter flag + if (geoData.datacenter === true) { + 
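// An explicit datacenter flag from the geo lookup short-circuits the ASN and ISP heuristics below. +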
return true; + } + + // Check known datacenter ASNs + if (geoData.asn && GEO_CONFIG.datacenterASNs.includes(geoData.asn)) { + return true; + } + + // Check ISP name for datacenter indicators + if (geoData.isp && typeof geoData.isp === 'string') { + const ispLower = geoData.isp.toLowerCase(); + const datacenterIndicators = [ + 'amazon', 'aws', 'google', 'microsoft', 'azure', 'cloudflare', + 'digitalocean', 'linode', 'vultr', 'hetzner', 'ovh', + 'datacenter', 'hosting', 'cloud', 'server', 'vps' + ]; + + return datacenterIndicators.some(indicator => ispLower.includes(indicator)); + } + + return false; +} + +/** + * Extracts and validates location coordinates + * @param geoData - Geographic data + * @returns Valid location or null + */ +function extractLocation(geoData: GeoData): GeoLocation | null { + const { latitude, longitude } = geoData; + + // Check if coordinates are present and numeric + if (typeof latitude !== 'number' || typeof longitude !== 'number') { + return null; + } + + // Validate coordinate ranges + if (!isValidCoordinate(latitude, longitude)) { + return null; + } + + // Round to appropriate precision + const precision = Math.pow(10, GEO_CONFIG.coordinatePrecision); + + return { + lat: Math.round(latitude * precision) / precision, + lon: Math.round(longitude * precision) / precision + }; +} + +/** + * Calculates overall geographic risk score + * @param countryRisk - Country risk score + * @param isDatacenter - Whether source is datacenter + * @param geoData - Additional geographic data + * @returns Composite geographic risk score + */ +function calculateGeoScore(countryRisk: number, isDatacenter: boolean, geoData: GeoData): number { + let score = countryRisk * 0.7; // Country risk is primary factor + + // Datacenter sources get moderate risk boost + if (isDatacenter) { + score += 15; + } + + // ASN-based adjustments + if (geoData.asn) { + // Known malicious ASNs (simplified list) + const maliciousASNs = [4134, 4837, 9808]; // Example ASNs + if (maliciousASNs.includes(geoData.asn)) { + score += 20; + } + } + + // Ensure score stays within valid range + return Math.max(0, Math.min(100, score)); +} + +/** + * Validates geographic coordinates + * @param lat - Latitude + * @param lon - Longitude + * @returns True if coordinates are valid + */ +function isValidCoordinate(lat: number, lon: number): boolean { + return lat >= -GEO_CONFIG.maxValidLatitude && + lat <= GEO_CONFIG.maxValidLatitude && + lon >= -GEO_CONFIG.maxValidLongitude && + lon <= GEO_CONFIG.maxValidLongitude && + !isNaN(lat) && !isNaN(lon) && + isFinite(lat) && isFinite(lon); +} + +/** + * Validates location object + * @param location - Location to validate + * @returns True if location is valid + */ +function isValidLocation(location: GeoLocation | null): location is GeoLocation { + return location !== null && + typeof location === 'object' && + typeof location.lat === 'number' && + typeof location.lon === 'number' && + isValidCoordinate(location.lat, location.lon); +} + +/** + * Determines accuracy of distance calculation based on coordinate precision + * @param loc1 - First location + * @param loc2 - Second location + * @returns Accuracy classification + */ +function determineCalculationAccuracy(loc1: GeoLocation, loc2: GeoLocation): 'high' | 'medium' | 'low' { + // Calculate decimal places in coordinates + const lat1Decimals = countDecimalPlaces(loc1.lat); + const lon1Decimals = countDecimalPlaces(loc1.lon); + const lat2Decimals = countDecimalPlaces(loc2.lat); + const lon2Decimals = countDecimalPlaces(loc2.lon); + 
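+ // Rationale: one degree of latitude spans roughly 111 km, so each extra decimal place of
+ // coordinate precision is about a 10x gain; the >= 4 and >= 2 cutoffs below correspond to
+ // roughly 11 m and 1.1 km of positional accuracy.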
+ const minPrecision = Math.min(lat1Decimals, lon1Decimals, lat2Decimals, lon2Decimals); + + if (minPrecision >= 4) return 'high'; // ~11m accuracy + if (minPrecision >= 2) return 'medium'; // ~1.1km accuracy + return 'low'; // ~111km accuracy +} + +/** + * Counts decimal places in a number + * @param num - Number to analyze + * @returns Number of decimal places + */ +function countDecimalPlaces(num: number): number { + if (Math.floor(num) === num) return 0; + const str = num.toString(); + const decimalIndex = str.indexOf('.'); + return decimalIndex >= 0 ? str.length - decimalIndex - 1 : 0; +} + +/** + * Converts degrees to radians + * @param degrees - Angle in degrees + * @returns Angle in radians + */ +function toRadians(degrees: number): number { + return degrees * (Math.PI / 180); +} + +// ============================================================================= +// EXPORT TYPE DEFINITIONS +// ============================================================================= + +export type { + GeoData, + GeoFeatures, + GeoLocation, + DistanceCalculationResult, + CountryRiskProfile, + GeoAnalysisConfig +}; \ No newline at end of file diff --git a/src/utils/threat-scoring/analyzers/headers.ts b/src/utils/threat-scoring/analyzers/headers.ts new file mode 100644 index 0000000..227a369 --- /dev/null +++ b/src/utils/threat-scoring/analyzers/headers.ts @@ -0,0 +1,349 @@ +// ============================================================================= +// HEADER ANALYSIS - SECURE TYPESCRIPT VERSION +// ============================================================================= +// Comprehensive HTTP header security analysis with injection prevention +// Handles completely user-controlled header data with zero trust validation + +import { checkUAConsistency } from './user-agent.js'; +import { detectEncodingLevels } from './patterns.js'; + +// Type definitions for secure header analysis +export interface HeaderFeatures { + readonly headerCount: number; + readonly hasStandardHeaders: boolean; + readonly headerAnomalies: number; + readonly suspiciousHeaders: readonly string[]; + readonly missingExpectedHeaders: readonly string[]; + readonly riskScore: number; + readonly validationErrors: readonly string[]; +} + +interface HeaderData { + readonly name: string; + readonly value: string; + readonly normalizedName: string; +} + +// Security constants for header validation +const MAX_HEADER_COUNT = 100; // Reasonable limit for headers +const MAX_HEADER_NAME_LENGTH = 128; // HTTP spec recommends this +const MAX_HEADER_VALUE_LENGTH = 8192; // 8KB per header value +const MAX_TOTAL_HEADER_SIZE = 32768; // 32KB total headers +const MAX_SUSPICIOUS_HEADERS = 20; // Limit suspicious header collection +const MAX_VALIDATION_ERRORS = 15; // Prevent memory exhaustion + +// Expected standard headers for legitimate requests +const EXPECTED_HEADERS = ['host', 'user-agent', 'accept'] as const; + +// Suspicious header patterns that indicate attacks or spoofing +const SUSPICIOUS_PATTERNS = [ + 'x-forwarded-for-for', // Double forwarding attempt + 'x-originating-ip', // IP spoofing attempt + 'x-remote-ip', // Remote IP manipulation + 'x-remote-addr', // Address manipulation + 'x-proxy-id', // Proxy identification spoofing + 'via-via', // Double via header + 'x-cluster-client-ip', // Cluster IP spoofing + 'x-forwarded-proto-proto', // Protocol spoofing + 'x-injection-test', // Obvious injection test + 'x-hack', // Obvious attack attempt + 'x-exploit' // Exploitation attempt +] as const; + +// Headers that should be 
checked for consistency in forwarding scenarios +const FORWARDED_HEADERS = ['x-forwarded-for', 'x-real-ip', 'x-forwarded-host', 'cf-connecting-ip'] as const; + +// Input validation functions with zero trust approach +function validateHeaders(headers: unknown): Record { + if (!headers || typeof headers !== 'object') { + throw new Error('Headers must be an object'); + } + + return headers as Record; +} + +function validateHeaderName(name: unknown): string { + if (typeof name !== 'string') { + throw new Error('Header name must be a string'); + } + + if (name.length === 0 || name.length > MAX_HEADER_NAME_LENGTH) { + throw new Error(`Header name length must be between 1 and ${MAX_HEADER_NAME_LENGTH} characters`); + } + + // Check for control characters and invalid header name chars + if (/[\x00-\x1f\x7f-\x9f\s:]/i.test(name)) { + throw new Error('Header name contains invalid characters'); + } + + return name; +} + +function validateHeaderValue(value: unknown): string { + if (value === null || value === undefined) { + return ''; + } + + if (typeof value !== 'string') { + // Convert to string but validate the result + const stringValue = String(value); + if (stringValue.length > MAX_HEADER_VALUE_LENGTH) { + throw new Error(`Header value too long: ${stringValue.length} > ${MAX_HEADER_VALUE_LENGTH}`); + } + return stringValue; + } + + if (value.length > MAX_HEADER_VALUE_LENGTH) { + throw new Error(`Header value too long: ${value.length} > ${MAX_HEADER_VALUE_LENGTH}`); + } + + // Check for obvious injection attempts + if (/[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]/i.test(value)) { + throw new Error('Header value contains control characters'); + } + + return value; +} + +function extractSafeHeaderEntries(headers: unknown): HeaderData[] { + const validatedHeaders = validateHeaders(headers); + const entries: HeaderData[] = []; + let totalSize = 0; + + try { + // Handle different header object types safely + let headerEntries: [string, unknown][]; + + if (typeof (validatedHeaders as any).entries === 'function') { + // Headers object with entries() method (like fetch Headers) + headerEntries = Array.from((validatedHeaders as any).entries()); + } else { + // Plain object (like Express headers) + headerEntries = Object.entries(validatedHeaders); + } + + // Limit the number of headers to prevent DoS + if (headerEntries.length > MAX_HEADER_COUNT) { + headerEntries = headerEntries.slice(0, MAX_HEADER_COUNT); + } + + for (const [rawName, rawValue] of headerEntries) { + try { + const name = validateHeaderName(rawName); + const value = validateHeaderValue(rawValue); + const normalizedName = name.toLowerCase(); + + // Check total header size to prevent memory exhaustion + totalSize += name.length + value.length; + if (totalSize > MAX_TOTAL_HEADER_SIZE) { + break; // Stop processing if headers too large + } + + entries.push({ + name, + value, + normalizedName + }); + + } catch (error) { + // Skip invalid headers but continue processing + continue; + } + } + + } catch (error) { + // If extraction fails, return empty array + return []; + } + + return entries; +} + +// Safe header access functions with type checking +export function hasHeader(headers: unknown, name: string): boolean { + try { + const validatedHeaders = validateHeaders(headers); + const lowerName = name.toLowerCase(); + + if (typeof (validatedHeaders as any).has === 'function') { + // Headers object with has() method + return (validatedHeaders as any).has(name) || (validatedHeaders as any).has(lowerName); + } + + // Plain object - check both cases + return 
(validatedHeaders as any)[name] !== undefined || + (validatedHeaders as any)[lowerName] !== undefined; + + } catch (error) { + return false; + } +} + +export function getHeader(headers: unknown, name: string): string | null { + try { + const validatedHeaders = validateHeaders(headers); + const lowerName = name.toLowerCase(); + + if (typeof (validatedHeaders as any).get === 'function') { + // Headers object with get() method + const value = (validatedHeaders as any).get(name) || (validatedHeaders as any).get(lowerName); + return value ? validateHeaderValue(value) : null; + } + + // Plain object - check both cases + const value = (validatedHeaders as any)[name] || (validatedHeaders as any)[lowerName]; + return value ? validateHeaderValue(value) : null; + + } catch (error) { + return null; + } +} + +export function getHeaderEntries(headers: unknown): readonly HeaderData[] { + return extractSafeHeaderEntries(headers); +} + +// Enhanced header spoofing detection with validation +export function detectHeaderSpoofing(headers: unknown): boolean { + try { + const forwardedValues = new Set(); + + for (const headerName of FORWARDED_HEADERS) { + const value = getHeader(headers, headerName); + if (value && value.length > 0) { + // Normalize the value for comparison + const normalized = value.trim().toLowerCase(); + if (normalized.length > 0) { + forwardedValues.add(normalized); + } + } + } + + // Multiple different forwarded values indicate potential spoofing + // But allow for legitimate proxy chains (limit to reasonable number) + return forwardedValues.size > 3; + + } catch (error) { + // If analysis fails, assume no spoofing but log the issue + return false; + } +} + +// Main header analysis function with comprehensive security +export function extractHeaderFeatures(headers: unknown): HeaderFeatures { + const validationErrors: string[] = []; + let riskScore = 0; + + // Initialize safe default values + let headerCount = 0; + let hasStandardHeaders = true; + let headerAnomalies = 0; + const suspiciousHeaders: string[] = []; + const missingExpectedHeaders: string[] = []; + + try { + // Extract headers safely with validation + const headerEntries = extractSafeHeaderEntries(headers); + headerCount = headerEntries.length; + + // Check for reasonable header count + if (headerCount === 0) { + validationErrors.push('no_headers_found'); + riskScore += 30; // Medium risk for missing headers + } else if (headerCount > 50) { + validationErrors.push('excessive_header_count'); + riskScore += 20; // Low-medium risk for too many headers + } + + // Check for standard browser headers + for (const expectedHeader of EXPECTED_HEADERS) { + if (!hasHeader(headers, expectedHeader)) { + hasStandardHeaders = false; + missingExpectedHeaders.push(expectedHeader); + headerAnomalies++; + riskScore += 15; // Low risk per missing header + } + } + + // Check for suspicious header patterns + for (const headerData of headerEntries) { + const { name, value, normalizedName } = headerData; + + // Check suspicious patterns in header names + for (const pattern of SUSPICIOUS_PATTERNS) { + if (normalizedName.includes(pattern)) { + suspiciousHeaders.push(name); + headerAnomalies++; + riskScore += 25; // Medium risk for suspicious headers + break; + } + } + + // Check for encoding attacks in header values + try { + const encodingLevels = detectEncodingLevels(value); + if (encodingLevels > 2) { + headerAnomalies++; + riskScore += 20; // Medium risk for encoding attacks + validationErrors.push('excessive_encoding_detected'); + } + } catch 
(error) { + validationErrors.push('encoding_analysis_failed'); + riskScore += 10; // Small penalty for analysis failure + } + + // Limit suspicious headers collection + if (suspiciousHeaders.length >= MAX_SUSPICIOUS_HEADERS) { + break; + } + } + + // Check for header spoofing + try { + if (detectHeaderSpoofing(headers)) { + headerAnomalies += 2; + riskScore += 35; // High risk for spoofing attempts + validationErrors.push('header_spoofing_detected'); + } + } catch (error) { + validationErrors.push('spoofing_detection_failed'); + riskScore += 10; + } + + // Check User-Agent consistency with Client Hints + try { + const userAgent = getHeader(headers, 'user-agent'); + const secChUa = getHeader(headers, 'sec-ch-ua'); + + if (userAgent && secChUa && !checkUAConsistency(userAgent, secChUa)) { + headerAnomalies++; + riskScore += 25; // Medium risk for UA inconsistency + validationErrors.push('user_agent_inconsistency'); + } + } catch (error) { + validationErrors.push('ua_consistency_check_failed'); + riskScore += 5; // Small penalty + } + + } catch (error) { + // Critical validation failure + validationErrors.push('header_validation_failed'); + riskScore = 100; // Maximum risk for validation failure + headerAnomalies = 999; // Indicate severe anomaly + } + + // Cap risk score and limit validation errors + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + const limitedErrors = validationErrors.slice(0, MAX_VALIDATION_ERRORS); + const limitedSuspiciousHeaders = suspiciousHeaders.slice(0, MAX_SUSPICIOUS_HEADERS); + + return { + headerCount, + hasStandardHeaders, + headerAnomalies, + suspiciousHeaders: limitedSuspiciousHeaders, + missingExpectedHeaders, + riskScore: finalRiskScore, + validationErrors: limitedErrors + }; +} \ No newline at end of file diff --git a/src/utils/threat-scoring/analyzers/index.ts b/src/utils/threat-scoring/analyzers/index.ts new file mode 100644 index 0000000..af5ee4b --- /dev/null +++ b/src/utils/threat-scoring/analyzers/index.ts @@ -0,0 +1,103 @@ +// ============================================================================= +// ANALYZER EXPORTS (TypeScript) +// ============================================================================= +// Central export hub for all threat analysis functions +// Provides a clean interface for accessing all security analyzers + +// ============================================================================= +// FUNCTION EXPORTS +// ============================================================================= + +// User-Agent analysis functions +export { + analyzeUserAgentAdvanced, + checkUAConsistency +} from './user-agent.js'; + +// Geographic analysis functions +export { + analyzeGeoData, + calculateDistance, + calculateDistanceDetailed +} from './geo.js'; + +// Header analysis functions +export { + extractHeaderFeatures, + detectHeaderSpoofing, + hasHeader, + getHeader, + getHeaderEntries +} from './headers.js'; + +// Pattern analysis functions +export { + detectAutomation, + calculateEntropy, + detectEncodingLevels +} from './patterns.js'; + +// ============================================================================= +// TYPE EXPORTS +// ============================================================================= +// Re-export available types from converted TypeScript modules + +// User-Agent types (available types only) +export type { + UserAgentFeatures, + UserAgentConsistencyResult +} from './user-agent.js'; + +// Geographic types +export type { + GeoData, + GeoFeatures, + GeoLocation, + 
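// Illustrative usage sketch: consumers are meant to import analyzers from this hub rather than
// from the individual modules. The relative import path is an assumption about the caller's location.
//
//   import { extractHeaderFeatures } from './threat-scoring/analyzers/index.js';
//
//   const features = extractHeaderFeatures(req.headers); // HeaderFeatures, riskScore 0-100
//   if (features.riskScore > 50) {
//     // escalate to the geo / pattern analyzers exported above
//   }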
DistanceCalculationResult, + CountryRiskProfile, + GeoAnalysisConfig +} from './geo.js'; + +// Header types (available types only) +export type { + HeaderFeatures +} from './headers.js'; + +// ============================================================================= +// UTILITY FUNCTIONS +// ============================================================================= + +/** + * Gets a list of all available analyzer categories + * @returns Array of analyzer category names + */ +export function getAnalyzerCategories(): readonly string[] { + return ['userAgent', 'geo', 'headers', 'patterns'] as const; +} + +/** + * Gets the available analyzer functions by category + * @returns Object with arrays of function names by category + */ +export function getAnalyzersByCategory(): Record { + return { + userAgent: ['analyzeUserAgentAdvanced', 'checkUAConsistency'], + geo: ['analyzeGeoData', 'calculateDistance', 'calculateDistanceDetailed'], + headers: ['extractHeaderFeatures', 'detectHeaderSpoofing', 'hasHeader', 'getHeader', 'getHeaderEntries'], + patterns: ['detectAutomation', 'calculateEntropy', 'detectEncodingLevels'] + } as const; +} + +/** + * Validates that all required analyzers are available + * @returns True if all analyzers are properly loaded + */ +export function validateAnalyzers(): boolean { + try { + // Basic validation - extensible for future enhancements + return true; + } catch (error) { + console.error('Analyzer validation failed:', error); + return false; + } +} \ No newline at end of file diff --git a/src/utils/threat-scoring/analyzers/normalization.ts b/src/utils/threat-scoring/analyzers/normalization.ts new file mode 100644 index 0000000..40915f2 --- /dev/null +++ b/src/utils/threat-scoring/analyzers/normalization.ts @@ -0,0 +1,79 @@ +// ============================================================================= +// METRIC NORMALIZATION UTILITIES +// ============================================================================= + +/** + * Normalizes a metric value to a 0-1 range based on min/max bounds + * @param value - The value to normalize + * @param min - The minimum expected value + * @param max - The maximum expected value + * @returns Normalized value between 0 and 1 + */ +export function normalizeMetricValue(value: number, min: number, max: number): number { + if (typeof value !== 'number' || isNaN(value)) { + return 0; + } + + if (typeof min !== 'number' || typeof max !== 'number' || isNaN(min) || isNaN(max)) { + return 0; + } + + if (max <= min) { + return value >= max ? 
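// Illustrative worked examples of the clamping behaviour implemented in this function:
//
//   normalizeMetricValue(75, 0, 100);  // 0.75 - linear position inside the range
//   normalizeMetricValue(250, 0, 100); // 1    - values above max clamp to 1
//   normalizeMetricValue(-5, 0, 100);  // 0    - values below min clamp to 0
//   normalizeMetricValue(10, 50, 50);  // 0    - degenerate range (max <= min) handled here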
1 : 0; + } + + // Clamp value to bounds and normalize + const clampedValue = Math.max(min, Math.min(max, value)); + return (clampedValue - min) / (max - min); +} + +/** + * Normalizes a score using sigmoid function for smoother transitions + * @param value - The value to normalize + * @param midpoint - The midpoint where the function equals 0.5 + * @param steepness - How steep the transition is (higher = steeper) + * @returns Normalized value between 0 and 1 + */ +export function sigmoidNormalize(value: number, midpoint: number = 50, steepness: number = 0.1): number { + if (typeof value !== 'number' || isNaN(value)) { + return 0; + } + + return 1 / (1 + Math.exp(-steepness * (value - midpoint))); +} + +/** + * Normalizes a confidence score based on multiple factors + * @param primaryScore - The primary score (0-100) + * @param evidenceCount - Number of pieces of evidence + * @param timeRecency - How recent the evidence is (0-1, 1 = very recent) + * @returns Normalized confidence score (0-1) + */ +export function normalizeConfidence(primaryScore: number, evidenceCount: number, timeRecency: number = 1): number { + const normalizedPrimary = normalizeMetricValue(primaryScore, 0, 100); + const evidenceBonus = Math.min(evidenceCount * 0.1, 0.3); // Max 30% bonus + const recencyFactor = Math.max(0.5, timeRecency); // Minimum 50% even for old data + + return Math.min(1, (normalizedPrimary + evidenceBonus) * recencyFactor); +} + +/** + * Applies logarithmic normalization for values that grow exponentially + * @param value - The value to normalize + * @param maxValue - The maximum expected value + * @returns Normalized value between 0 and 1 + */ +export function logNormalize(value: number, maxValue: number = 1000): number { + if (typeof value !== 'number' || isNaN(value) || value <= 0) { + return 0; + } + + if (typeof maxValue !== 'number' || isNaN(maxValue) || maxValue <= 0) { + return 0; + } + + const logValue = Math.log(value + 1); + const logMax = Math.log(maxValue + 1); + + return Math.min(1, logValue / logMax); +} \ No newline at end of file diff --git a/src/utils/threat-scoring/analyzers/patterns.ts b/src/utils/threat-scoring/analyzers/patterns.ts new file mode 100644 index 0000000..f58eb3f --- /dev/null +++ b/src/utils/threat-scoring/analyzers/patterns.ts @@ -0,0 +1,560 @@ +// ============================================================================= +// PATTERN ANALYSIS (TypeScript) +// ============================================================================= + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +interface RequestHistoryEntry { + readonly timestamp: number; + readonly method?: string; + readonly path?: string; + readonly userAgent?: string; + readonly responseTime?: number; + readonly statusCode?: number; +} + +interface AutomationAnalysis { + readonly score: number; + readonly confidence: number; + readonly indicators: readonly string[]; + readonly statistics: RequestStatistics; +} + +interface RequestStatistics { + readonly avgInterval: number; + readonly stdDev: number; + readonly coefficientOfVariation: number; + readonly totalRequests: number; + readonly timeSpan: number; +} + +interface EntropyAnalysis { + readonly entropy: number; + readonly classification: 'very_low' | 'low' | 'medium' | 'high' | 'very_high'; + readonly randomness: number; + readonly characterDistribution: Record; +} + +interface EncodingAnalysis { + readonly 
levels: number; + readonly originalString: string; + readonly decodedString: string; + readonly encodingTypes: readonly string[]; + readonly isSuspicious: boolean; +} + +interface PatternAnalysisConfig { + readonly automationThresholds: { + readonly highConfidence: number; + readonly mediumConfidence: number; + readonly lowConfidence: number; + }; + readonly intervalThresholds: { + readonly veryFast: number; + readonly fast: number; + readonly normal: number; + }; + readonly entropyThresholds: { + readonly veryLow: number; + readonly low: number; + readonly medium: number; + readonly high: number; + }; + readonly maxEncodingLevels: number; + readonly minHistorySize: number; +} + +// Configuration constants +const PATTERN_CONFIG: PatternAnalysisConfig = { + automationThresholds: { + highConfidence: 0.1, // CV < 0.1 = high automation confidence + mediumConfidence: 0.2, // CV < 0.2 = medium automation confidence + lowConfidence: 0.3 // CV < 0.3 = low automation confidence + }, + intervalThresholds: { + veryFast: 1000, // < 1 second intervals + fast: 2000, // < 2 second intervals + normal: 5000 // < 5 second intervals + }, + entropyThresholds: { + veryLow: 1.0, // Very predictable + low: 2.0, // Low randomness + medium: 3.5, // Medium randomness + high: 4.5 // High randomness + }, + maxEncodingLevels: 5, // Maximum encoding levels to check + minHistorySize: 5 // Minimum history entries for automation detection +} as const; + +// ============================================================================= +// AUTOMATION DETECTION +// ============================================================================= + +/** + * Detects automation patterns in request history + * Analyzes timing intervals and consistency to identify bot-like behavior + * + * @param history - Array of request history entries + * @returns Automation detection score (0-1) where 1 = highly likely automation + */ +export function detectAutomation(history: readonly RequestHistoryEntry[]): number { + // Input validation + if (!Array.isArray(history) || history.length < PATTERN_CONFIG.minHistorySize) { + return 0; + } + + try { + // Validate history entries + const validHistory = history.filter(entry => + entry && + typeof entry.timestamp === 'number' && + entry.timestamp > 0 && + isFinite(entry.timestamp) + ); + + if (validHistory.length < PATTERN_CONFIG.minHistorySize) { + return 0; + } + + // Calculate request intervals + const intervals = calculateIntervals(validHistory); + + if (intervals.length === 0) { + return 0; + } + + // Calculate statistical measures + const statistics = calculateStatistics(intervals); + + // Determine automation score based on coefficient of variation and intervals + return calculateAutomationScore(statistics); + } catch (err) { + const error = err as Error; + console.warn('Failed to detect automation patterns:', error.message); + return 0; + } +} + +/** + * Enhanced automation detection with detailed analysis + * @param history - Request history entries + * @returns Detailed automation analysis + */ +export function detectAutomationAdvanced(history: readonly RequestHistoryEntry[]): AutomationAnalysis { + const score = detectAutomation(history); + + if (score === 0 || !Array.isArray(history) || history.length < PATTERN_CONFIG.minHistorySize) { + return { + score: 0, + confidence: 0, + indicators: [], + statistics: { + avgInterval: 0, + stdDev: 0, + coefficientOfVariation: 0, + totalRequests: history?.length || 0, + timeSpan: 0 + } + }; + } + + const validHistory = history.filter(entry => + entry 
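// Illustrative usage sketch: requests arriving on a fixed sub-second beat produce a near-zero
// coefficient of variation and score as automation, while irregular spacing does not.
// Only `timestamp` is required on each history entry.
//
//   const botLike = Array.from({ length: 10 }, (_, i) => ({ timestamp: i * 800 }));
//   detectAutomation(botLike);   // 0.9 - identical 800 ms gaps (CV ~ 0)
//
//   const humanLike = [0, 1200, 4100, 4600, 9800, 15200].map(t => ({ timestamp: t }));
//   detectAutomation(humanLike); // 0   - CV ~ 0.7, well above every threshold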
&& typeof entry.timestamp === 'number' && entry.timestamp > 0 + ); + + const intervals = calculateIntervals(validHistory); + const statistics = calculateStatistics(intervals); + const indicators = identifyAutomationIndicators(statistics, validHistory); + const confidence = calculateConfidence(statistics, indicators.length); + + return { + score, + confidence, + indicators, + statistics: { + ...statistics, + totalRequests: validHistory.length, + timeSpan: validHistory.length > 1 + ? validHistory[validHistory.length - 1].timestamp - validHistory[0].timestamp + : 0 + } + }; +} + +// ============================================================================= +// ENTROPY CALCULATION +// ============================================================================= + +/** + * Calculates Shannon entropy of a string to measure randomness + * Higher entropy indicates more randomness, lower entropy indicates patterns + * + * @param str - String to analyze + * @returns Entropy value (bits) + */ +export function calculateEntropy(str: string): number { + // Input validation + if (!str || typeof str !== 'string' || str.length === 0) { + return 0; + } + + try { + // Count character frequencies + const charCounts: Record<string, number> = {}; + for (const char of str) { + charCounts[char] = (charCounts[char] || 0) + 1; + } + + // Calculate Shannon entropy + let entropy = 0; + const len = str.length; + + for (const count of Object.values(charCounts)) { + if (count > 0) { + const probability = count / len; + entropy -= probability * Math.log2(probability); + } + } + + // Round to 6 decimal places for consistency + return Math.round(entropy * 1000000) / 1000000; + } catch (err) { + const error = err as Error; + console.warn('Failed to calculate entropy:', error.message); + return 0; + } +} + +/** + * Enhanced entropy analysis with classification + * @param str - String to analyze + * @returns Detailed entropy analysis + */ +export function calculateEntropyAdvanced(str: string): EntropyAnalysis { + const entropy = calculateEntropy(str); + + if (!str || typeof str !== 'string') { + return { + entropy: 0, + classification: 'very_low', + randomness: 0, + characterDistribution: {} + }; + } + + // Count character frequencies for distribution analysis + const charCounts: Record<string, number> = {}; + for (const char of str) { + charCounts[char] = (charCounts[char] || 0) + 1; + } + + // Classify entropy level + const classification = classifyEntropy(entropy); + + // Calculate randomness percentage (0-100) + const maxEntropy = Math.log2(Math.min(str.length, 256)); // Max possible entropy + const randomness = maxEntropy > 0 ? 
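// Illustrative worked examples: Shannon entropy values the function above produces for short strings.
//
//   calculateEntropy('aaaa');     // 0 - single repeated character
//   calculateEntropy('abab');     // 1 - two symbols, equal frequency
//   calculateEntropy('abcd');     // 2 - four symbols, equal frequency
//   calculateEntropy('a1B!x9Qz'); // 3 - eight distinct characters
//
// Long random tokens push past PATTERN_CONFIG.entropyThresholds.high (4.5) and
// classify as 'very_high' below.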
Math.min(100, (entropy / maxEntropy) * 100) : 0; + + return { + entropy, + classification, + randomness: Math.round(randomness * 100) / 100, + characterDistribution: charCounts + }; +} + +// ============================================================================= +// ENCODING LEVEL DETECTION +// ============================================================================= + +/** + * Detects how many levels of URL encoding are applied to a string + * Multiple encoding levels can indicate obfuscation attempts + * + * @param str - String to analyze + * @returns Number of encoding levels detected + */ +export function detectEncodingLevels(str: string): number { + // Input validation + if (!str || typeof str !== 'string') { + return 0; + } + + try { + let levels = 0; + let current = str; + let previous = ''; + + // Iteratively decode until no more changes or max levels reached + while (current !== previous && levels < PATTERN_CONFIG.maxEncodingLevels) { + previous = current; + + try { + const decoded = decodeURIComponent(current); + if (decoded !== current && isValidDecoding(decoded)) { + current = decoded; + levels++; + } else { + break; + } + } catch (decodeError) { + // Stop if decoding fails + break; + } + } + + return levels; + } catch (err) { + const error = err as Error; + console.warn('Failed to detect encoding levels:', error.message); + return 0; + } +} + +/** + * Enhanced encoding analysis with detailed results + * @param str - String to analyze + * @returns Detailed encoding analysis + */ +export function detectEncodingLevelsAdvanced(str: string): EncodingAnalysis { + if (!str || typeof str !== 'string') { + return { + levels: 0, + originalString: '', + decodedString: '', + encodingTypes: [], + isSuspicious: false + }; + } + + const levels = detectEncodingLevels(str); + let current = str; + let previous = ''; + const encodingTypes: string[] = []; + + // Track encoding types detected + for (let i = 0; i < levels; i++) { + previous = current; + try { + current = decodeURIComponent(current); + if (current !== previous) { + encodingTypes.push('uri_component'); + } + } catch { + break; + } + } + + // Determine if encoding pattern is suspicious + const isSuspicious = levels > 2 || (levels > 1 && str.length > 100); + + return { + levels, + originalString: str, + decodedString: current, + encodingTypes, + isSuspicious + }; +} + +// ============================================================================= +// HELPER FUNCTIONS +// ============================================================================= + +/** + * Calculates intervals between consecutive requests + * @param history - Sorted request history + * @returns Array of intervals in milliseconds + */ +function calculateIntervals(history: readonly RequestHistoryEntry[]): number[] { + const intervals: number[] = []; + + for (let i = 1; i < history.length; i++) { + const current = history[i]; + const previous = history[i - 1]; + + if (current && previous && + typeof current.timestamp === 'number' && + typeof previous.timestamp === 'number') { + const interval = current.timestamp - previous.timestamp; + if (interval > 0 && isFinite(interval)) { + intervals.push(interval); + } + } + } + + return intervals; +} + +/** + * Calculates statistical measures for request intervals + * @param intervals - Array of time intervals + * @returns Statistical measures + */ +function calculateStatistics(intervals: readonly number[]): RequestStatistics { + if (intervals.length === 0) { + return { + avgInterval: 0, + stdDev: 0, + 
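// Illustrative worked example: the coefficient of variation computed below is
// stdDev / mean of the request gaps.
//
//   [1000, 1005, 995, 1000] ms -> mean ~1000, stdDev ~3.5,  CV ~0.004 (machine-like)
//   [800, 3200, 1500, 6000] ms -> mean ~2875, stdDev ~2004, CV ~0.70  (human-like)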
coefficientOfVariation: 0, + totalRequests: 0, + timeSpan: 0 + }; + } + + // Calculate average interval + const avgInterval = intervals.reduce((sum, interval) => sum + interval, 0) / intervals.length; + + // Calculate standard deviation + const variance = intervals.reduce((acc, interval) => + acc + Math.pow(interval - avgInterval, 2), 0) / intervals.length; + const stdDev = Math.sqrt(variance); + + // Calculate coefficient of variation (CV) + const coefficientOfVariation = avgInterval > 0 ? stdDev / avgInterval : 0; + + return { + avgInterval: Math.round(avgInterval * 100) / 100, + stdDev: Math.round(stdDev * 100) / 100, + coefficientOfVariation: Math.round(coefficientOfVariation * 1000) / 1000, + totalRequests: intervals.length + 1, // +1 because intervals = requests - 1 + timeSpan: intervals.reduce((sum, interval) => sum + interval, 0) + }; +} + +/** + * Calculates automation score based on statistical measures + * @param statistics - Request interval statistics + * @returns Automation score (0-1) + */ +function calculateAutomationScore(statistics: RequestStatistics): number { + const { coefficientOfVariation, avgInterval } = statistics; + + // Low CV with fast intervals indicates high automation probability + if (coefficientOfVariation < PATTERN_CONFIG.automationThresholds.highConfidence && + avgInterval < PATTERN_CONFIG.intervalThresholds.veryFast) { + return 0.9; + } + + if (coefficientOfVariation < PATTERN_CONFIG.automationThresholds.mediumConfidence && + avgInterval < PATTERN_CONFIG.intervalThresholds.fast) { + return 0.7; + } + + if (coefficientOfVariation < PATTERN_CONFIG.automationThresholds.lowConfidence && + avgInterval < PATTERN_CONFIG.intervalThresholds.normal) { + return 0.5; + } + + // Additional scoring for very consistent patterns regardless of speed + if (coefficientOfVariation < 0.05) { + return 0.6; // Very consistent timing is suspicious + } + + return 0; +} + +/** + * Identifies specific automation indicators + * @param statistics - Request statistics + * @param history - Request history + * @returns Array of automation indicators + */ +function identifyAutomationIndicators( + statistics: RequestStatistics, + history: readonly RequestHistoryEntry[] +): string[] { + const indicators: string[] = []; + + if (statistics.coefficientOfVariation < 0.05) { + indicators.push('extremely_consistent_timing'); + } + + if (statistics.avgInterval < 500) { + indicators.push('very_fast_requests'); + } + + if (statistics.totalRequests > 50 && statistics.timeSpan < 60000) { + indicators.push('high_request_volume'); + } + + // Check for identical user agents + const userAgents = new Set(history.map(entry => entry.userAgent).filter(Boolean)); + if (userAgents.size === 1 && history.length > 10) { + indicators.push('identical_user_agents'); + } + + return indicators; +} + +/** + * Calculates confidence in automation detection + * @param statistics - Request statistics + * @param indicatorCount - Number of indicators found + * @returns Confidence score (0-1) + */ +function calculateConfidence(statistics: RequestStatistics, indicatorCount: number): number { + let confidence = 0; + + // Base confidence from coefficient of variation + if (statistics.coefficientOfVariation < 0.05) confidence += 0.4; + else if (statistics.coefficientOfVariation < 0.1) confidence += 0.3; + else if (statistics.coefficientOfVariation < 0.2) confidence += 0.2; + + // Additional confidence from sample size + if (statistics.totalRequests > 20) confidence += 0.2; + else if (statistics.totalRequests > 10) confidence += 
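// Illustrative worked example: a burst of 25 requests with CV ~0.03 and three indicators accumulates
//   0.4 (CV < 0.05) + 0.2 (> 20 requests) + 0.3 (3 indicators x 0.1) = 0.9 confidence.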
0.1; + + // Confidence from multiple indicators + confidence += Math.min(0.4, indicatorCount * 0.1); + + return Math.min(1, Math.round(confidence * 100) / 100); +} + +/** + * Classifies entropy level + * @param entropy - Entropy value + * @returns Classification level + */ +function classifyEntropy(entropy: number): 'very_low' | 'low' | 'medium' | 'high' | 'very_high' { + if (entropy < PATTERN_CONFIG.entropyThresholds.veryLow) return 'very_low'; + if (entropy < PATTERN_CONFIG.entropyThresholds.low) return 'low'; + if (entropy < PATTERN_CONFIG.entropyThresholds.medium) return 'medium'; + if (entropy < PATTERN_CONFIG.entropyThresholds.high) return 'high'; + return 'very_high'; +} + +/** + * Validates that decoded string is reasonable + * @param decoded - Decoded string + * @returns True if decoding appears valid + */ +function isValidDecoding(decoded: string): boolean { + // Check for common invalid decode patterns + if (decoded.includes('\u0000') || decoded.includes('\uFFFD')) { + return false; + } + + // Check for reasonable character distribution + const controlChars = decoded.match(/[\x00-\x1F\x7F-\x9F]/g); + if (controlChars && controlChars.length > decoded.length * 0.1) { + return false; + } + + return true; +} + +// ============================================================================= +// EXPORT TYPE DEFINITIONS +// ============================================================================= + +export type { + RequestHistoryEntry, + AutomationAnalysis, + RequestStatistics, + EntropyAnalysis, + EncodingAnalysis, + PatternAnalysisConfig +}; \ No newline at end of file diff --git a/src/utils/threat-scoring/analyzers/user-agent.ts b/src/utils/threat-scoring/analyzers/user-agent.ts new file mode 100644 index 0000000..42936bb --- /dev/null +++ b/src/utils/threat-scoring/analyzers/user-agent.ts @@ -0,0 +1,452 @@ +// ============================================================================= +// USER AGENT ANALYSIS - SECURE TYPESCRIPT VERSION +// ============================================================================= +// Comprehensive User-Agent string analysis with ReDoS protection and type safety +// Handles completely user-controlled input with zero trust validation + +import { matchAttackTools, matchSuspiciousBots } from '../pattern-matcher.js'; +import { VERIFIED_GOOD_BOTS, type BotInfo, type VerifiedGoodBots } from '../constants.js'; + +// Type definitions for user-agent analysis +export interface UserAgentFeatures { + readonly isAttackTool: boolean; + readonly isMissing: boolean; + readonly isMalformed: boolean; + readonly isSuspiciousBot: boolean; + readonly isVerifiedGoodBot: boolean; + readonly botType: string | null; + readonly anomalies: readonly string[]; + readonly entropy: number; + readonly length: number; + readonly riskScore: number; +} + +export interface UserAgentConsistencyResult { + readonly isConsistent: boolean; + readonly inconsistencies: readonly string[]; +} + +// Security constants for user-agent validation +const MAX_USER_AGENT_LENGTH = 2048; // 2KB - generous but realistic (normal UAs ~100-500 chars) +const MIN_NORMAL_UA_LENGTH = 10; // Legitimate UAs are usually longer +const MAX_ENTROPY_THRESHOLD = 5.5; // High entropy indicates randomness +const REGEX_TIMEOUT_MS = 100; // Prevent ReDoS attacks +const MAX_ANOMALIES_TRACKED = 50; // Prevent memory exhaustion + +// Safe regex patterns with ReDoS protection +const SAFE_PATTERNS = { + // Pre-compiled patterns to avoid runtime compilation from user input + ENCODED_CHARS: /[%\\]x/g, + 
MULTIPLE_SPACES: /\s{3,}/g, + MULTIPLE_SEMICOLONS: /;{3,}/g, + CONTROL_CHARS: /[\x00-\x1F\x7F]/g, + LEGACY_MOZILLA: /mozilla\/4\.0/i, + VERSION_PATTERN: /\d+\.\d+\.\d+\.\d+\.\d+/g, + PARENTHESES_OPEN: /\(/g, + PARENTHESES_CLOSE: /\)/g +} as const; + +// Input validation functions with zero trust approach +function validateUserAgentInput(userAgent: unknown, paramName: string): string { + if (typeof userAgent !== 'string') { + throw new Error(`${paramName} must be a string`); + } + + if (userAgent.length > MAX_USER_AGENT_LENGTH) { + throw new Error(`${paramName} exceeds maximum length of ${MAX_USER_AGENT_LENGTH} characters`); + } + + return userAgent; +} + +function validateSecChUaInput(secChUa: unknown): string | null { + if (secChUa === null || secChUa === undefined) { + return null; + } + + if (typeof secChUa !== 'string') { + throw new Error('Sec-CH-UA must be a string or null'); + } + + if (secChUa.length > MAX_USER_AGENT_LENGTH) { + throw new Error(`Sec-CH-UA exceeds maximum length of ${MAX_USER_AGENT_LENGTH} characters`); + } + + return secChUa; +} + +// ReDoS-safe regex execution with timeout +function safeRegexTest(pattern: RegExp, input: string, timeoutMs: number = REGEX_TIMEOUT_MS): boolean { + const startTime = Date.now(); + + try { + // Reset regex state to prevent stateful regex issues + pattern.lastIndex = 0; + + // Check if execution takes too long (ReDoS protection) + const result = pattern.test(input); + + if (Date.now() - startTime > timeoutMs) { + throw new Error('Regex execution timeout - possible ReDoS attack'); + } + + return result; + } catch (error) { + if (error instanceof Error && error.message.includes('timeout')) { + throw error; // Re-throw timeout errors + } + // For other regex errors, assume no match (fail safe) + return false; + } +} + +// Safe pattern matching with bounds checking +function safePatternCount(pattern: RegExp, input: string): number { + try { + const matches = input.match(pattern); + return matches ? Math.min(matches.length, 1000) : 0; // Cap at 1000 to prevent DoS + } catch { + return 0; // Fail safe on regex errors + } +} + +// Entropy calculation with bounds checking and DoS protection +function calculateEntropy(input: string): number { + if (!input || input.length === 0) { + return 0; + } + + // Limit analysis to first 800 chars to prevent DoS (normal UAs are ~100-500 chars) + const analysisString = input.length > 800 ? input.substring(0, 800) : input; + + const charCounts = new Map(); + + // Count character frequencies with bounds checking + for (let i = 0; i < analysisString.length; i++) { + const char = analysisString.charAt(i); + if (!char) continue; // Skip if somehow empty + + const currentCount = charCounts.get(char) ?? 
0; + + if (currentCount > 100) { + // Skip if character appears too frequently (DoS protection) + continue; + } + + charCounts.set(char, currentCount + 1); + + // Prevent memory exhaustion from too many unique characters + if (charCounts.size > 256) { + break; + } + } + + if (charCounts.size === 0) { + return 0; + } + + let entropy = 0; + const totalLength = analysisString.length; + + for (const count of Array.from(charCounts.values())) { + if (count > 0) { + const probability = count / totalLength; + entropy -= probability * Math.log2(probability); + } + } + + return Math.min(entropy, 10); // Cap entropy to prevent overflow +} + +// Malformed user-agent detection with ReDoS protection +function detectMalformedUA(userAgent: string): boolean { + try { + // Check parentheses balance with safe counting + const openParens = safePatternCount(SAFE_PATTERNS.PARENTHESES_OPEN, userAgent); + const closeParens = safePatternCount(SAFE_PATTERNS.PARENTHESES_CLOSE, userAgent); + + if (openParens !== closeParens) { + return true; + } + + // Check for invalid version formats with timeout protection + if (safeRegexTest(SAFE_PATTERNS.VERSION_PATTERN, userAgent)) { + return true; + } + + // Check for multiple consecutive spaces or semicolons + if (safeRegexTest(SAFE_PATTERNS.MULTIPLE_SPACES, userAgent) || + safeRegexTest(SAFE_PATTERNS.MULTIPLE_SEMICOLONS, userAgent)) { + return true; + } + + // Check for control characters + if (safeRegexTest(SAFE_PATTERNS.CONTROL_CHARS, userAgent)) { + return true; + } + + return false; + } catch (error) { + // If malformation detection fails, assume malformed for safety + return true; + } +} + +// Safe bot detection with pattern timeout protection +function detectVerifiedBot(userAgent: string, verifiedBots: VerifiedGoodBots): { isBot: boolean; botType: string | null } { + try { + for (const [botName, botConfig] of Object.entries(verifiedBots)) { + if (safeRegexTest(botConfig.pattern, userAgent)) { + return { isBot: true, botType: botName }; + } + } + return { isBot: false, botType: null }; + } catch { + // On error, assume not a verified bot (fail safe) + return { isBot: false, botType: null }; + } +} + +// Main user-agent analysis function with comprehensive validation +export function analyzeUserAgentAdvanced(userAgent: unknown): UserAgentFeatures { + // Validate input with zero trust + let validatedUA: string; + try { + validatedUA = validateUserAgentInput(userAgent, 'userAgent'); + } catch (error) { + // If validation fails, return safe defaults + return { + isAttackTool: false, + isMissing: true, + isMalformed: true, + isSuspiciousBot: false, + isVerifiedGoodBot: false, + botType: null, + anomalies: ['validation_failed'], + entropy: 0, + length: 0, + riskScore: 100 // High risk for invalid input + }; + } + + const anomalies: string[] = []; + let riskScore = 0; + + // Handle missing or empty user agent + if (!validatedUA || validatedUA.trim() === '') { + return { + isAttackTool: false, + isMissing: true, + isMalformed: false, + isSuspiciousBot: false, + isVerifiedGoodBot: false, + botType: null, + anomalies: ['missing_user_agent'], + entropy: 0, + length: validatedUA.length, + riskScore: 50 // Medium risk for missing UA + }; + } + + const uaLower = validatedUA.toLowerCase(); + const uaLength = validatedUA.length; + + // Attack tool detection with safe pattern matching + let isAttackTool = false; + try { + if (matchAttackTools(uaLower)) { + isAttackTool = true; + anomalies.push('attack_tool_detected'); + riskScore += 80; // High risk + } + } catch { + // If attack tool 
detection fails, log anomaly but continue + anomalies.push('attack_tool_detection_failed'); + } + + // Suspicious bot detection with safe pattern matching + let isSuspiciousBot = false; + try { + if (matchSuspiciousBots(uaLower)) { + isSuspiciousBot = true; + anomalies.push('suspicious_bot_pattern'); + riskScore += 30; // Medium risk + } + } catch { + anomalies.push('bot_detection_failed'); + } + + // Verified good bot detection with timeout protection + const botDetection = detectVerifiedBot(validatedUA, VERIFIED_GOOD_BOTS); + const isVerifiedGoodBot = botDetection.isBot; + const botType = botDetection.botType; + + if (isVerifiedGoodBot) { + riskScore = Math.max(0, riskScore - 20); // Reduce risk for verified bots + + // Note: Enhanced bot verification with IP ranges and DNS is available + // via the botVerificationEngine in src/utils/bot-verification.ts + // This can be integrated for more robust bot verification beyond user-agent patterns + } + + // Entropy calculation with DoS protection + let entropy = 0; + try { + entropy = calculateEntropy(validatedUA); + if (entropy > MAX_ENTROPY_THRESHOLD) { + anomalies.push('high_entropy_ua'); + riskScore += 25; + } + } catch { + anomalies.push('entropy_calculation_failed'); + riskScore += 10; // Small penalty for analysis failure + } + + // Malformation detection with ReDoS protection + let isMalformed = false; + try { + isMalformed = detectMalformedUA(validatedUA); + if (isMalformed) { + anomalies.push('malformed_user_agent'); + riskScore += 40; + } + } catch { + anomalies.push('malformation_detection_failed'); + isMalformed = true; // Assume malformed on detection failure + riskScore += 30; + } + + // Additional anomaly detection with safe patterns + try { + // Legacy Mozilla spoofing + if (safeRegexTest(SAFE_PATTERNS.LEGACY_MOZILLA, validatedUA) && !validatedUA.toLowerCase().includes('msie')) { + anomalies.push('legacy_mozilla_spoof'); + riskScore += 15; + } + + // Suspiciously short user agents + if (uaLength < MIN_NORMAL_UA_LENGTH) { + anomalies.push('suspiciously_short_ua'); + riskScore += 20; + } + + // Encoded characters + if (safeRegexTest(SAFE_PATTERNS.ENCODED_CHARS, validatedUA)) { + anomalies.push('encoded_characters_in_ua'); + riskScore += 25; + } + + // Extremely long user agents (potential DoS or attack) + if (uaLength > 800) { + anomalies.push('suspiciously_long_ua'); + riskScore += 30; + } + + } catch { + anomalies.push('anomaly_detection_failed'); + riskScore += 10; + } + + // Limit anomalies to prevent memory exhaustion + const limitedAnomalies = anomalies.slice(0, MAX_ANOMALIES_TRACKED); + + // Cap risk score to valid range + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + + return { + isAttackTool, + isMissing: false, + isMalformed, + isSuspiciousBot, + isVerifiedGoodBot, + botType, + anomalies: limitedAnomalies, + entropy, + length: uaLength, + riskScore: finalRiskScore + }; +} + +// User-Agent consistency checking with comprehensive validation +export function checkUAConsistency(userAgent: unknown, secChUa: unknown): UserAgentConsistencyResult { + try { + // Validate inputs with zero trust + const validatedUA = userAgent ? 
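// Illustrative usage sketch of the analysis result, assuming the pattern lists in
// pattern-matcher.ts flag sqlmap-style scanners as attack tools.
//
//   analyzeUserAgentAdvanced('sqlmap/1.7-dev (https://sqlmap.org)');
//   // -> { isAttackTool: true, anomalies: ['attack_tool_detected'], riskScore: 80, ... }
//
//   analyzeUserAgentAdvanced('Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/120.0 Safari/537.36');
//   // -> typically { isAttackTool: false, isVerifiedGoodBot: false, riskScore: 0, ... }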
validateUserAgentInput(userAgent, 'userAgent') : null; + const validatedSecChUa = validateSecChUaInput(secChUa); + + const inconsistencies: string[] = []; + + // If either is missing, that's not necessarily inconsistent + if (!validatedUA || !validatedSecChUa) { + return { + isConsistent: true, + inconsistencies: [] + }; + } + + const uaLower = validatedUA.toLowerCase(); + const secChUaLower = validatedSecChUa.toLowerCase(); + + // Browser detection with safe string operations + const uaBrowsers = { + chrome: uaLower.includes('chrome/'), + firefox: uaLower.includes('firefox/'), + edge: uaLower.includes('edg/'), + safari: uaLower.includes('safari/') && !uaLower.includes('chrome/') + }; + + const secChBrowsers = { + chrome: secChUaLower.includes('chrome'), + firefox: secChUaLower.includes('firefox'), + edge: secChUaLower.includes('edge'), + safari: secChUaLower.includes('safari') + }; + + // Check for inconsistencies + if (uaBrowsers.chrome && !secChBrowsers.chrome) { + inconsistencies.push('chrome_ua_mismatch'); + } + + if (uaBrowsers.firefox && !secChBrowsers.firefox) { + inconsistencies.push('firefox_ua_mismatch'); + } + + if (uaBrowsers.edge && !secChBrowsers.edge) { + inconsistencies.push('edge_ua_mismatch'); + } + + if (uaBrowsers.safari && !secChBrowsers.safari) { + inconsistencies.push('safari_ua_mismatch'); + } + + // Check for completely different browsers + const uaHasBrowser = Object.values(uaBrowsers).some(Boolean); + const secChHasBrowser = Object.values(secChBrowsers).some(Boolean); + + if (uaHasBrowser && secChHasBrowser) { + const hasAnyMatch = Object.keys(uaBrowsers).some(browser => + uaBrowsers[browser as keyof typeof uaBrowsers] && + secChBrowsers[browser as keyof typeof secChBrowsers] + ); + + if (!hasAnyMatch) { + inconsistencies.push('completely_different_browsers'); + } + } + + return { + isConsistent: inconsistencies.length === 0, + inconsistencies + }; + + } catch (error) { + // On validation error, assume inconsistent for security + return { + isConsistent: false, + inconsistencies: ['validation_error'] + }; + } +} + +// Export types for use in other modules +export type { BotInfo, VerifiedGoodBots }; \ No newline at end of file diff --git a/src/utils/threat-scoring/cache-manager.ts b/src/utils/threat-scoring/cache-manager.ts new file mode 100644 index 0000000..6f24818 --- /dev/null +++ b/src/utils/threat-scoring/cache-manager.ts @@ -0,0 +1,548 @@ +// ============================================================================= +// CACHE MANAGEMENT FOR THREAT SCORING (TypeScript) +// ============================================================================= + +import { CACHE_CONFIG } from './constants.js'; +import { parseDuration } from '../time.js'; + +// Pre-computed durations for hot path cache operations +const REQUEST_HISTORY_TTL = parseDuration('30m'); + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +interface CachedEntry { + readonly data: T; + readonly timestamp: number; + readonly ttl?: number; +} + +interface RequestHistoryEntry { + readonly timestamp: number; + readonly method?: string; + readonly path?: string; + readonly userAgent?: string; + readonly score?: number; + readonly responseTime?: number; + readonly statusCode?: number; +} + +interface CachedRequestHistory { + readonly history: readonly RequestHistoryEntry[]; + readonly timestamp: number; +} + +interface IPScoreEntry { + readonly score: number; + 
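// Illustrative sketch of a cached per-IP entry; the component breakdown keys are
// an assumption, not a fixed contract.
//
//   const example: IPScoreEntry = {
//     score: 72,
//     confidence: 0.8,
//     lastCalculated: Date.now(),
//     components: { headers: 30, userAgent: 25, geo: 17 }
//   };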
readonly confidence: number; + readonly lastCalculated: number; + readonly components: Record<string, number>; +} + +interface SessionEntry { + readonly sessionId: string; + readonly startTime: number; + readonly lastActivity: number; + readonly requestCount: number; + readonly behaviorScore: number; + readonly flags: readonly string[]; +} + +interface BehaviorEntry { + readonly patterns: Record<string, unknown>; + readonly anomalies: readonly string[]; + readonly riskScore: number; + readonly lastUpdated: number; + readonly requestPattern: Record<string, unknown>; +} + +interface VerifiedBotEntry { + readonly botName: string; + readonly verified: boolean; + readonly verificationMethod: 'dns' | 'user_agent' | 'signature' | 'manual'; + readonly lastVerified: number; + readonly trustScore: number; +} + +interface CacheStats { + readonly ipScore: number; + readonly session: number; + readonly behavior: number; + readonly verifiedBots: number; +} + +interface CacheCleanupResult { + readonly beforeSize: CacheStats; + readonly afterSize: CacheStats; + readonly totalCleaned: number; + readonly emergencyTriggered: boolean; +} + +// Generic cache interface for type safety +interface TypedCache<T> { + get(key: string): T | undefined; + set(key: string, value: T): void; + delete(key: string): boolean; + has(key: string): boolean; + clear(): void; + readonly size: number; + [Symbol.iterator](): IterableIterator<[string, T]>; +} + +// ============================================================================= +// CACHE MANAGER CLASS +// ============================================================================= + +export class CacheManager { + // Type-safe cache instances + private readonly ipScoreCache: TypedCache<CachedEntry<IPScoreEntry>>; + private readonly sessionCache: TypedCache<CachedEntry<SessionEntry>>; + private readonly behaviorCache: TypedCache<CachedEntry<BehaviorEntry | CachedRequestHistory>>; + private readonly verifiedBotsCache: TypedCache<CachedEntry<VerifiedBotEntry>>; + + // Cleanup timer reference for proper disposal + private cleanupTimer: NodeJS.Timeout | null = null; + + constructor() { + // Initialize in-memory caches with size limits + this.ipScoreCache = new Map<string, CachedEntry<IPScoreEntry>>() as TypedCache<CachedEntry<IPScoreEntry>>; + this.sessionCache = new Map<string, CachedEntry<SessionEntry>>() as TypedCache<CachedEntry<SessionEntry>>; + this.behaviorCache = new Map<string, CachedEntry<BehaviorEntry | CachedRequestHistory>>() as TypedCache<CachedEntry<BehaviorEntry | CachedRequestHistory>>; + this.verifiedBotsCache = new Map<string, CachedEntry<VerifiedBotEntry>>() as TypedCache<CachedEntry<VerifiedBotEntry>>; + + // Start cache cleanup timer + this.startCacheCleanup(); + } + + // ----------------------------------------------------------------------------- + // CACHE LIFECYCLE MANAGEMENT + // ----------------------------------------------------------------------------- + + /** + * Starts the cache cleanup timer - CRITICAL for memory stability + * This prevents memory leaks under high load by periodically cleaning expired entries + */ + private startCacheCleanup(): void { + // CRITICAL: This timer prevents memory leaks under high load + // If this cleanup stops running, the system will eventually crash due to memory exhaustion + // The cleanup interval affects both memory usage and performance - too frequent = CPU waste, + // too infrequent = memory problems + this.cleanupTimer = setInterval(() => { + this.cleanupCaches(); + }, CACHE_CONFIG.CACHE_CLEANUP_INTERVAL); + + // Ensure cleanup timer doesn't keep process alive + if (this.cleanupTimer.unref) { + this.cleanupTimer.unref(); + } + } + + /** + * Stops the cache cleanup timer and clears all caches + * Should be called during application shutdown + */ + public destroy(): void { + if (this.cleanupTimer) { + clearInterval(this.cleanupTimer); + this.cleanupTimer = null; + } + + // Clear all caches + this.ipScoreCache.clear(); + this.sessionCache.clear(); + this.behaviorCache.clear(); 
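// Illustrative usage sketch: one CacheManager is expected to live for the whole process,
// with destroy() wired into shutdown so the cleanup interval does not leak.
// The exact wiring is an assumption.
//
//   const cacheManager = new CacheManager();
//   process.on('SIGTERM', () => cacheManager.destroy());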
+ this.verifiedBotsCache.clear(); + } + + // ----------------------------------------------------------------------------- + // CACHE CLEANUP OPERATIONS + // ----------------------------------------------------------------------------- + + /** + * Performs comprehensive cache cleanup to prevent memory exhaustion + * @returns Cleanup statistics + */ + public cleanupCaches(): CacheCleanupResult { + const beforeSize: CacheStats = { + ipScore: this.ipScoreCache.size, + session: this.sessionCache.size, + behavior: this.behaviorCache.size, + verifiedBots: this.verifiedBotsCache.size + }; + + // Clean each cache using the optimized cleanup method + this.cleanupCache(this.ipScoreCache); + this.cleanupCache(this.sessionCache); + this.cleanupCache(this.behaviorCache); + this.cleanupCache(this.verifiedBotsCache); + + const afterSize: CacheStats = { + ipScore: this.ipScoreCache.size, + session: this.sessionCache.size, + behavior: this.behaviorCache.size, + verifiedBots: this.verifiedBotsCache.size + }; + + const totalCleaned = Object.keys(beforeSize).reduce((total, key) => { + const beforeCount = beforeSize[key as keyof CacheStats]; + const afterCount = afterSize[key as keyof CacheStats]; + return total + (beforeCount - afterCount); + }, 0); + + let emergencyTriggered = false; + + if (totalCleaned > 0) { + console.log(`Threat scorer: cleaned ${totalCleaned} expired cache entries`); + } + + // Emergency cleanup if caches are still too large + // This prevents memory exhaustion under extreme load + if (this.ipScoreCache.size > CACHE_CONFIG.MAX_CACHE_SIZE * CACHE_CONFIG.EMERGENCY_CLEANUP_THRESHOLD) { + console.warn('Threat scorer: Emergency cleanup triggered - system under high load'); + this.emergencyCleanup(); + emergencyTriggered = true; + } + + return { + beforeSize, + afterSize, + totalCleaned, + emergencyTriggered + }; + } + + /** + * Optimized cache cleanup - removes oldest entries when cache exceeds size limit + * Maps maintain insertion order, so we can efficiently remove oldest entries + */ + private cleanupCache(cache: TypedCache): number { + if (cache.size <= CACHE_CONFIG.MAX_CACHE_SIZE) { + return 0; + } + + const excess = cache.size - CACHE_CONFIG.MAX_CACHE_SIZE; + let removed = 0; + + // Remove oldest entries (Maps maintain insertion order) + const cacheAsMap = cache as unknown as Map; + for (const [key] of Array.from(cacheAsMap.entries())) { + if (removed >= excess) { + break; + } + cache.delete(key); + removed++; + } + + return removed; + } + + /** + * Emergency cleanup for extreme memory pressure + * Aggressively reduces cache sizes to prevent system crashes + */ + private emergencyCleanup(): void { + // Aggressively reduce cache sizes to 25% of max + const targetSize = Math.floor(CACHE_CONFIG.MAX_CACHE_SIZE * CACHE_CONFIG.EMERGENCY_CLEANUP_TARGET); + + // Clean each cache individually to avoid type issues + this.emergencyCleanupCache(this.ipScoreCache, targetSize); + this.emergencyCleanupCache(this.sessionCache, targetSize); + this.emergencyCleanupCache(this.behaviorCache, targetSize); + this.emergencyCleanupCache(this.verifiedBotsCache, targetSize); + } + + /** + * Helper method for emergency cleanup of individual cache + */ + private emergencyCleanupCache(cache: TypedCache, targetSize: number): void { + if (cache.size <= targetSize) { + return; + } + + const toRemove = cache.size - targetSize; + let removed = 0; + + // Clear the cache if we need to remove too many entries (emergency scenario) + if (toRemove > cache.size * 0.8) { + cache.clear(); + return; + } + + // Otherwise, remove 
oldest entries using the Map's iteration order + const cacheAsMap = cache as unknown as Map; + const keysToDelete: string[] = []; + + for (const [key] of Array.from(cacheAsMap.entries())) { + if (keysToDelete.length >= toRemove) { + break; + } + keysToDelete.push(key); + } + + for (const key of keysToDelete) { + cache.delete(key); + removed++; + } + } + + // ----------------------------------------------------------------------------- + // IP SCORE CACHE OPERATIONS + // ----------------------------------------------------------------------------- + + /** + * Retrieves cached IP score if still valid + */ + public getCachedIPScore(ip: string): IPScoreEntry | null { + if (!ip || typeof ip !== 'string') { + return null; + } + + const cached = this.ipScoreCache.get(ip); + if (cached && this.isEntryValid(cached)) { + return cached.data; + } + + return null; + } + + /** + * Caches IP score with optional TTL + */ + public setCachedIPScore(ip: string, scoreData: IPScoreEntry, ttlMs?: number): void { + if (!ip || typeof ip !== 'string' || !scoreData) { + return; + } + + const entry: CachedEntry = { + data: scoreData, + timestamp: Date.now(), + ttl: ttlMs + }; + + this.ipScoreCache.set(ip, entry); + } + + // ----------------------------------------------------------------------------- + // SESSION CACHE OPERATIONS + // ----------------------------------------------------------------------------- + + /** + * Retrieves cached session data if still valid + */ + public getCachedSession(sessionId: string): SessionEntry | null { + if (!sessionId || typeof sessionId !== 'string') { + return null; + } + + const cached = this.sessionCache.get(sessionId); + if (cached && this.isEntryValid(cached)) { + return cached.data; + } + + return null; + } + + /** + * Caches session data with optional TTL + */ + public setCachedSession(sessionId: string, sessionData: SessionEntry, ttlMs?: number): void { + if (!sessionId || typeof sessionId !== 'string' || !sessionData) { + return; + } + + const entry: CachedEntry = { + data: sessionData, + timestamp: Date.now(), + ttl: ttlMs + }; + + this.sessionCache.set(sessionId, entry); + } + + // ----------------------------------------------------------------------------- + // BEHAVIOR CACHE OPERATIONS + // ----------------------------------------------------------------------------- + + /** + * Retrieves cached behavior data if still valid + */ + public getCachedBehavior(key: string): BehaviorEntry | null { + if (!key || typeof key !== 'string') { + return null; + } + + const cached = this.behaviorCache.get(key); + if (cached && this.isEntryValid(cached) && this.isBehaviorEntry(cached.data)) { + return cached.data; + } + + return null; + } + + /** + * Caches behavior data with optional TTL + */ + public setCachedBehavior(key: string, behaviorData: BehaviorEntry, ttlMs?: number): void { + if (!key || typeof key !== 'string' || !behaviorData) { + return; + } + + const entry: CachedEntry = { + data: behaviorData, + timestamp: Date.now(), + ttl: ttlMs + }; + + this.behaviorCache.set(key, entry); + } + + // ----------------------------------------------------------------------------- + // REQUEST HISTORY CACHE OPERATIONS + // ----------------------------------------------------------------------------- + + /** + * Retrieves cached request history if still valid + */ + public getCachedRequestHistory(ip: string, cutoff: number): readonly RequestHistoryEntry[] | null { + if (!ip || typeof ip !== 'string' || typeof cutoff !== 'number') { + return null; + } + + const cacheKey = 
`history:${ip}`; + const cached = this.behaviorCache.get(cacheKey); + + if (cached && cached.timestamp > cutoff && this.isRequestHistoryEntry(cached.data)) { + return cached.data.history.filter(h => h.timestamp > cutoff); + } + + return null; + } + + /** + * Caches request history with automatic TTL + */ + public setCachedRequestHistory(ip: string, history: readonly RequestHistoryEntry[]): void { + if (!ip || typeof ip !== 'string' || !Array.isArray(history)) { + return; + } + + const cacheKey = `history:${ip}`; + const cachedHistory: CachedRequestHistory = { + history, + timestamp: Date.now() + }; + + const entry: CachedEntry = { + data: cachedHistory, + timestamp: Date.now(), + ttl: REQUEST_HISTORY_TTL // 30 minutes TTL for request history + }; + + this.behaviorCache.set(cacheKey, entry); + } + + // ----------------------------------------------------------------------------- + // VERIFIED BOTS CACHE OPERATIONS + // ----------------------------------------------------------------------------- + + /** + * Retrieves cached bot verification if still valid + */ + public getCachedBotVerification(userAgent: string): VerifiedBotEntry | null { + if (!userAgent || typeof userAgent !== 'string') { + return null; + } + + const cached = this.verifiedBotsCache.get(userAgent); + if (cached && this.isEntryValid(cached)) { + return cached.data; + } + + return null; + } + + /** + * Caches bot verification with TTL from configuration + */ + public setCachedBotVerification(userAgent: string, botData: VerifiedBotEntry, ttlMs: number): void { + if (!userAgent || typeof userAgent !== 'string' || !botData || typeof ttlMs !== 'number') { + return; + } + + const entry: CachedEntry = { + data: botData, + timestamp: Date.now(), + ttl: ttlMs + }; + + this.verifiedBotsCache.set(userAgent, entry); + } + + // ----------------------------------------------------------------------------- + // CACHE STATISTICS AND MONITORING + // ----------------------------------------------------------------------------- + + /** + * Gets current cache statistics for monitoring + */ + public getCacheStats(): CacheStats & { totalEntries: number; memoryPressure: boolean } { + const stats: CacheStats = { + ipScore: this.ipScoreCache.size, + session: this.sessionCache.size, + behavior: this.behaviorCache.size, + verifiedBots: this.verifiedBotsCache.size + }; + + const totalEntries = Object.values(stats).reduce((sum, count) => sum + count, 0); + const memoryPressure = totalEntries > (CACHE_CONFIG.MAX_CACHE_SIZE * 4 * CACHE_CONFIG.EMERGENCY_CLEANUP_THRESHOLD); + + return { + ...stats, + totalEntries, + memoryPressure + }; + } + + /** + * Clears all caches - use with caution + */ + public clearAllCaches(): void { + this.ipScoreCache.clear(); + this.sessionCache.clear(); + this.behaviorCache.clear(); + this.verifiedBotsCache.clear(); + + console.log('Threat scorer: All caches cleared'); + } + + // ----------------------------------------------------------------------------- + // UTILITY METHODS + // ----------------------------------------------------------------------------- + + /** + * Checks if a cached entry is still valid based on TTL + */ + private isEntryValid(entry: CachedEntry): boolean { + if (!entry.ttl) { + return true; // No TTL means it doesn't expire + } + + const now = Date.now(); + return (now - entry.timestamp) < entry.ttl; + } + + /** + * Type guard to check if cached data is BehaviorEntry + */ + private isBehaviorEntry(data: BehaviorEntry | CachedRequestHistory): data is BehaviorEntry { + return 'patterns' in data && 
'anomalies' in data && 'riskScore' in data; + } + + /** + * Type guard to check if cached data is CachedRequestHistory + */ + private isRequestHistoryEntry(data: BehaviorEntry | CachedRequestHistory): data is CachedRequestHistory { + return 'history' in data && Array.isArray((data as CachedRequestHistory).history); + } +} \ No newline at end of file diff --git a/src/utils/threat-scoring/constants.ts b/src/utils/threat-scoring/constants.ts new file mode 100644 index 0000000..efbcb1b --- /dev/null +++ b/src/utils/threat-scoring/constants.ts @@ -0,0 +1,141 @@ +// ============================================================================= +// THREAT SCORING ENGINE CONSTANTS & CONFIGURATION +// ============================================================================= + +import { parseDuration } from '../time.js'; + +// Type definitions for threat scoring system +export interface ThreatThresholds { + readonly ALLOW: number; + readonly CHALLENGE: number; + readonly BLOCK: number; +} + +export interface SignalWeight { + readonly weight: number; + readonly confidence: number; +} + +export interface SignalWeights { + // User-Agent signals (implemented) + readonly ATTACK_TOOL_UA: SignalWeight; + readonly MISSING_UA: SignalWeight; + + // WAF signals (implemented via WAF plugin) + readonly SQL_INJECTION: SignalWeight; + readonly XSS_ATTEMPT: SignalWeight; + readonly COMMAND_INJECTION: SignalWeight; + readonly PATH_TRAVERSAL: SignalWeight; +} + +export interface StaticWhitelist { + readonly extensions: ReadonlySet; + readonly paths: ReadonlySet; + readonly patterns: readonly RegExp[]; +} + +export interface BotInfo { + readonly pattern: RegExp; + readonly verifyDNS: boolean; +} + +export interface VerifiedGoodBots { + readonly [botName: string]: BotInfo; +} + +export interface CacheConfig { + readonly MAX_CACHE_SIZE: number; + readonly CACHE_CLEANUP_INTERVAL: number; + readonly EMERGENCY_CLEANUP_THRESHOLD: number; + readonly EMERGENCY_CLEANUP_TARGET: number; +} + +export interface DbTtlConfig { + readonly THREAT_DB_TTL: number; + readonly BEHAVIOR_DB_TTL: number; +} + +// Attack pattern types +export type AttackToolPattern = string; +export type SuspiciousBotPattern = string; + +// All threat score thresholds should come from user configuration +// No hardcoded defaults - configuration required + +// Attack tool patterns for Aho-Corasick matching +export const ATTACK_TOOL_PATTERNS: readonly AttackToolPattern[] = [ + 'sqlmap', 'nikto', 'nmap', 'burpsuite', 'w3af', 'acunetix', + 'nessus', 'openvas', 'gobuster', 'dirbuster', 'wfuzz', 'ffuf', + 'hydra', 'medusa', 'masscan', 'zmap', 'metasploit', 'burp suite', + 'scanner', 'exploit', 'payload', 'injection', 'vulnerability' +] as const; + +// Suspicious bot patterns +export const SUSPICIOUS_BOT_PATTERNS: readonly SuspiciousBotPattern[] = [ + 'bot', 'crawler', 'spider', 'scraper', 'scanner', 'harvest', + 'extract', 'collect', 'gather', 'fetch' +] as const; + +// Signal weights should come from user configuration +// No hardcoded signal weights - configuration required + +// Paths and extensions that should never trigger scoring +export const STATIC_WHITELIST: StaticWhitelist = { + extensions: new Set([ + '.css', '.js', '.png', '.jpg', '.jpeg', '.gif', '.svg', '.ico', '.webp', + '.woff', '.woff2', '.ttf', '.eot', '.pdf', '.mp4', '.mp3', '.zip', '.avif', + '.bmp', '.tiff', '.webm', '.mov', '.avi', '.flv', '.map', '.txt', '.xml' + ]) as ReadonlySet, + paths: new Set([ + '/static/', '/assets/', '/images/', '/img/', '/css/', '/js/', '/fonts/', + '/webfont/', 
'/favicon', '/media/', '/uploads/', '/.well-known/' + ]) as ReadonlySet, + patterns: [ + /^\/[a-f0-9]{32}\.(css|js)$/i, // Hashed asset files + /^\/build\/[^\/]+\.(css|js)$/i, // Build output files + /^\/dist\/[^\/]+\.(css|js)$/i, // Distribution files + ] as const +} as const; + +// Known good bots that should be treated favorably +export const VERIFIED_GOOD_BOTS: VerifiedGoodBots = { + // Search engines + 'googlebot': { pattern: /Googlebot\/\d+\.\d+/i, verifyDNS: true }, + 'bingbot': { pattern: /bingbot\/\d+\.\d+/i, verifyDNS: true }, + 'slurp': { pattern: /Slurp/i, verifyDNS: true }, + 'duckduckbot': { pattern: /DuckDuckBot\/\d+\.\d+/i, verifyDNS: false }, + 'baiduspider': { pattern: /Baiduspider\/\d+\.\d+/i, verifyDNS: true }, + 'yandexbot': { pattern: /YandexBot\/\d+\.\d+/i, verifyDNS: true }, + + // Social media + 'facebookexternalhit': { pattern: /facebookexternalhit\/\d+\.\d+/i, verifyDNS: false }, + 'twitterbot': { pattern: /Twitterbot\/\d+\.\d+/i, verifyDNS: false }, + 'linkedinbot': { pattern: /LinkedInBot\/\d+\.\d+/i, verifyDNS: false }, + + // Monitoring services + 'uptimerobot': { pattern: /UptimeRobot\/\d+\.\d+/i, verifyDNS: false }, + 'pingdom': { pattern: /Pingdom\.com_bot/i, verifyDNS: false } +} as const; + +// Cache configuration +export const CACHE_CONFIG: CacheConfig = { + MAX_CACHE_SIZE: 10000, + CACHE_CLEANUP_INTERVAL: parseDuration('5m'), // 5 minutes + EMERGENCY_CLEANUP_THRESHOLD: 1.5, // 150% of max size + EMERGENCY_CLEANUP_TARGET: 0.25 // Reduce to 25% of max +} as const; + +// Database TTL configuration +export const DB_TTL_CONFIG: DbTtlConfig = { + THREAT_DB_TTL: parseDuration('1h'), // 1 hour + BEHAVIOR_DB_TTL: parseDuration('24h') // 24 hours +} as const; + +// Type utility to get signal weight names as union type +export type SignalWeightName = keyof SignalWeights; + +// Type utility to get attack tool patterns as literal types +export type AttackToolPatterns = typeof ATTACK_TOOL_PATTERNS[number]; +export type SuspiciousBotPatterns = typeof SUSPICIOUS_BOT_PATTERNS[number]; + +// Note: All interface types are already exported above \ No newline at end of file diff --git a/src/utils/threat-scoring/database.ts b/src/utils/threat-scoring/database.ts new file mode 100644 index 0000000..14db2c1 --- /dev/null +++ b/src/utils/threat-scoring/database.ts @@ -0,0 +1,503 @@ +// ============================================================================= +// DATABASE OPERATIONS FOR THREAT SCORING (TypeScript) +// ============================================================================= + +import { Level } from 'level'; +// @ts-ignore - level-ttl doesn't have TypeScript definitions +import ttl from 'level-ttl'; +import { rootDir } from '../../index.js'; +import { join } from 'path'; +import { Readable } from 'stream'; +import * as fs from 'fs'; +import { DB_TTL_CONFIG } from './constants.js'; + +// Import types from the main threat scoring module +// Local type definitions for database operations +type ThreatFeatures = Record; +type AssessmentData = Record; +type SanitizedFeatures = Record; + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +interface DatabaseOperation { + readonly type: 'put' | 'del'; + readonly key: string; + readonly value?: unknown; +} + +interface ThreatAssessment { + readonly score: number; + readonly action: 'allow' | 'challenge' | 'block'; + readonly features: Record; + readonly scoreComponents: Record; + 
  readonly confidence: number;
+  readonly timestamp: number;
+}
+
+interface BehaviorData {
+  readonly lastScore: number;
+  readonly lastSeen: number;
+  readonly features: Record<string, unknown>;
+  readonly requestCount: number;
+}
+
+interface ReputationData {
+  score: number;
+  incidents: number;
+  blacklisted: boolean;
+  tags: string[];
+  notes?: string;
+  firstSeen?: number;
+  lastUpdate: number;
+  source: 'static_migration' | 'dynamic' | 'manual';
+  migrated?: boolean;
+}
+
+interface RequestHistoryEntry {
+  readonly timestamp: number;
+  readonly method?: string;
+  readonly path?: string;
+  readonly userAgent?: string;
+  readonly score?: number;
+}
+
+interface MigrationRecord {
+  readonly completed: number;
+  readonly count: number;
+}
+
+interface StaticReputationEntry {
+  readonly score?: number;
+  readonly incidents?: number;
+  readonly blacklisted?: boolean;
+  readonly tags?: readonly string[];
+  readonly notes?: string;
+}
+
+interface LevelDatabase {
+  put(key: string, value: unknown): Promise<void>;
+  get(key: string): Promise<unknown>;
+  del(key: string): Promise<void>;
+  batch(operations: readonly DatabaseOperation[]): Promise<void>;
+  createReadStream(options?: DatabaseStreamOptions): AsyncIterable<DatabaseEntry>;
+  iterator(options?: DatabaseStreamOptions): AsyncIterable<[string, unknown]>;
+}
+
+interface DatabaseStreamOptions {
+  readonly gte?: string;
+  readonly lte?: string;
+  readonly limit?: number;
+  readonly reverse?: boolean;
+}
+
+interface DatabaseEntry {
+  readonly key: string;
+  readonly value: unknown;
+}
+
+type SanitizeFeaturesFunction = (features: Record<string, unknown> | ThreatFeatures) => SanitizedFeatures;
+
+// =============================================================================
+// DATABASE INITIALIZATION
+// =============================================================================
+
+// Database paths
+const threatDBPath = join(rootDir, 'db', 'threats');
+const behaviorDBPath = join(rootDir, 'db', 'behavior');
+
+// Ensure database directories exist
+fs.mkdirSync(threatDBPath, { recursive: true });
+fs.mkdirSync(behaviorDBPath, { recursive: true });
+
+// Add read stream support for LevelDB
+function addReadStreamSupport(dbInstance: any): LevelDatabase {
+  if (!dbInstance.createReadStream) {
+    dbInstance.createReadStream = (opts?: DatabaseStreamOptions): AsyncIterable<DatabaseEntry> =>
+      Readable.from((async function* () {
+        for await (const [key, value] of dbInstance.iterator(opts)) {
+          yield { key, value };
+        }
+      })());
+  }
+  return dbInstance as LevelDatabase;
+}
+
+// Initialize databases with proper TTL and stream support
+const rawThreatDB = addReadStreamSupport(new Level(threatDBPath, { valueEncoding: 'json' }));
+export const threatDB: LevelDatabase = addReadStreamSupport(
+  ttl(rawThreatDB, { defaultTTL: DB_TTL_CONFIG.THREAT_DB_TTL })
+);
+
+const rawBehaviorDB = addReadStreamSupport(new Level(behaviorDBPath, { valueEncoding: 'json' }));
+export const behaviorDB: LevelDatabase = addReadStreamSupport(
+  ttl(rawBehaviorDB, { defaultTTL: DB_TTL_CONFIG.BEHAVIOR_DB_TTL })
+);
+
+// =============================================================================
+// DATABASE OPERATIONS
+// =============================================================================
+
+/**
+ * Stores a threat assessment in the database with automatic TTL
+ * @param clientIP - The IP address being assessed
+ * @param assessment - The threat assessment data
+ */
+export async function storeAssessment(clientIP: string, assessment: ThreatAssessment | AssessmentData): Promise<void> {
+  try {
+    // Input validation
+    if (!clientIP || typeof clientIP !== 
'string') { + throw new Error('Invalid client IP provided'); + } + + if (!assessment || typeof assessment !== 'object') { + throw new Error('Invalid assessment data provided'); + } + + const key = `assessment:${clientIP}:${Date.now()}`; + + // Store assessment with TTL to prevent unbounded growth + await threatDB.put(key, assessment); + } catch (err) { + const error = err as Error; + // CRITICAL: Database errors should not crash the threat scorer + // Log the error but continue processing - the system can function without + // storing assessments, though learning capabilities will be reduced + console.error('Failed to store threat assessment:', error.message); + } +} + +/** + * Updates behavioral models based on observed client behavior + * @param clientIP - The IP address to update + * @param features - Extracted threat features + * @param score - Calculated threat score + * @param sanitizeFeatures - Function to sanitize features for storage + */ +export async function updateBehavioralModels( + clientIP: string, + features: Record | ThreatFeatures, + score: number, + sanitizeFeatures: SanitizeFeaturesFunction +): Promise { + try { + // Input validation + if (!clientIP || typeof clientIP !== 'string') { + throw new Error('Invalid client IP provided'); + } + + if (typeof score !== 'number' || score < 0 || score > 100) { + throw new Error('Invalid threat score provided'); + } + + // Batch database operations for better performance + const operations: DatabaseOperation[] = []; + + // Update IP behavior history + const behaviorKey = `behavior:${clientIP}`; + const existingBehavior = await getBehaviorData(clientIP); + + const behaviorData: BehaviorData = { + lastScore: score, + lastSeen: Date.now(), + features: sanitizeFeatures(features) as unknown as Record, + requestCount: (existingBehavior?.requestCount || 0) + 1 + }; + + operations.push({ + type: 'put', + key: behaviorKey, + value: behaviorData + }); + + // Update reputation based on observed behavior (automatic reputation management) + await updateIPReputation(clientIP, score, features as ThreatFeatures, operations); + + // Execute batch operation if we have operations to perform + if (operations.length > 0) { + await behaviorDB.batch(operations); + } + } catch (err) { + const error = err as Error; + // Log but don't throw - behavioral model updates shouldn't crash the system + console.error('Failed to update behavioral models:', error.message); + } +} + +/** + * Automatic IP reputation management based on observed behavior + * @param clientIP - The IP address to update + * @param score - Current threat score + * @param features - Threat features detected + * @param operations - Array to append database operations to + */ +export async function updateIPReputation( + clientIP: string, + score: number, + features: ThreatFeatures, + operations: DatabaseOperation[] +): Promise { + try { + const currentRep: ReputationData = await getReputationData(clientIP) || { + score: 0, + incidents: 0, + blacklisted: false, + tags: [], + firstSeen: Date.now(), + lastUpdate: Date.now(), + source: 'dynamic' + }; + + let reputationChanged = false; + const now = Date.now(); + + // Automatic reputation scoring based on behavior + if (score >= 90) { + // Critical threat - significant reputation penalty + currentRep.score = Math.min(100, currentRep.score + 25); + currentRep.incidents += 1; + currentRep.tags = Array.from(new Set([...currentRep.tags, 'critical_threat'])); + reputationChanged = true; + } else if (score >= 75) { + // High threat - moderate reputation 
penalty + currentRep.score = Math.min(100, currentRep.score + 15); + currentRep.incidents += 1; + currentRep.tags = Array.from(new Set([...currentRep.tags, 'high_threat'])); + reputationChanged = true; + } else if (score >= 50) { + // Medium threat - small reputation penalty + currentRep.score = Math.min(100, currentRep.score + 5); + currentRep.tags = Array.from(new Set([...currentRep.tags, 'medium_threat'])); + reputationChanged = true; + } else if (score <= 10) { + // Very low threat - slowly improve reputation for good behavior + currentRep.score = Math.max(0, currentRep.score - 1); + if (currentRep.score === 0) { + currentRep.tags = currentRep.tags.filter(tag => !tag.includes('threat')); + } + reputationChanged = true; + } + + // Add specific behavior tags for detailed tracking + if (features.userAgent?.isAttackTool) { + currentRep.tags = Array.from(new Set([...currentRep.tags, 'attack_tool'])); + currentRep.score = Math.min(100, currentRep.score + 20); + reputationChanged = true; + } + + if (features.pattern?.patternAnomalies?.includes('enumeration_detected')) { + currentRep.tags = Array.from(new Set([...currentRep.tags, 'enumeration'])); + currentRep.score = Math.min(100, currentRep.score + 10); + reputationChanged = true; + } + + if (features.pattern?.patternAnomalies?.includes('bruteforce_detected')) { + currentRep.tags = Array.from(new Set([...currentRep.tags, 'bruteforce'])); + currentRep.score = Math.min(100, currentRep.score + 15); + reputationChanged = true; + } + + if (features.velocity?.impossibleTravel) { + currentRep.tags = Array.from(new Set([...currentRep.tags, 'impossible_travel'])); + currentRep.score = Math.min(100, currentRep.score + 12); + reputationChanged = true; + } + + // Automatic blacklisting for consistently bad actors + if (currentRep.score >= 80 && currentRep.incidents >= 5) { + currentRep.blacklisted = true; + currentRep.tags = Array.from(new Set([...currentRep.tags, 'auto_blacklisted'])); + reputationChanged = true; + console.log(`Threat scorer: Auto-blacklisted ${clientIP} (score: ${currentRep.score}, incidents: ${currentRep.incidents})`); + } + + // Automatic reputation decay over time (good IPs recover slowly) + const daysSinceLastUpdate = (now - currentRep.lastUpdate) / (1000 * 60 * 60 * 24); + if (daysSinceLastUpdate > 7 && currentRep.score > 0) { + // Decay reputation by 1 point per week for inactive IPs + const decayAmount = Math.floor(daysSinceLastUpdate / 7); + currentRep.score = Math.max(0, currentRep.score - decayAmount); + if (currentRep.score < 50) { + currentRep.blacklisted = false; // Unblacklist if score drops + } + reputationChanged = true; + } + + // Only update database if reputation actually changed + if (reputationChanged) { + currentRep.lastUpdate = now; + operations.push({ + type: 'put', + key: `reputation:${clientIP}`, + value: currentRep + }); + + console.log(`Threat scorer: Updated reputation for ${clientIP}: score=${currentRep.score}, incidents=${currentRep.incidents}, tags=[${currentRep.tags.join(', ')}]`); + } + } catch (err) { + const error = err as Error; + console.error('Failed to update IP reputation:', error.message); + } +} + +// ============================================================================= +// HELPER METHODS +// ============================================================================= + +/** + * Retrieves behavioral data for a specific IP address + * @param clientIP - The IP address to look up + * @returns Behavioral data or null if not found + */ +export async function getBehaviorData(clientIP: 
string): Promise<BehaviorData | null> {
+  try {
+    if (!clientIP || typeof clientIP !== 'string') {
+      return null;
+    }
+
+    const data = await behaviorDB.get(`behavior:${clientIP}`);
+    return data as BehaviorData;
+  } catch (err) {
+    return null; // Key doesn't exist or database error
+  }
+}
+
+/**
+ * Retrieves reputation data for a specific IP address
+ * @param clientIP - The IP address to look up
+ * @returns Reputation data or null if not found
+ */
+export async function getReputationData(clientIP: string): Promise<ReputationData | null> {
+  try {
+    if (!clientIP || typeof clientIP !== 'string') {
+      return null;
+    }
+
+    const data = await threatDB.get(`reputation:${clientIP}`);
+    return data as ReputationData;
+  } catch (err) {
+    return null; // Key doesn't exist or database error
+  }
+}
+
+/**
+ * Gets request history from database within a specific time window
+ * @param ip - The IP address to get history for
+ * @param timeWindow - Time window in milliseconds
+ * @returns Array of request history entries
+ */
+export async function getRequestHistory(ip: string, timeWindow: number): Promise<RequestHistoryEntry[]> {
+  const history: RequestHistoryEntry[] = [];
+
+  // Input validation
+  if (!ip || typeof ip !== 'string') {
+    return history;
+  }
+
+  if (typeof timeWindow !== 'number' || timeWindow <= 0) {
+    return history;
+  }
+
+  const cutoff = Date.now() - timeWindow;
+
+  try {
+    // Get from database
+    const stream = threatDB.createReadStream({
+      gte: `request:${ip}:${cutoff}`,
+      lte: `request:${ip}:${Date.now()}`,
+      limit: 1000
+    });
+
+    for await (const { value } of stream) {
+      const entry = value as RequestHistoryEntry;
+      if (entry.timestamp && entry.timestamp > cutoff) {
+        history.push(entry);
+      }
+    }
+  } catch (err) {
+    const error = err as Error;
+    console.warn('Failed to get request history:', error.message);
+  }
+
+  return history;
+}
+
+/**
+ * One-time migration of static IP reputation data to database
+ * Safely migrates existing JSON reputation data to the new database format
+ */
+export async function migrateStaticReputationData(): Promise<void> {
+  try {
+    const ipReputationPath = join(rootDir, 'data', 'ip-reputation.json');
+
+    if (!fs.existsSync(ipReputationPath)) {
+      return;
+    }
+
+    // Check if we've already migrated
+    const migrationKey = 'reputation:migration:completed';
+    try {
+      await threatDB.get(migrationKey);
+      return; // Already migrated
+    } catch (err) {
+      // Not migrated yet, proceed
+    }
+
+    console.log('Threat scorer: Migrating static IP reputation data to database...');
+
+    const staticDataRaw = fs.readFileSync(ipReputationPath, 'utf8');
+    const staticData = JSON.parse(staticDataRaw) as Record<string, StaticReputationEntry>;
+    const operations: DatabaseOperation[] = [];
+
+    for (const [ip, repData] of Object.entries(staticData)) {
+      // Validate IP format (basic validation)
+      if (!ip || typeof ip !== 'string') {
+        console.warn(`Skipping invalid IP during migration: ${ip}`);
+        continue;
+      }
+
+      const migratedData: ReputationData = {
+        score: repData.score || 0,
+        incidents: repData.incidents || 0,
+        blacklisted: repData.blacklisted || false,
+        tags: Array.isArray(repData.tags) ? 
[...repData.tags] : [], + notes: repData.notes || '', + lastUpdate: Date.now(), + source: 'static_migration', + migrated: true + }; + + operations.push({ + type: 'put', + key: `reputation:${ip}`, + value: migratedData + }); + } + + // Mark migration as complete + const migrationRecord: MigrationRecord = { + completed: Date.now(), + count: operations.length + }; + + operations.push({ + type: 'put', + key: migrationKey, + value: migrationRecord + }); + + if (operations.length > 1) { + await threatDB.batch(operations); + console.log(`Threat scorer: Migrated ${operations.length - 1} IP reputation records to database`); + + // Optionally archive the static file + const archivePath = ipReputationPath + '.migrated'; + fs.renameSync(ipReputationPath, archivePath); + console.log(`Threat scorer: Static IP reputation file archived to ${archivePath}`); + } + } catch (err) { + const error = err as Error; + console.error('Failed to migrate static IP reputation data:', error.message); + } +} \ No newline at end of file diff --git a/src/utils/threat-scoring/feature-extractors/behavioral.ts b/src/utils/threat-scoring/feature-extractors/behavioral.ts new file mode 100644 index 0000000..75898d1 --- /dev/null +++ b/src/utils/threat-scoring/feature-extractors/behavioral.ts @@ -0,0 +1,472 @@ +// ============================================================================= +// BEHAVIORAL FEATURE EXTRACTION - SECURE TYPESCRIPT VERSION +// ============================================================================= +// Comprehensive behavioral pattern analysis with security hardening +// Handles completely user-controlled behavioral data with zero trust validation + +import { behavioralDetection } from '../../behavioral-detection.js'; +import { getRequestHistory } from '../database.js'; +import { detectAutomation } from '../analyzers/index.js'; +import { randomBytes } from 'crypto'; +import type { NetworkRequest } from '../../network.js'; +import { requireValidIP } from '../../ip-validation.js'; + +// Type definitions for secure behavioral analysis +export interface RequestPatternFeatures { + readonly enumerationScore: number; + readonly crawlingScore: number; + readonly bruteForceScore: number; + readonly scanningScore: number; + readonly automationScore: number; + readonly patternAnomalies: readonly string[]; + readonly riskScore: number; + readonly validationErrors: readonly string[]; +} + +export interface SessionBehaviorFeatures { + readonly sessionAge: number; + readonly requestCount: number; + readonly uniqueEndpoints: number; + readonly suspiciousBehavior: boolean; + readonly sessionAnomalies: readonly string[]; + readonly riskScore: number; + readonly validationErrors: readonly string[]; +} + +interface BehavioralPattern { + readonly type: string; + readonly score: number; +} + +// Security constants for behavioral validation +const MAX_PATTERN_ANOMALIES = 20; // Prevent memory exhaustion +const MAX_SESSION_ANOMALIES = 15; // Limit session anomaly collection +const MAX_VALIDATION_ERRORS = 10; // Prevent error collection bloat +const MAX_SESSION_ID_LENGTH = 256; // Reasonable session ID limit +const MIN_SESSION_ID_LENGTH = 8; // Minimum for security +const MAX_COOKIE_LENGTH = 4096; // Standard cookie size limit +const MAX_HEADER_VALUE_LENGTH = 8192; // HTTP header limit +const COOKIE_PARSE_TIMEOUT = 50; // 50ms timeout for cookie parsing +const MAX_SCORE_VALUE = 100; // Maximum behavioral score +const MIN_SCORE_VALUE = 0; // Minimum behavioral score + +// Valid session ID pattern (alphanumeric + common 
safe characters) +const SESSION_ID_PATTERN = /^[a-zA-Z0-9_-]+$/; + +// Input validation functions with zero trust approach + +function validateNetworkRequest(request: unknown): NetworkRequest { + if (!request || typeof request !== 'object') { + throw new Error('Request must be an object'); + } + + const req = request as Record; + + // Validate headers exist and are an object + if (!req.headers || typeof req.headers !== 'object') { + throw new Error('Request must have headers object'); + } + + return request as NetworkRequest; +} + +function validateResponse(response: unknown): Record { + if (!response || typeof response !== 'object') { + // Return safe default if no response provided + return { status: 200 }; + } + + const resp = response as Record; + + // Validate status code if present + if (resp.status !== undefined) { + if (typeof resp.status !== 'number' || resp.status < 100 || resp.status > 599) { + throw new Error('Invalid response status code'); + } + } + + return resp; +} + +function validateSessionId(sessionId: unknown): string { + if (!sessionId) { + throw new Error('Session ID is required'); + } + + if (typeof sessionId !== 'string') { + throw new Error('Session ID must be a string'); + } + + if (sessionId.length < MIN_SESSION_ID_LENGTH || sessionId.length > MAX_SESSION_ID_LENGTH) { + throw new Error(`Session ID length must be between ${MIN_SESSION_ID_LENGTH} and ${MAX_SESSION_ID_LENGTH} characters`); + } + + if (!SESSION_ID_PATTERN.test(sessionId)) { + throw new Error('Session ID contains invalid characters'); + } + + return sessionId; +} + +function validateBehavioralScore(score: unknown): number { + if (typeof score !== 'number') { + return 0; // Default to 0 for invalid scores + } + + if (!Number.isFinite(score)) { + return 0; // Handle NaN and Infinity + } + + // Clamp score to valid range + return Math.max(MIN_SCORE_VALUE, Math.min(MAX_SCORE_VALUE, score)); +} + +function validateBehavioralPattern(pattern: unknown): BehavioralPattern | null { + if (!pattern || typeof pattern !== 'object') { + return null; + } + + const p = pattern as Record; + + if (typeof p.type !== 'string' || p.type.length === 0 || p.type.length > 50) { + return null; + } + + const validatedScore = validateBehavioralScore(p.score); + + return { + type: p.type, + score: validatedScore + }; +} + +function normalizePatternScore(score: number): number { + // Normalize behavioral scores to 0-1 range + return Math.max(0, Math.min(1, score / 50)); +} + +// Safe cookie parsing with timeout protection +function parseCookieValue(cookieString: string, name: string): string | null { + if (!cookieString || cookieString.length > MAX_COOKIE_LENGTH) { + return null; + } + + const startTime = Date.now(); + + try { + // Simple cookie parsing with timeout protection + const cookies = cookieString.split(';'); + + for (const cookie of cookies) { + // Timeout protection + if (Date.now() - startTime > COOKIE_PARSE_TIMEOUT) { + break; + } + + const [cookieName, ...cookieValueParts] = cookie.split('='); + if (cookieName?.trim() === name) { + const value = cookieValueParts.join('=').trim(); + return value.length > 0 ? 
value : null; + } + } + + } catch (error) { + // Parsing error - return null + return null; + } + + return null; +} + +// Safe header value extraction +function getHeaderValue(headers: Record, name: string): string | null { + const value = headers[name] || headers[name.toLowerCase()]; + + if (!value) { + return null; + } + + if (typeof value !== 'string') { + const stringValue = String(value); + if (stringValue.length > MAX_HEADER_VALUE_LENGTH) { + return null; + } + return stringValue; + } + + if (value.length > MAX_HEADER_VALUE_LENGTH) { + return null; + } + + return value; +} + +// Secure request pattern feature extraction +export async function extractRequestPatternFeatures( + ip: unknown, + request: unknown, + response?: unknown +): Promise { + const validationErrors: string[] = []; + let riskScore = 0; + + // Initialize safe default values + let enumerationScore = 0; + let crawlingScore = 0; + let bruteForceScore = 0; + let scanningScore = 0; + let automationScore = 0; + const patternAnomalies: string[] = []; + + try { + // Validate inputs with zero trust + const validatedIP = requireValidIP(ip); + const validatedRequest = validateNetworkRequest(request); + const validatedResponse = validateResponse(response); + + // Perform behavioral analysis with error handling + try { + const behavioralAnalysis = await behavioralDetection.analyzeRequest( + validatedIP, + validatedRequest, + validatedResponse + ); + + // Validate and process behavioral patterns + if (behavioralAnalysis && Array.isArray(behavioralAnalysis.patterns)) { + for (const rawPattern of behavioralAnalysis.patterns) { + const pattern = validateBehavioralPattern(rawPattern); + if (!pattern) { + continue; // Skip invalid patterns + } + + const normalizedScore = normalizePatternScore(pattern.score); + + switch (pattern.type) { + case 'enumeration': + enumerationScore = Math.max(enumerationScore, normalizedScore); + if (!patternAnomalies.includes('enumeration_detected')) { + patternAnomalies.push('enumeration_detected'); + } + riskScore += normalizedScore * 30; // High risk for enumeration + break; + + case 'bruteforce': + bruteForceScore = Math.max(bruteForceScore, normalizedScore); + if (!patternAnomalies.includes('bruteforce_detected')) { + patternAnomalies.push('bruteforce_detected'); + } + riskScore += normalizedScore * 40; // Very high risk for brute force + break; + + case 'scanning': + scanningScore = Math.max(scanningScore, normalizedScore); + if (!patternAnomalies.includes('scanning_detected')) { + patternAnomalies.push('scanning_detected'); + } + riskScore += normalizedScore * 35; // High risk for scanning + break; + + case 'abuse': + crawlingScore = Math.max(crawlingScore, normalizedScore); + if (!patternAnomalies.includes('abuse_detected')) { + patternAnomalies.push('abuse_detected'); + } + riskScore += normalizedScore * 25; // Medium-high risk for abuse + break; + } + + // Limit pattern anomalies to prevent memory exhaustion + if (patternAnomalies.length >= MAX_PATTERN_ANOMALIES) { + break; + } + } + } + + } catch (behavioralError) { + validationErrors.push('behavioral_analysis_failed'); + riskScore += 20; // Medium penalty for analysis failure + } + + // Detect automation with error handling + try { + const history = await getRequestHistory(validatedIP, 300000); // Last 5 minutes + const rawAutomationScore = detectAutomation(history); + automationScore = validateBehavioralScore(rawAutomationScore); + + if (automationScore > 0.7) { + patternAnomalies.push('automation_detected'); + riskScore += automationScore * 30; 
// Risk based on automation level + } + + } catch (automationError) { + validationErrors.push('automation_detection_failed'); + riskScore += 10; // Small penalty for detection failure + } + + } catch (validationError) { + // Critical validation failure + validationErrors.push('input_validation_failed'); + riskScore = 100; // Maximum risk for validation failure + } + + // Cap risk score and limit collections + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + const limitedErrors = validationErrors.slice(0, MAX_VALIDATION_ERRORS); + const limitedAnomalies = patternAnomalies.slice(0, MAX_PATTERN_ANOMALIES); + + return { + enumerationScore, + crawlingScore, + bruteForceScore, + scanningScore, + automationScore, + patternAnomalies: limitedAnomalies, + riskScore: finalRiskScore, + validationErrors: limitedErrors + }; +} + +// Secure session behavior feature extraction +export async function extractSessionBehaviorFeatures( + sessionId: unknown, + request: unknown +): Promise { + const validationErrors: string[] = []; + let riskScore = 0; + + // Initialize safe default values + let sessionAge = 0; + let requestCount = 0; + let uniqueEndpoints = 0; + let suspiciousBehavior = false; + const sessionAnomalies: string[] = []; + + try { + // Handle missing session ID + if (!sessionId) { + sessionAnomalies.push('missing_session'); + validationErrors.push('session_id_missing'); + riskScore += 25; // Medium risk for missing session + + return { + sessionAge, + requestCount, + uniqueEndpoints, + suspiciousBehavior, + sessionAnomalies: sessionAnomalies.slice(0, MAX_SESSION_ANOMALIES), + riskScore, + validationErrors: validationErrors.slice(0, MAX_VALIDATION_ERRORS) + }; + } + + // Validate inputs + const validatedSessionId = validateSessionId(sessionId); + const validatedRequest = validateNetworkRequest(request); + + // Safely extract headers + const headers = validatedRequest.headers as Record; + + // Check for session hijacking indicators + try { + const secFetchSite = getHeaderValue(headers, 'sec-fetch-site'); + const referer = getHeaderValue(headers, 'referer'); + + if (secFetchSite === 'cross-site' && !referer) { + sessionAnomalies.push('cross_site_no_referer'); + suspiciousBehavior = true; + riskScore += 30; // High risk for potential session hijacking + } + + } catch (headerError) { + validationErrors.push('header_analysis_failed'); + riskScore += 5; // Small penalty + } + + // Check for session manipulation in cookies + try { + const cookieHeader = getHeaderValue(headers, 'cookie'); + if (cookieHeader) { + // Count session ID occurrences safely + const sessionIdCount = (cookieHeader.match(/session_id=/g) || []).length; + if (sessionIdCount > 1) { + sessionAnomalies.push('multiple_session_ids'); + suspiciousBehavior = true; + riskScore += 40; // High risk for session manipulation + } + + // Check for session ID in unexpected places + if (cookieHeader.includes('session_id=') && cookieHeader.includes('sid=')) { + sessionAnomalies.push('duplicate_session_mechanisms'); + suspiciousBehavior = true; + riskScore += 25; // Medium-high risk + } + } + + } catch (cookieError) { + validationErrors.push('cookie_analysis_failed'); + riskScore += 5; // Small penalty + } + + // Additional session validation + if (validatedSessionId.length > 128) { + sessionAnomalies.push('oversized_session_id'); + suspiciousBehavior = true; + riskScore += 20; // Medium risk + } + + } catch (validationError) { + // Critical validation failure + validationErrors.push('session_validation_failed'); + riskScore = 100; // 
Maximum risk for validation failure + suspiciousBehavior = true; + } + + // Cap risk score and limit collections + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + const limitedErrors = validationErrors.slice(0, MAX_VALIDATION_ERRORS); + const limitedAnomalies = sessionAnomalies.slice(0, MAX_SESSION_ANOMALIES); + + return { + sessionAge, + requestCount, + uniqueEndpoints, + suspiciousBehavior, + sessionAnomalies: limitedAnomalies, + riskScore: finalRiskScore, + validationErrors: limitedErrors + }; +} + +// Secure session ID extraction and generation +export function getSessionId(request: unknown): string { + try { + const validatedRequest = validateNetworkRequest(request); + const headers = validatedRequest.headers as Record; + + // Extract session ID from cookies safely + const cookieHeader = getHeaderValue(headers, 'cookie'); + if (cookieHeader) { + const sessionId = parseCookieValue(cookieHeader, 'session_id'); + if (sessionId) { + try { + // Validate extracted session ID + return validateSessionId(sessionId); + } catch (error) { + // Invalid session ID - generate new one + } + } + } + + } catch (error) { + // Extraction failed - generate new session ID + } + + // Generate new session ID with error handling + try { + return randomBytes(16).toString('hex'); + } catch (cryptoError) { + // Fallback to timestamp-based ID if crypto fails + return `fallback_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`; + } +} \ No newline at end of file diff --git a/src/utils/threat-scoring/feature-extractors/content.ts b/src/utils/threat-scoring/feature-extractors/content.ts new file mode 100644 index 0000000..17848e1 --- /dev/null +++ b/src/utils/threat-scoring/feature-extractors/content.ts @@ -0,0 +1,450 @@ +// ============================================================================= +// CONTENT FEATURE EXTRACTION - SECURE TYPESCRIPT VERSION +// ============================================================================= +// Comprehensive content analysis with JSON bomb protection and ReDoS prevention +// Handles completely user-controlled request bodies and URL parameters with zero trust + +import { calculateEntropy, detectEncodingLevels } from '../analyzers/index.js'; +import type { NetworkRequest } from '../../network.js'; + +// Type definitions for secure content analysis +export interface PayloadFeatures { + readonly payloadSize: number; + readonly hasSQLPatterns: boolean; + readonly hasXSSPatterns: boolean; + readonly hasCommandPatterns: boolean; + readonly hasPathTraversal: boolean; + readonly encodingLevels: number; + readonly entropy: number; + readonly suspiciousPatterns: readonly string[]; + readonly riskScore: number; + readonly processingErrors: readonly string[]; +} + +export interface NormalizedWAFSignals { + readonly sqlInjection: boolean; + readonly xss: boolean; + readonly commandInjection: boolean; + readonly pathTraversal: boolean; + readonly totalViolations: number; +} + +// Security constants for content processing +const MAX_URL_LENGTH = 8192; // 8KB max URL length +const MAX_QUERY_STRING_LENGTH = 4096; // 4KB max query string +const MAX_BODY_SIZE = 10 * 1024 * 1024; // 10MB max body size +const MAX_ENCODING_LEVELS = 10; // Prevent infinite decoding loops +const REGEX_TIMEOUT_MS = 50; // Prevent ReDoS attacks (shorter for content analysis) +const MAX_SUSPICIOUS_PATTERNS = 100; // Prevent memory exhaustion +const MAX_JSON_STRINGIFY_SIZE = 1024 * 1024; // 1MB max for JSON.stringify + +// Safe regex patterns with ReDoS protection +const 
SAFE_CONTENT_PATTERNS = {
+  // SQL injection patterns (simplified to prevent ReDoS)
+  SQL_KEYWORDS: /\b(union|select|insert|update|delete|drop|create|alter|exec|script)\b/gi,
+  SQL_CHARS: /--|\/\*|\*\//g,
+
+  // XSS patterns (simplified and safe)
+  XSS_TAGS: /<\/?[a-z][^>]*>/gi,
+  XSS_EVENTS: /\bon[a-z]+\s*=/gi,
+  XSS_JAVASCRIPT: /javascript\s*:/gi,
+  XSS_SCRIPT: /<script[^>]*>/gi,
+
+  // Command injection patterns
+  COMMAND_CHARS: /[;&|`]/g,
+  COMMAND_VARS: /\$\([^)]*\)/g,
+  ENCODED_NEWLINES: /%0[ad]/gi,
+
+  // Path traversal patterns
+  PATH_DOTS: /\.\.[\\/]/g,
+  ENCODED_DOTS: /%2e%2e|%252e%252e/gi
+} as const;
+
+// Input validation functions with zero trust approach
+function validateRequestInput(request: unknown): NetworkRequest & { body?: unknown } {
+  if (!request || typeof request !== 'object') {
+    throw new Error('Request must be an object');
+  }
+
+  return request as NetworkRequest & { body?: unknown };
+}
+
+function validateAndSanitizeURL(url: unknown): string {
+  if (typeof url !== 'string') {
+    return '';
+  }
+
+  if (url.length > MAX_URL_LENGTH) {
+    throw new Error(`URL exceeds maximum length of ${MAX_URL_LENGTH} characters`);
+  }
+
+  return url;
+}
+
+function validateRequestBody(body: unknown): unknown {
+  if (body === null || body === undefined) {
+    return null;
+  }
+
+  // Check if it's a string
+  if (typeof body === 'string') {
+    if (body.length > MAX_BODY_SIZE) {
+      throw new Error(`Request body string exceeds maximum size of ${MAX_BODY_SIZE} characters`);
+    }
+    return body;
+  }
+
+  // For objects, we'll validate during JSON.stringify with size limits
+  return body;
+}
+
+// Safe JSON.stringify with protection against circular references and size limits
+function safeJSONStringify(obj: unknown, maxSize: number = MAX_JSON_STRINGIFY_SIZE): string {
+  if (obj === null || obj === undefined) {
+    return '';
+  }
+
+  if (typeof obj === 'string') {
+    return obj;
+  }
+
+  try {
+    // Use a replacer to detect circular references and limit depth
+    const seen = new WeakSet();
+    let depth = 0;
+    const maxDepth = 50; // Prevent deeply nested JSON bombs
+
+    const replacer = (_key: string, value: unknown): unknown => {
+      if (depth++ > maxDepth) {
+        return '[Max Depth Exceeded]';
+      }
+
+      if (typeof value === 'object' && value !== null) {
+        if (seen.has(value)) {
+          return '[Circular Reference]';
+        }
+        seen.add(value);
+      }
+
+      depth--;
+      return value;
+    };
+
+    const jsonString = JSON.stringify(obj, replacer);
+
+    if (jsonString.length > maxSize) {
+      throw new Error(`JSON string exceeds maximum size of ${maxSize} characters`);
+    }
+
+    return jsonString;
+  } catch (error) {
+    if (error instanceof Error && error.message.includes('maximum size')) {
+      throw error; // Re-throw size errors
+    }
+    // For other JSON errors (circular refs, etc.), return safe fallback
+    return '[JSON Serialization Error]';
+  }
+}
+
+// ReDoS-safe pattern matching with timeout protection
+function safePatternTest(pattern: RegExp, input: string, timeoutMs: number = REGEX_TIMEOUT_MS): boolean {
+  const startTime = Date.now();
+
+  try {
+    // Reset regex state
+    pattern.lastIndex = 0;
+
+    // Limit input size for regex processing to prevent catastrophic backtracking
+    const limitedInput = input.length > 10000 ? 
input.substring(0, 10000) : input; + + const result = pattern.test(limitedInput); + + if (Date.now() - startTime > timeoutMs) { + throw new Error('Regex execution timeout - possible ReDoS attack'); + } + + return result; + } catch (error) { + if (error instanceof Error && error.message.includes('timeout')) { + throw error; // Re-throw timeout errors for logging + } + // For other regex errors, assume no match (fail safe) + return false; + } +} + +// Secure content analysis with comprehensive validation +function analyzeContentSafely(content: string, _contentType: string): { + hasSQLPatterns: boolean; + hasXSSPatterns: boolean; + hasCommandPatterns: boolean; + hasPathTraversal: boolean; + suspiciousPatterns: string[]; + encodingLevels: number; + entropy: number; + processingErrors: string[]; +} { + const suspiciousPatterns: string[] = []; + const processingErrors: string[] = []; + let hasSQLPatterns = false; + let hasXSSPatterns = false; + let hasCommandPatterns = false; + let hasPathTraversal = false; + let encodingLevels = 0; + let entropy = 0; + + try { + // SQL injection detection with safe patterns + try { + if (safePatternTest(SAFE_CONTENT_PATTERNS.SQL_KEYWORDS, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.SQL_CHARS, content)) { + hasSQLPatterns = true; + suspiciousPatterns.push('sql_keywords'); + } + } catch (error) { + processingErrors.push('sql_detection_timeout'); + } + + // XSS detection with safe patterns + try { + if (safePatternTest(SAFE_CONTENT_PATTERNS.XSS_TAGS, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.XSS_EVENTS, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.XSS_JAVASCRIPT, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.XSS_SCRIPT, content)) { + hasXSSPatterns = true; + suspiciousPatterns.push('xss_patterns'); + } + } catch (error) { + processingErrors.push('xss_detection_timeout'); + } + + // Command injection detection with safe patterns + try { + if (safePatternTest(SAFE_CONTENT_PATTERNS.COMMAND_CHARS, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.COMMAND_VARS, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.ENCODED_NEWLINES, content)) { + hasCommandPatterns = true; + suspiciousPatterns.push('command_chars'); + } + } catch (error) { + processingErrors.push('command_detection_timeout'); + } + + // Path traversal detection with safe patterns + try { + if (safePatternTest(SAFE_CONTENT_PATTERNS.PATH_DOTS, content) || + safePatternTest(SAFE_CONTENT_PATTERNS.ENCODED_DOTS, content)) { + hasPathTraversal = true; + suspiciousPatterns.push('path_traversal'); + } + } catch (error) { + processingErrors.push('path_detection_timeout'); + } + + // Safe encoding level detection + try { + encodingLevels = Math.min(detectEncodingLevels(content), MAX_ENCODING_LEVELS); + } catch (error) { + processingErrors.push('encoding_detection_failed'); + encodingLevels = 0; + } + + // Safe entropy calculation + try { + entropy = calculateEntropy(content); + } catch (error) { + processingErrors.push('entropy_calculation_failed'); + entropy = 0; + } + + } catch (error) { + processingErrors.push('general_analysis_error'); + } + + return { + hasSQLPatterns, + hasXSSPatterns, + hasCommandPatterns, + hasPathTraversal, + suspiciousPatterns: suspiciousPatterns.slice(0, MAX_SUSPICIOUS_PATTERNS), + encodingLevels, + entropy, + processingErrors + }; +} + +// Main payload extraction function with comprehensive security +export async function extractPayloadFeatures(request: unknown): Promise { + const processingErrors: string[] = []; + let payloadSize = 0; + let 
hasSQLPatterns = false; + let hasXSSPatterns = false; + let hasCommandPatterns = false; + let hasPathTraversal = false; + let encodingLevels = 0; + let entropy = 0; + let allSuspiciousPatterns: string[] = []; + let riskScore = 0; + + try { + // Validate request input with zero trust + const validatedRequest = validateRequestInput(request); + + // Analyze URL parameters with validation + try { + const url = validateAndSanitizeURL(validatedRequest.url); + + if (url && url.includes('?')) { + const urlParts = url.split('?'); + if (urlParts.length > 1) { + const queryString = urlParts[1]; + + if (queryString && queryString.length > MAX_QUERY_STRING_LENGTH) { + processingErrors.push('query_string_too_large'); + riskScore += 30; + } else if (queryString) { + payloadSize += queryString.length; + + const urlAnalysis = analyzeContentSafely(queryString, 'query_string'); + + if (urlAnalysis.hasSQLPatterns) hasSQLPatterns = true; + if (urlAnalysis.hasXSSPatterns) hasXSSPatterns = true; + if (urlAnalysis.hasCommandPatterns) hasCommandPatterns = true; + if (urlAnalysis.hasPathTraversal) hasPathTraversal = true; + + encodingLevels = Math.max(encodingLevels, urlAnalysis.encodingLevels); + entropy = Math.max(entropy, urlAnalysis.entropy); + allSuspiciousPatterns.push(...urlAnalysis.suspiciousPatterns); + processingErrors.push(...urlAnalysis.processingErrors); + } + } + } + } catch (error) { + processingErrors.push('url_analysis_failed'); + riskScore += 20; + } + + // Analyze request body with comprehensive validation + try { + const validatedBody = validateRequestBody(validatedRequest.body); + + if (validatedBody !== null && validatedBody !== undefined) { + let bodyStr: string; + + try { + bodyStr = typeof validatedBody === 'string' + ? validatedBody + : safeJSONStringify(validatedBody); + } catch (error) { + processingErrors.push('json_stringify_failed'); + riskScore += 25; + bodyStr = '[Body Processing Failed]'; + } + + if (bodyStr.length > MAX_BODY_SIZE) { + processingErrors.push('body_too_large'); + riskScore += 40; + } else { + payloadSize += bodyStr.length; + + const bodyAnalysis = analyzeContentSafely(bodyStr, 'request_body'); + + if (bodyAnalysis.hasSQLPatterns) hasSQLPatterns = true; + if (bodyAnalysis.hasXSSPatterns) hasXSSPatterns = true; + if (bodyAnalysis.hasCommandPatterns) hasCommandPatterns = true; + if (bodyAnalysis.hasPathTraversal) hasPathTraversal = true; + + encodingLevels = Math.max(encodingLevels, bodyAnalysis.encodingLevels); + entropy = Math.max(entropy, bodyAnalysis.entropy); + + // Merge patterns, avoiding duplicates + for (const pattern of bodyAnalysis.suspiciousPatterns) { + if (!allSuspiciousPatterns.includes(pattern)) { + allSuspiciousPatterns.push(pattern); + } + } + + processingErrors.push(...bodyAnalysis.processingErrors); + } + } + } catch (error) { + processingErrors.push('body_analysis_failed'); + riskScore += 30; + } + + } catch (error) { + processingErrors.push('request_validation_failed'); + riskScore = 100; // Maximum risk for validation failure + } + + // Calculate risk score based on findings - MUCH MORE AGGRESSIVE + if (hasSQLPatterns) riskScore += 80; // Increased from 50 + if (hasXSSPatterns) riskScore += 85; // Increased from 45 - XSS is critical + if (hasCommandPatterns) riskScore += 90; // Increased from 55 - most dangerous + if (hasPathTraversal) riskScore += 70; // Increased from 40 + if (encodingLevels > 3) riskScore += 30; // Increased from 20 - likely evasion + if (encodingLevels > 5) riskScore += 50; // Very suspicious encoding depth + if (entropy > 
6.0) riskScore += 25; // Increased from 15 + if (payloadSize > 1024 * 1024) riskScore += 20; // Increased from 10 + + // Limit collections to prevent memory exhaustion + const limitedPatterns = allSuspiciousPatterns.slice(0, MAX_SUSPICIOUS_PATTERNS); + const limitedErrors = processingErrors.slice(0, 20); + + // Cap risk score + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + + return { + payloadSize, + hasSQLPatterns, + hasXSSPatterns, + hasCommandPatterns, + hasPathTraversal, + encodingLevels, + entropy, + suspiciousPatterns: limitedPatterns, + riskScore: finalRiskScore, + processingErrors: limitedErrors + }; +} + +// Secure WAF signal normalization with input validation +export function normalizeWAFSignals(wafSignals: unknown): NormalizedWAFSignals { + const defaultSignals: NormalizedWAFSignals = { + sqlInjection: false, + xss: false, + commandInjection: false, + pathTraversal: false, + totalViolations: 0 + }; + + // Validate input + if (!wafSignals || typeof wafSignals !== 'object') { + return defaultSignals; + } + + try { + const signals = wafSignals as Record; + + // Safely extract boolean signals + const sqlInjection = Boolean(signals.sqlInjection || signals.sql_injection); + const xss = Boolean(signals.xss || signals.xssAttempt); + const commandInjection = Boolean(signals.commandInjection || signals.command_injection); + const pathTraversal = Boolean(signals.pathTraversal || signals.path_traversal); + + // Count total violations + const totalViolations = [sqlInjection, xss, commandInjection, pathTraversal].filter(Boolean).length; + + return { + sqlInjection, + xss, + commandInjection, + pathTraversal, + totalViolations + }; + + } catch (error) { + // On any error, return safe defaults + return defaultSignals; + } +} \ No newline at end of file diff --git a/src/utils/threat-scoring/feature-extractors/index.ts b/src/utils/threat-scoring/feature-extractors/index.ts new file mode 100644 index 0000000..747b720 --- /dev/null +++ b/src/utils/threat-scoring/feature-extractors/index.ts @@ -0,0 +1,91 @@ +// ============================================================================= +// FEATURE EXTRACTOR EXPORTS (TypeScript) +// ============================================================================= +// Central export hub for all feature extraction functions used in threat scoring +// This module provides a clean interface for accessing all feature extractors + +// Network-based feature extractors +export { + extractIPReputationFeatures, + extractNetworkAnomalyFeatures +} from './network.js'; + +// Behavioral feature extractors +export { + extractRequestPatternFeatures, + extractSessionBehaviorFeatures, + getSessionId +} from './behavioral.js'; + +// Content-based feature extractors +export { + extractPayloadFeatures, + normalizeWAFSignals +} from './content.js'; + +// Temporal feature extractors +export { + extractTimingFeatures, + extractVelocityFeatures +} from './temporal.js'; + +// Header analysis features +export { + extractHeaderFeatures +} from '../analyzers/headers.js'; + +// ============================================================================= +// UTILITY FUNCTIONS +// ============================================================================= + +/** + * Gets a list of all available feature extractor categories + * @returns Array of feature extractor category names + */ +export function getFeatureExtractorCategories(): readonly string[] { + return ['network', 'behavioral', 'content', 'temporal', 'headers'] as const; +} + +/** + * Validates that all 
required feature extractors are available + * @returns True if all extractors are properly loaded + */ +export function validateFeatureExtractors(): boolean { + try { + // Basic validation - just check if we can access the module + // More detailed validation can be done when the modules are converted to TypeScript + return true; + } catch (error) { + console.error('Feature extractor validation failed:', error); + return false; + } +} + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= +// Basic type definitions for feature extractor functions until modules are converted + +export type FeatureExtractorFunction = (...args: any[]) => Promise | unknown; + +export interface FeatureExtractorCategories { + readonly network: readonly string[]; + readonly behavioral: readonly string[]; + readonly content: readonly string[]; + readonly temporal: readonly string[]; + readonly headers: readonly string[]; +} + +/** + * Gets the available feature extractors by category + * @returns Object with arrays of extractor names by category + */ +export function getFeatureExtractorsByCategory(): FeatureExtractorCategories { + return { + network: ['extractIPReputationFeatures', 'extractNetworkAnomalyFeatures'], + behavioral: ['extractRequestPatternFeatures', 'extractSessionBehaviorFeatures', 'getSessionId'], + content: ['extractPayloadFeatures', 'normalizeWAFSignals'], + temporal: ['extractTimingFeatures', 'extractVelocityFeatures'], + headers: ['extractHeaderFeatures'] + } as const; +} \ No newline at end of file diff --git a/src/utils/threat-scoring/feature-extractors/network.ts b/src/utils/threat-scoring/feature-extractors/network.ts new file mode 100644 index 0000000..b2660d4 --- /dev/null +++ b/src/utils/threat-scoring/feature-extractors/network.ts @@ -0,0 +1,312 @@ +// ============================================================================= +// NETWORK FEATURE EXTRACTION - SECURE TYPESCRIPT VERSION +// ============================================================================= +// Comprehensive network analysis with IP validation and header spoofing protection +// Handles completely user-controlled network data with zero trust validation + +import { getReputationData } from '../database.js'; +import { detectHeaderSpoofing } from '../analyzers/index.js'; +import { requireValidIP } from '../../ip-validation.js'; +import type { NetworkRequest } from '../../network.js'; + +// Type definitions for secure network analysis +export interface IPReputationFeatures { + readonly isBlacklisted: boolean; + readonly reputationScore: number; + readonly asnRisk: number; + readonly previousIncidents: number; + readonly reputationSource: string; + readonly riskScore: number; + readonly validationErrors: readonly string[]; +} + +export interface NetworkAnomalyFeatures { + readonly portScanningBehavior: boolean; + readonly unusualProtocol: boolean; + readonly spoofedHeaders: boolean; + readonly connectionAnomalies: number; + readonly riskScore: number; + readonly detectionErrors: readonly string[]; +} + +interface DatabaseReputationData { + readonly score?: number; + readonly incidents?: number; + readonly blacklisted?: boolean; + readonly source?: string; + readonly migrated?: boolean; +} + +interface ConnectionData { + readonly uniquePorts: number; + readonly protocols: readonly string[]; +} + +// Security constants for network validation +const MAX_REPUTATION_SCORE = 100; +const 
MIN_REPUTATION_SCORE = -100;
+const MAX_INCIDENTS = 1000000; // Reasonable upper bound
+const MAX_UNIQUE_PORTS = 65535; // Max possible ports
+const MAX_PROTOCOLS = 100; // Reasonable protocol limit
+const MAX_VALIDATION_ERRORS = 20; // Prevent memory exhaustion
+
+function validateNetworkRequest(request: unknown): NetworkRequest {
+  if (!request || typeof request !== 'object') {
+    throw new Error('Request must be an object');
+  }
+
+  const req = request as Record<string, unknown>;
+
+  // Validate headers exist
+  if (!req.headers || typeof req.headers !== 'object') {
+    throw new Error('Request must have headers object');
+  }
+
+  return request as NetworkRequest;
+}
+
+function validateDatabaseReputationData(data: unknown): DatabaseReputationData {
+  if (!data || typeof data !== 'object') {
+    return {}; // Return empty object for missing data
+  }
+
+  const dbData = data as Record<string, unknown>;
+
+  // Build validated object (not assigning to readonly properties)
+  const validated: Record<string, unknown> = {};
+
+  // Validate score
+  if (typeof dbData.score === 'number' &&
+      dbData.score >= MIN_REPUTATION_SCORE &&
+      dbData.score <= MAX_REPUTATION_SCORE &&
+      Number.isFinite(dbData.score)) {
+    validated.score = dbData.score;
+  }
+
+  // Validate incidents
+  if (typeof dbData.incidents === 'number' &&
+      dbData.incidents >= 0 &&
+      dbData.incidents <= MAX_INCIDENTS &&
+      Number.isInteger(dbData.incidents)) {
+    validated.incidents = dbData.incidents;
+  }
+
+  // Validate blacklisted flag
+  if (typeof dbData.blacklisted === 'boolean') {
+    validated.blacklisted = dbData.blacklisted;
+  }
+
+  // Validate source
+  if (typeof dbData.source === 'string' && dbData.source.length <= 100) {
+    validated.source = dbData.source;
+  }
+
+  // Validate migrated flag
+  if (typeof dbData.migrated === 'boolean') {
+    validated.migrated = dbData.migrated;
+  }
+
+  return validated as DatabaseReputationData;
+}
+
+function validateConnectionData(data: unknown): ConnectionData {
+  const defaultData: ConnectionData = {
+    uniquePorts: 0,
+    protocols: []
+  };
+
+  if (!data || typeof data !== 'object') {
+    return defaultData;
+  }
+
+  const connData = data as Record<string, unknown>;
+
+  // Validate uniquePorts
+  let uniquePorts = 0;
+  if (typeof connData.uniquePorts === 'number' &&
+      connData.uniquePorts >= 0 &&
+      connData.uniquePorts <= MAX_UNIQUE_PORTS &&
+      Number.isInteger(connData.uniquePorts)) {
+    uniquePorts = connData.uniquePorts;
+  }
+
+  // Validate protocols array
+  let protocols: string[] = [];
+  if (Array.isArray(connData.protocols)) {
+    protocols = connData.protocols
+      .filter((p): p is string => typeof p === 'string' && p.length <= 20)
+      .slice(0, MAX_PROTOCOLS); // Limit array size
+  }
+
+  return {
+    uniquePorts,
+    protocols
+  };
+}
+
+// Secure IP reputation extraction with comprehensive validation
+export async function extractIPReputationFeatures(ip: unknown): Promise<IPReputationFeatures> {
+  const validationErrors: string[] = [];
+  let riskScore = 0;
+
+  // Mutable working values
+  let isBlacklisted = false;
+  let reputationScore = 0;
+  let asnRisk = 0;
+  let previousIncidents = 0;
+  let reputationSource = 'none';
+
+  try {
+    // Use centralized IP validation
+    const validatedIP = requireValidIP(ip);
+
+    // Check database reputation with error handling
+    try {
+      const dbReputation = await getReputationData(validatedIP);
+      const validatedDbData = validateDatabaseReputationData(dbReputation);
+
+      if (validatedDbData.score !== undefined) {
+        reputationScore = validatedDbData.score;
+        riskScore += Math.max(0, validatedDbData.score); // Only positive scores add risk
+      }
+
+      if 
(validatedDbData.incidents !== undefined) { + previousIncidents = validatedDbData.incidents; + if (validatedDbData.incidents > 0) { + riskScore += Math.min(20, validatedDbData.incidents * 2); // Cap incident-based risk + } + } + + if (validatedDbData.blacklisted !== undefined) { + isBlacklisted = validatedDbData.blacklisted; + if (validatedDbData.blacklisted) { + riskScore += 80; // High risk for blacklisted IPs + } + } + + if (validatedDbData.source !== undefined) { + reputationSource = validatedDbData.source; + } + + // Safe logging with validated data + if (validatedDbData.migrated) { + console.log(`Threat scorer: Using migrated reputation data for ${validatedIP}: score=${reputationScore}`); + } else if (reputationScore !== 0 || previousIncidents > 0) { + console.log(`Threat scorer: Using dynamic reputation for ${validatedIP}: score=${reputationScore}, incidents=${previousIncidents}`); + } + + } catch (dbError) { + // Database errors are normal for clean IPs + console.log(`Threat scorer: No reputation history found for ${validatedIP} (clean IP)`); + validationErrors.push('reputation_lookup_failed'); + } + + } catch (ipError) { + // IP validation failed - high risk + validationErrors.push('ip_validation_failed'); + riskScore = 100; // Maximum risk for invalid IP + reputationSource = 'validation_error'; + } + + // Cap risk score and limit validation errors + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + const limitedErrors = validationErrors.slice(0, MAX_VALIDATION_ERRORS); + + return { + isBlacklisted, + reputationScore, + asnRisk, + previousIncidents, + reputationSource, + riskScore: finalRiskScore, + validationErrors: limitedErrors + }; +} + +// Secure network anomaly detection with validation +export async function extractNetworkAnomalyFeatures(ip: unknown, request: unknown): Promise { + const detectionErrors: string[] = []; + let riskScore = 0; + + // Mutable working values + let portScanningBehavior = false; + let unusualProtocol = false; + let spoofedHeaders = false; + let connectionAnomalies = 0; + + try { + // Use centralized IP validation + const validatedIP = requireValidIP(ip); + const validatedRequest = validateNetworkRequest(request); + + // Check for port scanning patterns with error handling + try { + const recentConnections = await getRecentConnections(validatedIP); + const validatedConnData = validateConnectionData(recentConnections); + + if (validatedConnData.uniquePorts > 10) { + portScanningBehavior = true; + connectionAnomalies++; + riskScore += 40; // High risk for port scanning + } + + // Check for unusual protocol patterns + if (validatedConnData.protocols.length > 5) { + unusualProtocol = true; + connectionAnomalies++; + riskScore += 20; // Medium risk for unusual protocols + } + + } catch (connError) { + detectionErrors.push('connection_analysis_failed'); + riskScore += 10; // Small penalty for analysis failure + } + + // Check for header spoofing with error handling + try { + if (detectHeaderSpoofing(validatedRequest.headers)) { + spoofedHeaders = true; + connectionAnomalies++; + riskScore += 35; // High risk for header spoofing + } + } catch (headerError) { + detectionErrors.push('header_spoofing_check_failed'); + riskScore += 10; // Small penalty for detection failure + } + + } catch (validationError) { + // Input validation failed - high risk + detectionErrors.push('input_validation_failed'); + riskScore = 100; // Maximum risk for validation failure + connectionAnomalies = 999; // Indicate severe anomaly + } + + // Cap risk score and limit 
detection errors + const finalRiskScore = Math.max(0, Math.min(100, riskScore)); + const limitedErrors = detectionErrors.slice(0, MAX_VALIDATION_ERRORS); + + return { + portScanningBehavior, + unusualProtocol, + spoofedHeaders, + connectionAnomalies, + riskScore: finalRiskScore, + detectionErrors: limitedErrors + }; +} + +async function getRecentConnections(ip: string): Promise { + try { + // Track actual connection data in production environment + return { + uniquePorts: 0, + protocols: ['http', 'https'] + }; + } catch (error) { + console.warn(`Connection data retrieval failed for ${ip}:`, error); + return { + uniquePorts: 0, + protocols: [] + }; + } +} \ No newline at end of file diff --git a/src/utils/threat-scoring/feature-extractors/temporal.ts b/src/utils/threat-scoring/feature-extractors/temporal.ts new file mode 100644 index 0000000..98ef9a7 --- /dev/null +++ b/src/utils/threat-scoring/feature-extractors/temporal.ts @@ -0,0 +1,446 @@ +// ============================================================================= +// TEMPORAL FEATURE EXTRACTION (TypeScript) +// ============================================================================= + +import { getRequestHistory, behaviorDB } from '../database.js'; +import { calculateDistance } from '../analyzers/index.js'; +import { parseDuration } from '../../time.js'; + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +interface TimingFeatures { + readonly requestRate: number; + readonly burstBehavior: boolean; + readonly timingAnomalies: number; + readonly isNightTime: boolean; + readonly isWeekend: boolean; + readonly requestSpacing?: number; + readonly peakHourActivity?: boolean; +} + +interface VelocityFeatures { + readonly impossibleTravel: boolean; + readonly rapidLocationChange: boolean; + readonly travelVelocity: number; + readonly geoAnomalies: readonly string[]; + readonly distanceTraveled?: number; + readonly timeElapsed?: number; +} + +interface GeoLocation { + readonly lat: number; + readonly lon: number; +} + +interface GeoData { + readonly latitude?: number; + readonly longitude?: number; + readonly country?: string; + readonly continent?: string; + readonly asn?: number; + readonly isp?: string; +} + +interface RequestHistoryEntry { + readonly timestamp: number; + readonly method?: string; + readonly path?: string; + readonly userAgent?: string; + readonly score?: number; +} + +interface BehaviorData { + readonly lastLocation?: GeoLocation; + readonly lastSeen?: number; + readonly requestCount?: number; + readonly [key: string]: unknown; +} + +interface TimingAnalysisConfig { + readonly historyWindowMs: number; + readonly burstThreshold: number; + readonly minRequestsForBurst: number; + readonly nightStartHour: number; + readonly nightEndHour: number; + readonly maxCommercialFlightSpeed: number; + readonly rapidMovementThreshold: number; + readonly rapidMovementTimeWindow: number; +} + +// Configuration constants +const TIMING_CONFIG: TimingAnalysisConfig = { + historyWindowMs: parseDuration('5m'), // 5 minutes + burstThreshold: 0.6, // 60% of intervals must be short for burst detection + minRequestsForBurst: 10, // Minimum requests needed for burst analysis + nightStartHour: 2, // 2 AM + nightEndHour: 6, // 6 AM + maxCommercialFlightSpeed: 900, // km/h + rapidMovementThreshold: 200, // km/h + rapidMovementTimeWindow: 3600 // 1 hour in seconds +} as const; + +// 
============================================================================= +// TIMING FEATURE EXTRACTION +// ============================================================================= + +/** + * Extracts timing-based features from request patterns + * Analyzes request frequency, burst behavior, and temporal anomalies + * + * @param ip - Client IP address for history lookup + * @param timestamp - Current request timestamp + * @returns Promise resolving to timing features + */ +export async function extractTimingFeatures(ip: string, timestamp: number): Promise { + // Input validation + if (!ip || typeof ip !== 'string') { + throw new Error('Invalid IP address provided to extractTimingFeatures'); + } + + if (!timestamp || typeof timestamp !== 'number' || timestamp <= 0) { + throw new Error('Invalid timestamp provided to extractTimingFeatures'); + } + + const features: TimingFeatures = { + requestRate: 0, + burstBehavior: false, + timingAnomalies: 0, + isNightTime: false, + isWeekend: false + }; + + try { + // Get request history for timing analysis + const history = await getRequestHistory(ip, TIMING_CONFIG.historyWindowMs); + + if (!Array.isArray(history) || history.length === 0) { + return features; + } + + // Calculate request rate (requests per minute) + const oldestRequest = Math.min(...history.map(h => h.timestamp)); + const timeSpan = Math.max(timestamp - oldestRequest, 1000); // Avoid division by zero + const requestRate = (history.length / timeSpan) * 60000; // Convert to per minute + + // Apply reasonable bounds to request rate + const boundedRequestRate = Math.min(requestRate, 1000); // Cap at 1000 requests/minute + + const updatedFeatures: TimingFeatures = { + ...features, + requestRate: Math.round(boundedRequestRate * 100) / 100, // Round to 2 decimal places + requestSpacing: timeSpan / history.length + }; + + // Detect burst behavior + if (history.length >= TIMING_CONFIG.minRequestsForBurst) { + const burstAnalysis = analyzeBurstBehavior(history, timestamp); + Object.assign(updatedFeatures, { + burstBehavior: burstAnalysis.isBurst, + timingAnomalies: updatedFeatures.timingAnomalies + (burstAnalysis.isBurst ? 
1 : 0) + }); + } + + // Analyze temporal patterns + const temporalAnalysis = analyzeTemporalPatterns(timestamp); + Object.assign(updatedFeatures, { + isNightTime: temporalAnalysis.isNightTime, + isWeekend: temporalAnalysis.isWeekend, + peakHourActivity: temporalAnalysis.isPeakHour, + timingAnomalies: updatedFeatures.timingAnomalies + temporalAnalysis.anomalyCount + }); + + return updatedFeatures; + } catch (err) { + const error = err as Error; + console.warn(`Failed to extract timing features for IP ${ip}:`, error.message); + return features; + } +} + +/** + * Analyzes request patterns for burst behavior + * @param history - Array of request history entries + * @param currentTimestamp - Current request timestamp + * @returns Burst analysis results + */ +function analyzeBurstBehavior( + history: readonly RequestHistoryEntry[], + _currentTimestamp: number +): { isBurst: boolean; shortIntervalRatio: number } { + if (history.length < 2) { + return { isBurst: false, shortIntervalRatio: 0 }; + } + + // Calculate intervals between consecutive requests + const intervals: number[] = []; + const sortedHistory = [...history].sort((a, b) => a.timestamp - b.timestamp); + + for (let i = 1; i < sortedHistory.length; i++) { + const current = sortedHistory[i]; + const previous = sortedHistory[i - 1]; + if (current && previous && current.timestamp && previous.timestamp) { + const interval = current.timestamp - previous.timestamp; + if (interval > 0) { // Only include positive intervals + intervals.push(interval); + } + } + } + + if (intervals.length === 0) { + return { isBurst: false, shortIntervalRatio: 0 }; + } + + // Calculate average interval + const avgInterval = intervals.reduce((sum, interval) => sum + interval, 0) / intervals.length; + + // Define "short" intervals as those significantly below average + const shortIntervalThreshold = avgInterval * 0.2; + const shortIntervals = intervals.filter(interval => interval < shortIntervalThreshold); + const shortIntervalRatio = shortIntervals.length / intervals.length; + + // Burst detected if majority of intervals are short + const isBurst = shortIntervalRatio > TIMING_CONFIG.burstThreshold; + + return { isBurst, shortIntervalRatio }; +} + +/** + * Analyzes temporal patterns for unusual timing + * @param timestamp - Current request timestamp + * @returns Temporal analysis results + */ +function analyzeTemporalPatterns(timestamp: number): { + isNightTime: boolean; + isWeekend: boolean; + isPeakHour: boolean; + anomalyCount: number; +} { + const date = new Date(timestamp); + const hour = date.getHours(); + const day = date.getDay(); + + let anomalyCount = 0; + + // Night time detection (2 AM - 6 AM) + const isNightTime = hour >= TIMING_CONFIG.nightStartHour && hour <= TIMING_CONFIG.nightEndHour; + if (isNightTime) { + anomalyCount++; + } + + // Weekend detection (Saturday = 6, Sunday = 0) + const isWeekend = day === 0 || day === 6; + + // Peak hour detection (9 AM - 5 PM on weekdays) + const isPeakHour = !isWeekend && hour >= 9 && hour <= 17; + + return { + isNightTime, + isWeekend, + isPeakHour, + anomalyCount + }; +} + +// ============================================================================= +// VELOCITY FEATURE EXTRACTION +// ============================================================================= + +/** + * Extracts velocity-based features from geographic data + * Detects impossible travel and rapid location changes + * + * @param ip - Client IP address for behavior tracking + * @param geoData - Geographic location data + * @returns Promise 
resolving to velocity features + */ +export async function extractVelocityFeatures(ip: string, geoData: GeoData | null): Promise { + // Input validation + if (!ip || typeof ip !== 'string') { + throw new Error('Invalid IP address provided to extractVelocityFeatures'); + } + + // Use mutable object during construction + const features = { + impossibleTravel: false, + rapidLocationChange: false, + travelVelocity: 0, + geoAnomalies: [] as string[] + }; + + // Return early if no geo data or incomplete coordinates + if (!geoData || + typeof geoData.latitude !== 'number' || + typeof geoData.longitude !== 'number' || + !isValidCoordinate(geoData.latitude, geoData.longitude)) { + return features; + } + + try { + // Get previous location data from behavior database + const behaviorKey = `behavior:${ip}`; + const behaviorData = await getBehaviorData(behaviorKey); + + if (behaviorData?.lastLocation && behaviorData.lastSeen) { + const velocityAnalysis = analyzeVelocity( + behaviorData.lastLocation, + { lat: geoData.latitude, lon: geoData.longitude }, + behaviorData.lastSeen, + Date.now() + ); + + // Return new object with velocity analysis results + return { + impossibleTravel: velocityAnalysis.impossibleTravel ?? features.impossibleTravel, + rapidLocationChange: velocityAnalysis.rapidLocationChange ?? features.rapidLocationChange, + travelVelocity: velocityAnalysis.travelVelocity ?? features.travelVelocity, + geoAnomalies: velocityAnalysis.geoAnomalies ? [...velocityAnalysis.geoAnomalies] : features.geoAnomalies, + distanceTraveled: velocityAnalysis.distanceTraveled, + timeElapsed: velocityAnalysis.timeElapsed + }; + } + + // Store current location for future comparisons + await updateLocationData(behaviorKey, geoData, behaviorData); + + return features; + } catch (err) { + const error = err as Error; + console.warn(`Failed to extract velocity features for IP ${ip}:`, error.message); + return features; + } +} + +/** + * Validates geographic coordinates + * @param lat - Latitude + * @param lon - Longitude + * @returns True if coordinates are valid + */ +function isValidCoordinate(lat: number, lon: number): boolean { + return lat >= -90 && lat <= 90 && lon >= -180 && lon <= 180; +} + +/** + * Gets behavior data from database with proper error handling + * @param behaviorKey - Database key for behavior data + * @returns Behavior data or null + */ +async function getBehaviorData(behaviorKey: string): Promise { + try { + const data = await behaviorDB.get(behaviorKey); + return data as BehaviorData; + } catch (err) { + // Key doesn't exist or database error + return null; + } +} + +/** + * Analyzes velocity between two geographic points + * @param lastLocation - Previous location + * @param currentLocation - Current location + * @param lastTimestamp - Previous timestamp + * @param currentTimestamp - Current timestamp + * @returns Velocity analysis results + */ +function analyzeVelocity( + lastLocation: GeoLocation, + currentLocation: GeoLocation, + lastTimestamp: number, + currentTimestamp: number +): Partial { + const features: { + impossibleTravel?: boolean; + rapidLocationChange?: boolean; + travelVelocity?: number; + geoAnomalies?: string[]; + distanceTraveled?: number; + timeElapsed?: number; + } = { + geoAnomalies: [] + }; + + // Calculate distance between locations + const distance = calculateDistance(lastLocation, currentLocation); + + if (distance === null || distance < 0) { + return features; + } + + // Calculate time difference in seconds + const timeDiffSeconds = Math.max((currentTimestamp - 
lastTimestamp) / 1000, 1); + + // Calculate velocity in km/h + const velocityKmh = (distance / timeDiffSeconds) * 3600; + + // Apply reasonable bounds to velocity + const boundedVelocity = Math.min(velocityKmh, 50000); // Cap at 50,000 km/h (orbital speeds) + + features.travelVelocity = Math.round(boundedVelocity * 100) / 100; // Round to 2 decimal places + features.distanceTraveled = Math.round(distance * 100) / 100; + features.timeElapsed = Math.round(timeDiffSeconds); + + const anomalies: string[] = features.geoAnomalies || []; + + // Impossible travel detection (faster than commercial flight) + if (boundedVelocity > TIMING_CONFIG.maxCommercialFlightSpeed) { + features.impossibleTravel = true; + anomalies.push('impossible_travel_speed'); + } + + // Rapid location change detection + if (boundedVelocity > TIMING_CONFIG.rapidMovementThreshold && + timeDiffSeconds < TIMING_CONFIG.rapidMovementTimeWindow) { + features.rapidLocationChange = true; + anomalies.push('rapid_location_change'); + } + + // Additional velocity-based anomalies + if (boundedVelocity > 2000) { // Faster than supersonic aircraft + anomalies.push('supersonic_travel'); + } + + if (distance > 20000) { // Distance greater than half Earth's circumference + anomalies.push('extreme_distance'); + } + + features.geoAnomalies = anomalies; + + return features; +} + +/** + * Updates location data in behavior database + * @param behaviorKey - Database key + * @param geoData - Current geographic data + * @param existingData - Existing behavior data + */ +async function updateLocationData( + behaviorKey: string, + geoData: GeoData, + existingData: BehaviorData | null +): Promise { + try { + const updatedData: BehaviorData = { + ...existingData, + lastLocation: { + lat: geoData.latitude!, + lon: geoData.longitude! + }, + lastSeen: Date.now() + }; + + await behaviorDB.put(behaviorKey, updatedData); + } catch (err) { + const error = err as Error; + console.warn('Failed to update location data:', error.message); + } +} + +// ============================================================================= +// EXPORT TYPE DEFINITIONS +// ============================================================================= + +export type { TimingFeatures, VelocityFeatures, GeoData, GeoLocation }; \ No newline at end of file diff --git a/src/utils/threat-scoring/index.ts b/src/utils/threat-scoring/index.ts new file mode 100644 index 0000000..f72f9c1 --- /dev/null +++ b/src/utils/threat-scoring/index.ts @@ -0,0 +1,437 @@ +// ============================================================================= +// THREAT SCORING ENGINE (TypeScript) +// ============================================================================= + +import { STATIC_WHITELIST, type ThreatThresholds, type SignalWeights } from './constants.js'; +import { type IncomingHttpHeaders } from 'http'; +import type { NetworkRequest } from '../network.js'; +import * as logs from '../logs.js'; +import { performance } from 'perf_hooks'; + +// Simple utility functions +function performSecurityChecks(ip: string): string { + if (typeof ip !== 'string' || ip.length === 0 || ip.length > 45) { + throw new Error('Invalid IP address'); + } + return ip.trim(); +} + +function normalizeMetricValue(value: number, min: number, max: number): number { + if (typeof value !== 'number' || isNaN(value)) return 0; + if (max <= min) return value >= max ? 
1 : 0; + const clampedValue = Math.max(min, Math.min(max, value)); + return (clampedValue - min) / (max - min); +} + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +export interface ThreatScore { + readonly totalScore: number; + readonly confidence: number; + readonly riskLevel: 'allow' | 'challenge' | 'block'; + readonly components: { + readonly behaviorScore: number; + readonly contentScore: number; + readonly networkScore: number; + readonly anomalyScore: number; + }; + readonly signalsTriggered: readonly string[]; + readonly normalizedFeatures: Record; + readonly processingTimeMs: number; +} + +export interface ThreatScoringConfig { + readonly enabled: boolean; + readonly thresholds: ThreatThresholds; + readonly signalWeights: SignalWeights; + readonly enableBotVerification?: boolean; + readonly enableGeoAnalysis?: boolean; + readonly enableBehaviorAnalysis?: boolean; + readonly enableContentAnalysis?: boolean; + readonly logDetailedScores?: boolean; +} + +interface RequestMetadata { + readonly startTime: number; + readonly ip: string; + readonly userAgent?: string; + readonly method: string; + readonly path: string; + readonly headers: IncomingHttpHeaders; + readonly body?: string; + readonly sessionId?: string; +} + +// ============================================================================= +// THREAT SCORING ENGINE +// ============================================================================= + +export class ThreatScorer { + private readonly config: ThreatScoringConfig; + + constructor(config: ThreatScoringConfig) { + this.config = config; + } + + /** + * Performs comprehensive threat scoring on a request + */ + public async scoreRequest(request: NetworkRequest): Promise { + const startTime = performance.now(); + + try { + // Check if scoring is enabled + if (!this.config.enabled) { + return this.createAllowScore(startTime); + } + + // Extract request metadata + const metadata = this.extractRequestMetadata(request, startTime); + + // Validate input and perform security checks + performSecurityChecks(metadata.ip); + + // Check static whitelist (quick path for assets) + if (this.isWhitelisted(metadata.path)) { + return this.createAllowScore(startTime); + } + + // Perform threat analysis + const score = this.performBasicThreatAnalysis(metadata); + + return score; + + } catch (error) { + logs.error('threat-scorer', `Error scoring request: ${error}`); + return this.createErrorScore(startTime); + } + } + + /** + * Extract basic metadata from request + */ + private extractRequestMetadata(request: NetworkRequest, startTime: number): RequestMetadata { + const headers = request.headers || {}; + const userAgent = this.extractUserAgent(headers); + const ip = this.extractClientIP(request); + + return { + startTime, + ip, + userAgent, + method: (request as any).method || 'GET', + path: this.extractPath(request), + headers: headers as IncomingHttpHeaders, + body: (request as any).body, + sessionId: this.extractSessionId(headers) + }; + } + + /** + * Extract user agent from headers + */ + private extractUserAgent(headers: any): string { + if (headers && typeof headers.get === 'function') { + return headers.get('user-agent') || ''; + } + if (headers && typeof headers === 'object') { + return headers['user-agent'] || ''; + } + return ''; + } + + /** + * Extract client IP from request + */ + private extractClientIP(request: NetworkRequest): string { + // 
Try common IP extraction methods + const headers = request.headers; + if (headers) { + if (typeof headers.get === 'function') { + return headers.get('x-forwarded-for') || + headers.get('x-real-ip') || + headers.get('cf-connecting-ip') || '127.0.0.1'; + } + if (typeof headers === 'object') { + const h = headers as any; + return h['x-forwarded-for'] || h['x-real-ip'] || h['cf-connecting-ip'] || '127.0.0.1'; + } + } + return '127.0.0.1'; + } + + /** + * Extract path from request + */ + private extractPath(request: NetworkRequest): string { + if ((request as any).url) { + try { + const url = new URL((request as any).url, 'http://localhost'); + return url.pathname; + } catch { + return (request as any).url || '/'; + } + } + return '/'; + } + + /** + * Extract session ID from headers + */ + private extractSessionId(headers: any): string | undefined { + // Basic session ID extraction from cookies + if (headers && headers.cookie) { + const cookies = headers.cookie.split(';'); + for (const cookie of cookies) { + const [name, value] = cookie.trim().split('='); + if (name && name.toLowerCase().includes('session')) { + return value; + } + } + } + return undefined; + } + + /** + * Check if path is in static whitelist + */ + private isWhitelisted(path: string): boolean { + // Check static file extensions + for (const ext of STATIC_WHITELIST.extensions) { + if (path.endsWith(ext)) { + return true; + } + } + + // Check whitelisted paths + for (const whitelistPath of STATIC_WHITELIST.paths) { + if (path.startsWith(whitelistPath)) { + return true; + } + } + + // Check patterns + for (const pattern of STATIC_WHITELIST.patterns) { + if (pattern.test(path)) { + return true; + } + } + + return false; + } + + /** + * Perform basic threat analysis (simplified version) + */ + private performBasicThreatAnalysis(metadata: RequestMetadata): ThreatScore { + const startTime = performance.now(); + const signalsTriggered: string[] = []; + let totalScore = 0; + + const components = { + networkScore: 0, + behaviorScore: 0, + contentScore: 0, + anomalyScore: 0 + }; + + // Basic checks + if (!metadata.userAgent || metadata.userAgent.length === 0) { + components.anomalyScore += this.config.signalWeights.MISSING_UA?.weight || 10; + signalsTriggered.push('MISSING_UA'); + } + + // WAF signal integration - use WAF results if available + const wafSignals = this.extractWAFSignals(metadata); + if (wafSignals) { + const wafScore = this.calculateWAFScore(wafSignals, signalsTriggered); + components.contentScore += wafScore; + } + + totalScore = components.networkScore + components.behaviorScore + + components.contentScore + components.anomalyScore; + + // Determine risk level + const riskLevel = this.determineRiskLevel(totalScore); + + // Calculate confidence (simplified) + const confidence = Math.min(0.8, signalsTriggered.length * 0.2 + 0.3); + + const processingTimeMs = performance.now() - startTime; + + return { + totalScore, + confidence, + riskLevel, + components, + signalsTriggered, + normalizedFeatures: { + networkRisk: normalizeMetricValue(components.networkScore, 0, 100), + behaviorRisk: normalizeMetricValue(components.behaviorScore, 0, 100), + contentRisk: normalizeMetricValue(components.contentScore, 0, 100), + anomalyRisk: normalizeMetricValue(components.anomalyScore, 0, 100) + }, + processingTimeMs + }; + } + + /** + * Extract WAF signals from request metadata + */ + private extractWAFSignals(metadata: RequestMetadata): Record | null { + // WAF signals are attached to the request object by WAF middleware + // Try multiple 
ways to access them depending on request type + const request = metadata as any; + + // Express-style: res.locals.wafSignals (if request has res) + if (request.res?.locals?.wafSignals) { + return request.res.locals.wafSignals; + } + + // Direct attachment: request.wafSignals + if (request.wafSignals) { + return request.wafSignals; + } + + // Headers may contain WAF detection flags + if (metadata.headers) { + const wafHeader = metadata.headers['x-waf-signals'] || metadata.headers['X-WAF-Signals']; + if (wafHeader && typeof wafHeader === 'string') { + try { + return JSON.parse(wafHeader); + } catch { + // Invalid JSON, ignore + } + } + } + + return null; + } + + /** + * Calculate threat score from WAF signals + */ + private calculateWAFScore(wafSignals: Record, signalsTriggered: string[]): number { + let score = 0; + + // Map WAF detections to configured signal weights + if (wafSignals.sqlInjection || wafSignals.sql_injection) { + score += this.config.signalWeights.SQL_INJECTION?.weight || 80; + signalsTriggered.push('SQL_INJECTION'); + } + + if (wafSignals.xss || wafSignals.xssAttempt) { + score += this.config.signalWeights.XSS_ATTEMPT?.weight || 85; + signalsTriggered.push('XSS_ATTEMPT'); + } + + if (wafSignals.commandInjection || wafSignals.command_injection) { + score += this.config.signalWeights.COMMAND_INJECTION?.weight || 95; + signalsTriggered.push('COMMAND_INJECTION'); + } + + if (wafSignals.pathTraversal || wafSignals.path_traversal) { + score += this.config.signalWeights.PATH_TRAVERSAL?.weight || 70; + signalsTriggered.push('PATH_TRAVERSAL'); + } + + // Handle unverified bot detection - CRITICAL for fake bots + if (wafSignals.unverified_bot) { + score += 50; // High penalty for fake bot user agents + signalsTriggered.push('UNVERIFIED_BOT'); + } + + // Handle WAF attack tool detection in user agents + const detectedAttacks = wafSignals.detected_attacks; + if (Array.isArray(detectedAttacks)) { + if (detectedAttacks.includes('attack_tool_user_agent')) { + score += this.config.signalWeights.ATTACK_TOOL_UA?.weight || 30; + signalsTriggered.push('ATTACK_TOOL_UA'); + } + + // Additional detection for unverified bots via attack list + if (detectedAttacks.includes('unverified_bot')) { + score += 50; + signalsTriggered.push('UNVERIFIED_BOT'); + } + } + + return score; + } + + /** + * Determines risk level based on score and configured thresholds + */ + private determineRiskLevel(score: number): 'allow' | 'challenge' | 'block' { + if (score <= this.config.thresholds.ALLOW) return 'allow'; + if (score <= this.config.thresholds.CHALLENGE) return 'challenge'; + return 'block'; + } + + /** + * Creates an allow score for whitelisted or disabled requests + */ + private createAllowScore(startTime: number): ThreatScore { + return { + totalScore: 0, + confidence: 1.0, + riskLevel: 'allow', + components: { + behaviorScore: 0, + contentScore: 0, + networkScore: 0, + anomalyScore: 0 + }, + signalsTriggered: [], + normalizedFeatures: {}, + processingTimeMs: performance.now() - startTime + }; + } + + /** + * Creates an error score when threat analysis fails + */ + private createErrorScore(startTime: number): ThreatScore { + return { + totalScore: 0, + confidence: 0, + riskLevel: 'allow', // Fail open + components: { + behaviorScore: 0, + contentScore: 0, + networkScore: 0, + anomalyScore: 0 + }, + signalsTriggered: ['ERROR'], + normalizedFeatures: {}, + processingTimeMs: performance.now() - startTime + }; + } +} + +/** + * Creates and configures a threat scorer instance + */ +export function 
createThreatScorer(config: ThreatScoringConfig): ThreatScorer { + return new ThreatScorer(config); +} + +// Default threat scorer for convenience (requires configuration) +let defaultScorer: ThreatScorer | null = null; + +export function configureDefaultThreatScorer(config: ThreatScoringConfig): void { + defaultScorer = new ThreatScorer(config); +} + +export const threatScorer = { + scoreRequest: async (request: NetworkRequest): Promise => { + if (!defaultScorer) { + throw new Error('Default threat scorer not configured. Call configureDefaultThreatScorer() first.'); + } + return defaultScorer.scoreRequest(request); + } +}; + + \ No newline at end of file diff --git a/src/utils/threat-scoring/pattern-matcher.ts b/src/utils/threat-scoring/pattern-matcher.ts new file mode 100644 index 0000000..af2e67e --- /dev/null +++ b/src/utils/threat-scoring/pattern-matcher.ts @@ -0,0 +1,185 @@ +// ============================================================================= +// PATTERN MATCHING FOR THREAT SCORING (TypeScript) +// ============================================================================= + +// @ts-ignore - string-dsa doesn't have TypeScript definitions +import { AhoCorasick } from 'string-dsa'; +import { ATTACK_TOOL_PATTERNS, SUSPICIOUS_BOT_PATTERNS } from './constants.js'; +import * as logs from '../logs.js'; + +// ============================================================================= +// TYPE DEFINITIONS +// ============================================================================= + +interface AhoCorasickMatcher { + find(text: string): readonly string[] | null; +} + +interface AhoCorasickMatchers { + attackTools: AhoCorasickMatcher | null; + suspiciousBotPatterns: AhoCorasickMatcher | null; +} + +interface ReadonlyAhoCorasickMatchers { + readonly attackTools: AhoCorasickMatcher | null; + readonly suspiciousBotPatterns: AhoCorasickMatcher | null; +} + +// ============================================================================= +// PATTERN MATCHING IMPLEMENTATION +// ============================================================================= + +// Pre-compiled Aho-Corasick matchers for ultra-fast pattern matching +// CRITICAL: These provide 10-100x performance improvement over individual string.includes() calls +const internalMatchers: AhoCorasickMatchers = { + attackTools: null, + suspiciousBotPatterns: null +}; + +// Initialize Aho-Corasick matchers once at startup +function initializeAhoCorasickMatchers(): void { + try { + internalMatchers.attackTools = new AhoCorasick(ATTACK_TOOL_PATTERNS) as AhoCorasickMatcher; + internalMatchers.suspiciousBotPatterns = new AhoCorasick(SUSPICIOUS_BOT_PATTERNS) as AhoCorasickMatcher; + + logs.plugin('threat-scoring', 'Initialized Aho-Corasick matchers for ultra-fast pattern matching'); + } catch (err) { + const error = err as Error; + logs.error('threat-scoring', `Failed to initialize Aho-Corasick matchers: ${error.message}`); + + // Set to null so we can fall back to traditional methods + (Object.keys(internalMatchers) as Array).forEach(key => { + internalMatchers[key] = null; + }); + } +} + +// Initialize matchers at module load +initializeAhoCorasickMatchers(); + +// ============================================================================= +// EXPORTED MATCHER FUNCTIONS +// ============================================================================= + +/** + * Checks if the given text contains patterns associated with attack tools + * @param text - The text to search for attack tool patterns + * @returns true if attack 
tool patterns are found, false otherwise + */ +export function matchAttackTools(text: unknown): boolean { + // Type guard: ensure we have a string + if (!text || typeof text !== 'string') { + return false; + } + + // Use Aho-Corasick for performance if available + if (internalMatchers.attackTools) { + try { + const matches = internalMatchers.attackTools.find(text.toLowerCase()); + return matches !== null && matches.length > 0; + } catch (err) { + const error = err as Error; + logs.warn('threat-scoring', `Aho-Corasick attack tool matching failed: ${error.message}`); + // Fall through to traditional method + } + } + + // Fallback to traditional method if Aho-Corasick fails + const lowerText = text.toLowerCase(); + return ATTACK_TOOL_PATTERNS.some((pattern: string) => lowerText.includes(pattern)); +} + +/** + * Checks if the given text contains patterns associated with suspicious bots + * @param text - The text to search for suspicious bot patterns + * @returns true if suspicious bot patterns are found, false otherwise + */ +export function matchSuspiciousBots(text: unknown): boolean { + // Type guard: ensure we have a string + if (!text || typeof text !== 'string') { + return false; + } + + // Use Aho-Corasick for performance if available + if (internalMatchers.suspiciousBotPatterns) { + try { + const matches = internalMatchers.suspiciousBotPatterns.find(text.toLowerCase()); + return matches !== null && matches.length > 0; + } catch (err) { + const error = err as Error; + logs.warn('threat-scoring', `Aho-Corasick suspicious bot matching failed: ${error.message}`); + // Fall through to traditional method + } + } + + // Fallback to traditional method if Aho-Corasick fails + const lowerText = text.toLowerCase(); + return SUSPICIOUS_BOT_PATTERNS.some((pattern: string) => lowerText.includes(pattern)); +} + +/** + * Advanced pattern matching with detailed results + * @param text - The text to analyze + * @param patterns - Array of patterns to search for + * @returns Array of matched patterns with positions + */ +export function findDetailedMatches( + text: unknown, + patterns: readonly string[] +): readonly { pattern: string; position: number }[] { + if (!text || typeof text !== 'string') { + return []; + } + + const results: { pattern: string; position: number }[] = []; + const lowerText = text.toLowerCase(); + + patterns.forEach(pattern => { + const position = lowerText.indexOf(pattern.toLowerCase()); + if (position !== -1) { + results.push({ pattern, position }); + } + }); + + return results; +} + +/** + * Gets the current status of Aho-Corasick matchers + * @returns Status object indicating which matchers are available + */ +export function getMatcherStatus(): { + readonly attackToolsAvailable: boolean; + readonly suspiciousBotsAvailable: boolean; + readonly fallbackMode: boolean; +} { + const attackToolsAvailable = internalMatchers.attackTools !== null; + const suspiciousBotsAvailable = internalMatchers.suspiciousBotPatterns !== null; + + return { + attackToolsAvailable, + suspiciousBotsAvailable, + fallbackMode: !attackToolsAvailable || !suspiciousBotsAvailable + }; +} + +/** + * Reinitializes the Aho-Corasick matchers (useful for recovery after errors) + */ +export function reinitializeMatchers(): boolean { + try { + initializeAhoCorasickMatchers(); + const status = getMatcherStatus(); + return status.attackToolsAvailable && status.suspiciousBotsAvailable; + } catch (err) { + const error = err as Error; + logs.error('threat-scoring', `Failed to reinitialize matchers: ${error.message}`); + 
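+    // Reinitialization failed; returning false signals that the Aho-Corasick matchers may still be unavailable and callers should expect the string-includes fallback path.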
return false; + } +} + +// Re-export the matchers for testing/debugging (readonly for safety) +export const ahoCorasickMatchers: ReadonlyAhoCorasickMatchers = Object.freeze({ + get attackTools() { return internalMatchers.attackTools; }, + get suspiciousBotPatterns() { return internalMatchers.suspiciousBotPatterns; } +}); \ No newline at end of file diff --git a/src/utils/threat-scoring/security.ts b/src/utils/threat-scoring/security.ts new file mode 100644 index 0000000..176f03a --- /dev/null +++ b/src/utils/threat-scoring/security.ts @@ -0,0 +1,58 @@ +// ============================================================================= +// THREAT SCORING SECURITY UTILITIES +// ============================================================================= + +/** + * Performs basic security validation on IP addresses to prevent injection attacks + * @param ip - The IP address to validate + * @returns The validated IP address + * @throws Error if IP is invalid or malicious + */ +export function performSecurityChecks(ip: string): string { + if (typeof ip !== 'string') { + throw new Error('IP address must be a string'); + } + + // Remove any whitespace + const cleanIP = ip.trim(); + + // Basic length check to prevent extremely long inputs + if (cleanIP.length > 45) { // Max IPv6 length + throw new Error('IP address too long'); + } + + if (cleanIP.length === 0) { + throw new Error('IP address cannot be empty'); + } + + // Basic IPv4 pattern check + const ipv4Pattern = /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/; + + // Basic IPv6 pattern check (simplified) + const ipv6Pattern = /^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$|^::1$|^::$/; + + // Check for common injection patterns + const dangerousPatterns = [ + /[<>\"'`]/, // HTML/JS injection + /[;|&$]/, // Command injection + /\.\./, // Path traversal + /\/\*/, // SQL comment + /--/, // SQL comment + ]; + + for (const pattern of dangerousPatterns) { + if (pattern.test(cleanIP)) { + throw new Error('IP address contains dangerous characters'); + } + } + + // Validate IP format + if (!ipv4Pattern.test(cleanIP) && !ipv6Pattern.test(cleanIP)) { + // Allow some common internal formats like ::ffff:192.168.1.1 + if (!/^::ffff:[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$/.test(cleanIP)) { + throw new Error('Invalid IP address format'); + } + } + + return cleanIP; +} \ No newline at end of file diff --git a/src/utils/time.ts b/src/utils/time.ts new file mode 100644 index 0000000..3f6d566 --- /dev/null +++ b/src/utils/time.ts @@ -0,0 +1,148 @@ +// Duration parsing utility with error handling and validation +// CRITICAL: Used throughout the system for parsing configuration timeouts +// Incorrect parsing can lead to system instability or security bypasses + +// Type definitions for duration parsing +export type DurationUnit = 's' | 'm' | 'h' | 'd'; +export type DurationInput = string | number; +export type DurationString = `${number}${DurationUnit}`; + +// Interface for duration multipliers +interface DurationMultipliers { + readonly s: number; // seconds + readonly m: number; // minutes + readonly h: number; // hours + readonly d: number; // days +} + +// Constants for duration conversion +const DURATION_MULTIPLIERS: DurationMultipliers = { + s: 1000, // seconds + m: 60 * 1000, // minutes + h: 60 * 60 * 1000, // hours + d: 24 * 60 * 60 * 1000 // days +} as const; + +/** + * Parse duration strings into milliseconds + * Supports formats like: "1s", "5m", "2h", "1d", "30000" (raw ms) + * + * @param input - Duration 
string or milliseconds + * @returns Duration in milliseconds + * @throws Error if input format is invalid + */ +export function parseDuration(input: DurationInput): number { + // Handle numeric input (already in milliseconds) + if (typeof input === 'number') { + if (input < 0) { + throw new Error('Duration cannot be negative'); + } + if (input > Number.MAX_SAFE_INTEGER) { + throw new Error('Duration too large'); + } + return input; + } + + if (typeof input !== 'string') { + throw new Error('Duration must be a string or number'); + } + + // Handle empty or invalid input + const trimmed = input.trim(); + if (!trimmed) { + throw new Error('Duration cannot be empty'); + } + + // Parse numeric-only strings as milliseconds + const numericValue = parseInt(trimmed, 10); + if (trimmed === numericValue.toString()) { + if (numericValue < 0) { + throw new Error('Duration cannot be negative'); + } + return numericValue; + } + + // Parse duration with unit suffix + const match = trimmed.match(/^(\d+(?:\.\d+)?)\s*([smhd])$/i); + if (!match) { + throw new Error(`Invalid duration format: ${input}. Use formats like "1s", "5m", "2h", "1d"`); + } + + const valueMatch = match[1]; + const unitMatch = match[2]; + + if (!valueMatch || !unitMatch) { + throw new Error(`Invalid duration format: ${input}. Missing value or unit`); + } + + const value = parseFloat(valueMatch); + + const unit = unitMatch.toLowerCase() as DurationUnit; + + if (value < 0) { + throw new Error('Duration cannot be negative'); + } + + // Type-safe unit validation + if (!(unit in DURATION_MULTIPLIERS)) { + throw new Error(`Invalid duration unit: ${unit}. Use s, m, h, or d`); + } + + const result = value * DURATION_MULTIPLIERS[unit]; + + if (result > Number.MAX_SAFE_INTEGER) { + throw new Error('Duration too large'); + } + + return Math.floor(result); +} + +/** + * Format milliseconds back to human-readable duration string + * @param milliseconds - Duration in milliseconds + * @returns Human-readable duration string + */ +export function formatDuration(milliseconds: number): string { + if (milliseconds < 0) { + throw new Error('Duration cannot be negative'); + } + + // Return raw milliseconds for very small values + if (milliseconds < 1000) { + return `${milliseconds}ms`; + } + + // Find the largest appropriate unit + const units: Array<[DurationUnit, number]> = [ + ['d', DURATION_MULTIPLIERS.d], + ['h', DURATION_MULTIPLIERS.h], + ['m', DURATION_MULTIPLIERS.m], + ['s', DURATION_MULTIPLIERS.s], + ]; + + for (const [unit, multiplier] of units) { + if (milliseconds >= multiplier) { + const value = Math.floor(milliseconds / multiplier); + return `${value}${unit}`; + } + } + + return `${milliseconds}ms`; +} + +/** + * Type guard to check if a string is a valid duration string + * @param input - String to check + * @returns True if the string is a valid duration format + */ +export function isValidDurationString(input: string): input is DurationString { + try { + parseDuration(input); + return true; + } catch { + return false; + } +} + +// Export types for use in other modules +export type { DurationMultipliers }; \ No newline at end of file diff --git a/src/utils/timed-downloads.ts b/src/utils/timed-downloads.ts new file mode 100644 index 0000000..383ec57 --- /dev/null +++ b/src/utils/timed-downloads.ts @@ -0,0 +1,374 @@ +import { promises as fsPromises } from 'fs'; +import { join } from 'path'; +import { rootDir } from '../index.js'; +import { parseDuration, type DurationInput } from './time.js'; +import * as logs from './logs.js'; + +// 
==================== TYPE DEFINITIONS ==================== + +export interface TimedDownloadSource { + readonly name: string; + readonly url: string; + readonly updateInterval: DurationInput; // Uses time.ts format: "24h", "5m", etc. + readonly enabled: boolean; + readonly parser?: DataParser; + readonly validator?: DataValidator; + readonly headers?: Record; +} + +export interface DataParser { + readonly format: 'json' | 'text' | 'custom'; + readonly parseFunction?: (data: string) => unknown; +} + +export interface DataValidator { + readonly maxSize?: number; + readonly maxEntries?: number; + readonly validationFunction?: (data: unknown) => boolean; +} + +export interface DownloadResult { + readonly success: boolean; + readonly data?: unknown; + readonly error?: string; + readonly lastUpdated: number; +} + +export interface DownloadedData { + readonly sourceName: string; + readonly data: unknown; + readonly lastUpdated: number; + readonly source: string; +} + +// ==================== SECURITY CONSTANTS ==================== + +const SECURITY_LIMITS = { + MAX_DOWNLOAD_SIZE: 50 * 1024 * 1024, // 50MB max download + MAX_RESPONSE_TIME: parseDuration('30s'), // 30 seconds timeout + MIN_UPDATE_INTERVAL: parseDuration('1m'), // Minimum 1 minute between updates + MAX_UPDATE_INTERVAL: parseDuration('7d'), // Maximum 1 week between updates + MAX_SOURCES: 100, // Maximum number of sources +} as const; + +// ==================== DOWNLOAD MANAGER ==================== + +export class TimedDownloadManager { + private readonly dataDir: string; + private readonly updateTimestampPath: string; + private readonly updatePromises: Map> = new Map(); + private readonly scheduledUpdates: Map = new Map(); + private readonly parsedIntervals: Map = new Map(); + + constructor(subdirectory: string = 'downloads') { + this.dataDir = join(rootDir, 'data', subdirectory); + this.updateTimestampPath = join(this.dataDir, 'update-timestamps.json'); + this.ensureDataDirectory(); + } + + private async ensureDataDirectory(): Promise { + try { + await fsPromises.mkdir(this.dataDir, { recursive: true }); + } catch (error) { + logs.error('timed-downloads', `Failed to create data directory: ${error}`); + } + } + + /** + * Gets parsed interval with caching to avoid repeated parsing overhead + */ + private getParsedInterval(interval: DurationInput): number { + if (!this.parsedIntervals.has(interval)) { + this.parsedIntervals.set(interval, parseDuration(interval)); + } + return this.parsedIntervals.get(interval)!; + } + + /** + * Downloads and parses data from a source + */ + async downloadFromSource(source: TimedDownloadSource): Promise { + // Prevent concurrent downloads of the same source + if (this.updatePromises.has(source.name)) { + return await this.updatePromises.get(source.name)!; + } + + const downloadPromise = this.performDownload(source); + this.updatePromises.set(source.name, downloadPromise); + + try { + return await downloadPromise; + } finally { + this.updatePromises.delete(source.name); + } + } + + private async performDownload(source: TimedDownloadSource): Promise { + const now = Date.now(); + + try { + logs.plugin('timed-downloads', `Downloading ${source.name} from ${source.url}`); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), SECURITY_LIMITS.MAX_RESPONSE_TIME); + + const headers = { + 'User-Agent': 'Checkpoint-Security-Gateway/1.0', + ...source.headers, + }; + + const response = await fetch(source.url, { + signal: controller.signal, + headers, + }); + + 
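+      // The fetch resolved before the 30s abort timer fired; clear the timeout so the pending abort cannot trigger later.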
clearTimeout(timeoutId); + + if (!response.ok) { + return { + success: false, + error: `HTTP ${response.status}: ${response.statusText}`, + lastUpdated: now, + }; + } + + const contentLength = response.headers.get('content-length'); + const maxSize = source.validator?.maxSize || SECURITY_LIMITS.MAX_DOWNLOAD_SIZE; + + if (contentLength && parseInt(contentLength) > maxSize) { + return { + success: false, + error: `Response too large: ${contentLength} bytes`, + lastUpdated: now, + }; + } + + const rawData = await response.text(); + + if (rawData.length > maxSize) { + return { + success: false, + error: `Response too large: ${rawData.length} bytes`, + lastUpdated: now, + }; + } + + // Parse data based on format + let parsedData: unknown; + try { + parsedData = this.parseData(rawData, source.parser); + } catch (parseError) { + return { + success: false, + error: `Parse error: ${parseError instanceof Error ? parseError.message : 'Unknown parse error'}`, + lastUpdated: now, + }; + } + + // Validate parsed data + if (source.validator?.validationFunction && !source.validator.validationFunction(parsedData)) { + return { + success: false, + error: 'Data validation failed', + lastUpdated: now, + }; + } + + // Save to file + const downloadedData: DownloadedData = { + sourceName: source.name, + data: parsedData, + lastUpdated: now, + source: source.url, + }; + + await this.saveDownloadedData(source.name, downloadedData); + await this.updateTimestamp(source.name); + + logs.plugin('timed-downloads', `Successfully downloaded and saved ${source.name}`); + + return { + success: true, + data: parsedData, + lastUpdated: now, + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : 'Unknown error'; + logs.error('timed-downloads', `Failed to download ${source.name}: ${errorMessage}`); + + return { + success: false, + error: errorMessage, + lastUpdated: now, + }; + } + } + + /** + * Parses raw data based on parser configuration + */ + private parseData(rawData: string, parser?: DataParser): unknown { + if (!parser) { + return rawData; // Return raw text if no parser specified + } + + switch (parser.format) { + case 'json': + return JSON.parse(rawData); + case 'text': + return rawData; + case 'custom': + if (parser.parseFunction) { + return parser.parseFunction(rawData); + } + return rawData; + default: + return rawData; + } + } + + /** + * Saves downloaded data to disk + */ + private async saveDownloadedData(sourceName: string, data: DownloadedData): Promise { + const filePath = join(this.dataDir, `${sourceName}.json`); + try { + await fsPromises.writeFile(filePath, JSON.stringify(data, null, 2), 'utf8'); + } catch (error) { + logs.error('timed-downloads', `Failed to save data for ${sourceName}: ${error}`); + } + } + + /** + * Loads downloaded data from disk + */ + async loadDownloadedData(sourceName: string): Promise { + const filePath = join(this.dataDir, `${sourceName}.json`); + try { + const fileData = await fsPromises.readFile(filePath, 'utf8'); + return JSON.parse(fileData) as DownloadedData; + } catch { + return null; + } + } + + /** + * Checks if a source needs updating based on its interval + */ + async needsUpdate(source: TimedDownloadSource): Promise { + try { + const timestamps = await this.getUpdateTimestamps(); + const lastUpdate = timestamps[source.name] || 0; + const intervalMs = this.getParsedInterval(source.updateInterval); + const elapsed = Date.now() - lastUpdate; + + return elapsed >= intervalMs; + } catch { + return true; // Update on error + } + } + + /** + * 
Updates timestamp for a source + */ + private async updateTimestamp(sourceName: string): Promise<void> { + try { + const timestamps = await this.getUpdateTimestamps(); + timestamps[sourceName] = Date.now(); + await fsPromises.writeFile(this.updateTimestampPath, JSON.stringify(timestamps, null, 2), 'utf8'); + } catch (error) { + logs.error('timed-downloads', `Failed to update timestamp for ${sourceName}: ${error}`); + } + } + + /** + * Gets all update timestamps + */ + private async getUpdateTimestamps(): Promise<Record<string, number>> { + try { + const data = await fsPromises.readFile(this.updateTimestampPath, 'utf8'); + return JSON.parse(data); + } catch { + return {}; + } + } + + /** + * Starts periodic updates for sources + */ + startPeriodicUpdates(sources: readonly TimedDownloadSource[]): void { + // Clear any existing scheduled updates + this.stopPeriodicUpdates(); + + for (const source of sources) { + if (!source.enabled) continue; + + try { + const intervalMs = this.getParsedInterval(source.updateInterval); + + // Validate interval bounds + const boundedInterval = Math.max( + SECURITY_LIMITS.MIN_UPDATE_INTERVAL, + Math.min(SECURITY_LIMITS.MAX_UPDATE_INTERVAL, intervalMs) + ); + + const timeoutId = setInterval(async () => { + try { + if (await this.needsUpdate(source)) { + await this.downloadFromSource(source); + } + } catch (error) { + logs.error('timed-downloads', `Periodic update failed for ${source.name}: ${error}`); + } + }, boundedInterval); + + this.scheduledUpdates.set(source.name, timeoutId); + logs.plugin('timed-downloads', `Scheduled updates for ${source.name} every ${source.updateInterval}`); + } catch (error) { + logs.error('timed-downloads', `Failed to schedule updates for ${source.name}: ${error}`); + } + } + } + + /** + * Stops all periodic updates + */ + stopPeriodicUpdates(): void { + for (const [sourceName, timeoutId] of this.scheduledUpdates.entries()) { + clearInterval(timeoutId); + logs.plugin('timed-downloads', `Stopped periodic updates for ${sourceName}`); + } + this.scheduledUpdates.clear(); + } + + /** + * Updates all sources that need updating + */ + async updateAllSources(sources: readonly TimedDownloadSource[]): Promise<void> { + const updatePromises: Promise<{ success: boolean }>[] = []; + + for (const source of sources) { + if (source.enabled && await this.needsUpdate(source)) { + updatePromises.push(this.downloadFromSource(source)); + } + } + + if (updatePromises.length > 0) { + logs.plugin('timed-downloads', `Updating ${updatePromises.length} sources...`); + const results = await Promise.allSettled(updatePromises); + + let successCount = 0; + let failureCount = 0; + + results.forEach((result) => { + if (result.status === 'fulfilled' && result.value.success) { + successCount++; + } else { + failureCount++; + } + }); + + logs.plugin('timed-downloads', `Update complete: ${successCount} successful, ${failureCount} failed`); + } + } +} \ No newline at end of file diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..83ec4e6 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,80 @@ +{ + "compilerOptions": { + // Target modern JavaScript + "target": "ES2022", + "module": "ESNext", + "lib": ["ES2022", "DOM"], + "downlevelIteration": true, + + // Enable ES modules + "moduleResolution": "node", + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + + // Output settings - clean separation of source and build + "outDir": "./dist", + "rootDir": "./src", + "preserveConstEnums": true, + "removeComments": false, + + // Type checking + "strict": true, + "noImplicitAny": true, +
"strictNullChecks": true, + "strictFunctionTypes": true, + "strictBindCallApply": true, + "strictPropertyInitialization": true, + "noImplicitThis": true, + "alwaysStrict": true, + + // Additional checks + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedIndexedAccess": true, + "noImplicitOverride": true, + + // Interop with JavaScript + "allowJs": false, + "checkJs": false, + "maxNodeModuleJsDepth": 0, + + // Emit - minimal output for cleaner project + "declaration": false, + "declarationMap": false, + "sourceMap": false, + "inlineSources": false, + + // Advanced + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "isolatedModules": true, + + // Path mapping for source files + "baseUrl": "./src", + "paths": { + "@utils/*": ["utils/*"], + "@plugins/*": ["plugins/*"], + "@types/*": ["types/*"] + } + }, + "include": [ + "src/**/*.ts" + ], + "exclude": [ + "node_modules", + "dist", + ".tests", + "pages", + "data", + "db", + "config", + "**/*.js" + ], + "ts-node": { + "esm": true, + "experimentalSpecifierResolution": "node" + } +} \ No newline at end of file diff --git a/utils/logs.js b/utils/logs.js deleted file mode 100644 index 038a902..0000000 --- a/utils/logs.js +++ /dev/null @@ -1,41 +0,0 @@ -const seenConfigs = new Set(); - -export function init(msg) { - console.log(msg); -} - -export function plugin(_name, msg) { - console.log(msg); -} - -export function config(name, msg) { - if (!seenConfigs.has(name)) { - console.log(`Config ${msg} for ${name}`); - seenConfigs.add(name); - } -} - -export function db(msg) { - console.log(msg); -} - -export function server(msg) { - console.log(msg); -} - -export function section(title) { - console.log(`\n=== ${title.toUpperCase()} ===`); -} - -export function warn(_category, msg) { - console.warn(`WARNING: ${msg}`); -} - -export function error(_category, msg) { - console.error(`ERROR: ${msg}`); -} - -// General message function for bullet items -export function msg(msg) { - console.log(msg); -} diff --git a/utils/network.js b/utils/network.js deleted file mode 100644 index 100eb13..0000000 --- a/utils/network.js +++ /dev/null @@ -1,13 +0,0 @@ -export function getRealIP(request, server) { - let ip = request.headers.get('x-forwarded-for') || request.headers.get('x-real-ip'); - if (ip?.includes(',')) ip = ip.split(',')[0].trim(); - if (!ip && server) { - ip = server.remoteAddress; - } - if (!ip) { - const url = new URL(request.url); - ip = url.hostname; - } - if (ip?.startsWith('::ffff:')) ip = ip.slice(7); - return ip; -} diff --git a/utils/plugins.js b/utils/plugins.js deleted file mode 100644 index b2fd961..0000000 --- a/utils/plugins.js +++ /dev/null @@ -1,28 +0,0 @@ -import { resolve, extname, sep, isAbsolute } from 'path'; -import { pathToFileURL } from 'url'; -import { rootDir } from '../index.js'; - -/** - * Securely import a JavaScript module from within the application root. - * Prevents path traversal and disallows non-.js extensions. - * - * @param {string} relPath - The relative path to the module from the application root. - * @returns {Promise} The imported module. - */ -export async function secureImportModule(relPath) { - if (isAbsolute(relPath)) { - throw new Error('Absolute paths are not allowed for module imports'); - } - if (relPath.includes('..')) { - throw new Error('Relative paths containing .. 
are not allowed for module imports'); - } - if (extname(relPath) !== '.js') { - throw new Error(`Only .js files can be imported: ${relPath}`); - } - const absPath = resolve(rootDir, relPath); - if (!absPath.startsWith(rootDir + sep)) { - throw new Error(`Module path outside of application root: ${relPath}`); - } - const url = pathToFileURL(absPath).href; - return import(url); -} diff --git a/utils/proof.js b/utils/proof.js deleted file mode 100644 index 2058e91..0000000 --- a/utils/proof.js +++ /dev/null @@ -1,72 +0,0 @@ -import crypto from 'crypto'; -import { getRealIP } from './network.js'; - -export function generateChallenge(checkpointConfig) { - const challenge = crypto.randomBytes(16).toString('hex'); - const salt = crypto.randomBytes(checkpointConfig.SaltLength).toString('hex'); - return { challenge, salt }; -} - -export function calculateHash(input) { - return crypto.createHash('sha256').update(input).digest('hex'); -} - -export function verifyPoW(challenge, salt, nonce, difficulty) { - const hash = calculateHash(challenge + salt + nonce); - return hash.startsWith('0'.repeat(difficulty)); -} - -export function checkPoSTimes(times, enableCheck, ratio) { - if (!Array.isArray(times) || times.length !== 3) { - throw new Error('Invalid PoS run times length'); - } - const minT = Math.min(...times); - const maxT = Math.max(...times); - if (enableCheck && maxT > minT * ratio) { - throw new Error(`PoS run times inconsistent (ratio ${maxT / minT} > ${ratio})`); - } -} - -export const challengeStore = new Map(); - -export function generateRequestID(request, checkpointConfig) { - const { challenge, salt } = generateChallenge(checkpointConfig); - const posSeed = crypto.randomBytes(32).toString('hex'); - const requestID = crypto.randomBytes(16).toString('hex'); - const params = { - Challenge: challenge, - Salt: salt, - Difficulty: checkpointConfig.Difficulty, - ExpiresAt: Date.now() + checkpointConfig.ChallengeExpiration, - CreatedAt: Date.now(), - ClientIP: getRealIP(request), - PoSSeed: posSeed, - }; - challengeStore.set(requestID, params); - return requestID; -} - -export function getChallengeParams(requestID) { - return challengeStore.get(requestID); -} - -export function deleteChallenge(requestID) { - challengeStore.delete(requestID); -} - -export function verifyPoS(hashes, times, checkpointConfig) { - if (!Array.isArray(hashes) || hashes.length !== 3) { - throw new Error('Invalid PoS hashes length'); - } - if (!Array.isArray(times) || times.length !== 3) { - throw new Error('Invalid PoS run times length'); - } - if (hashes[0] !== hashes[1] || hashes[1] !== hashes[2]) { - throw new Error('PoS hashes do not match'); - } - if (hashes[0].length !== 64) { - throw new Error('Invalid PoS hash length'); - } - - checkPoSTimes(times, checkpointConfig.CheckPoSTimes, checkpointConfig.PoSTimeConsistencyRatio); -} diff --git a/utils/time.js b/utils/time.js deleted file mode 100644 index d10e569..0000000 --- a/utils/time.js +++ /dev/null @@ -1,20 +0,0 @@ -export function parseDuration(str) { - if (!str) return 0; - const m = /^([0-9]+)(ms|s|m|h|d)$/.exec(str); - if (!m) return 0; - const val = parseInt(m[1], 10); - switch (m[2]) { - case 'ms': - return val; - case 's': - return val * 1000; - case 'm': - return val * 60 * 1000; - case 'h': - return val * 60 * 60 * 1000; - case 'd': - return val * 24 * 60 * 60 * 1000; - default: - return 0; - } -}