Initial commit of massive v2 rewrite

This commit is contained in:
Caileb 2025-08-02 14:26:52 -05:00
parent 1025f3b523
commit dc120fe78a
55 changed files with 21733 additions and 0 deletions

View file

@ -0,0 +1,168 @@
import { jest } from '@jest/globals';
// Mock behavioral detection and config loading before any imports
// Stub out the behavioral detection module so importing threat-scoring never
// exercises the real engine (no rule loading, no blocking, no rate limits).
jest.unstable_mockModule('../dist/utils/behavioral-detection.js', () => ({
  // Disabled singleton: every query reports "nothing suspicious".
  behavioralDetection: {
    config: { enabled: false },
    isBlocked: async () => ({ blocked: false }),
    getRateLimit: async () => null,
    analyzeRequest: async () => ({ totalScore: 0, patterns: [] }),
    loadRules: async () => {},
    init: async () => {}
  },
  // Constructor form mirrors the singleton's no-op contract.
  BehavioralDetectionEngine: class MockBehavioralDetectionEngine {
    constructor() {
      this.config = { enabled: false };
    }
    loadRules() { return Promise.resolve(); }
    init() { return Promise.resolve(); }
    isBlocked() { return Promise.resolve({ blocked: false }); }
    getRateLimit() { return Promise.resolve(null); }
    analyzeRequest() { return Promise.resolve({ totalScore: 0, patterns: [] }); }
  }
}));
// Mock the main index loadConfig function to prevent TOML imports
// Stub the package entry point so no TOML config files are read at import time.
jest.unstable_mockModule('../dist/index.js', () => {
  const noop = () => {};
  return {
    loadConfig: async () => {},
    registerPlugin: noop,
    getRegisteredPluginNames: () => [],
    loadPlugins: () => [],
    freezePlugins: noop,
    rootDir: '/mock/root'
  };
});
import { threatScorer, configureDefaultThreatScorer, createThreatScorer } from '../dist/utils/threat-scoring.js';
describe('Threat Scoring (Re-export)', () => {
  // Builds the minimal request shape these tests feed to the scorer.
  const buildRequest = () => ({
    headers: { 'user-agent': 'test-browser' },
    method: 'GET',
    url: '/test'
  });

  beforeEach(() => {
    jest.clearAllMocks();
    // Reset the shared default scorer to a known-good test configuration
    // before every test so earlier tests cannot leak state into later ones.
    configureDefaultThreatScorer({
      enabled: true,
      thresholds: {
        ALLOW: 20,
        CHALLENGE: 60,
        BLOCK: 100
      },
      signalWeights: {
        BLACKLISTED_IP: { weight: 50, confidence: 0.95 },
        RAPID_ENUMERATION: { weight: 35, confidence: 0.80 },
        BRUTE_FORCE_PATTERN: { weight: 45, confidence: 0.88 },
        SQL_INJECTION: { weight: 60, confidence: 0.92 },
        XSS_ATTEMPT: { weight: 50, confidence: 0.88 },
        COMMAND_INJECTION: { weight: 65, confidence: 0.95 },
        ATTACK_TOOL_UA: { weight: 30, confidence: 0.75 },
        MISSING_UA: { weight: 10, confidence: 0.60 },
        IMPOSSIBLE_TRAVEL: { weight: 30, confidence: 0.80 },
        HIGH_RISK_COUNTRY: { weight: 15, confidence: 0.60 }
      },
      enableBotVerification: true,
      enableGeoAnalysis: true,
      enableBehaviorAnalysis: true,
      enableContentAnalysis: true
    });
  });

  afterEach(async () => {
    // Drain the macrotask queue so stray async work settles before the next test.
    await new Promise((done) => setImmediate(done));
  });

  describe('exports', () => {
    test('should export threatScorer instance', () => {
      expect(threatScorer).toBeDefined();
      expect(typeof threatScorer).toBe('object');
      // The re-exported instance must expose the current scoring API.
      expect(typeof threatScorer.scoreRequest).toBe('function');
    });

    test('should export configuration functions', () => {
      for (const exported of [configureDefaultThreatScorer, createThreatScorer]) {
        expect(exported).toBeDefined();
        expect(typeof exported).toBe('function');
      }
    });
  });

  describe('threatScorer functionality', () => {
    test('should score a simple request', async () => {
      const result = await threatScorer.scoreRequest(buildRequest());
      // Only the shape of the result is pinned here, not specific scores.
      expect(result).toBeDefined();
      expect(typeof result.totalScore).toBe('number');
      expect(typeof result.confidence).toBe('number');
      expect(['allow', 'challenge', 'block']).toContain(result.riskLevel);
      expect(Array.isArray(result.signalsTriggered)).toBe(true);
      expect(typeof result.processingTimeMs).toBe('number');
    });

    test('should handle disabled scoring', async () => {
      // A scorer built with enabled:false must short-circuit to allow/0.
      const disabledScorer = createThreatScorer({
        enabled: false,
        thresholds: { ALLOW: 20, CHALLENGE: 60, BLOCK: 100 },
        signalWeights: {}
      });
      const result = await disabledScorer.scoreRequest(buildRequest());
      expect(result.riskLevel).toBe('allow');
      expect(result.totalScore).toBe(0);
    });

    test('should require configuration for default scorer', async () => {
      // An essentially unconfigured (disabled, empty thresholds) scorer
      // should also fall through to allow with a zero score.
      const bareScorer = createThreatScorer({ enabled: false, thresholds: {} });
      const result = await bareScorer.scoreRequest(buildRequest());
      expect(result.riskLevel).toBe('allow');
      expect(result.totalScore).toBe(0);
    });
  });

  describe('threat scoring configuration', () => {
    test('should create scorer with custom config', () => {
      const customScorer = createThreatScorer({
        enabled: true,
        thresholds: {
          ALLOW: 10,
          CHALLENGE: 30,
          BLOCK: 50
        },
        signalWeights: {
          BLACKLISTED_IP: { weight: 100, confidence: 1.0 }
        },
        enableBotVerification: true
      });
      expect(customScorer).toBeDefined();
    });
  });
});