mirror of
https://github.com/bluewave-labs/Checkmate.git
synced 2026-05-13 13:08:41 -05:00
add heartbeat tests
This commit is contained in:
+3
-1
@@ -6,7 +6,9 @@
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"test": "NODE_OPTIONS=--experimental-vm-modules jest --coverage",
|
||||
"test:services": "NODE_OPTIONS=--experimental-vm-modules jest --coverage --collectCoverageFrom='src/service/**/*.ts' test/unit/services test/unit/providers/network test/unit/providers/notifications",
|
||||
"test:unit": "NODE_OPTIONS=--experimental-vm-modules jest --coverage --collectCoverageFrom='src/**/*.ts' test/unit",
|
||||
"test:unit:services": "NODE_OPTIONS=--experimental-vm-modules jest --coverage --collectCoverageFrom='src/service/**/*.ts' test/unit/services test/unit/providers/network test/unit/providers/notifications",
|
||||
"test:integration": "NODE_OPTIONS=--experimental-vm-modules jest --no-coverage test/integration",
|
||||
"dev": "nodemon --exec tsx src/index.js",
|
||||
"start": "node --watch ./dist/index.js",
|
||||
"build": "tsc && tsc-alias && cp -r src/templates dist/templates",
|
||||
|
||||
@@ -0,0 +1,138 @@
|
||||
import type { IIncidentsRepository } from "../../src/repositories/incidents/IIncidentsRepository.ts";
|
||||
import type { Incident, IncidentSummary } from "../../src/types/index.ts";
|
||||
import { randomUUID } from "crypto";
|
||||
|
||||
export class InMemoryIncidentsRepository implements IIncidentsRepository {
|
||||
private incidents: Incident[] = [];
|
||||
|
||||
async create(incident: Partial<Incident>): Promise<Incident> {
|
||||
const now = new Date().toISOString();
|
||||
const full: Incident = {
|
||||
id: randomUUID(),
|
||||
monitorId: incident.monitorId ?? "",
|
||||
teamId: incident.teamId ?? "",
|
||||
startTime: incident.startTime ?? now,
|
||||
endTime: incident.endTime ?? null,
|
||||
status: incident.status ?? true,
|
||||
message: incident.message ?? null,
|
||||
statusCode: incident.statusCode ?? null,
|
||||
resolutionType: incident.resolutionType ?? null,
|
||||
resolvedBy: incident.resolvedBy ?? null,
|
||||
resolvedByEmail: incident.resolvedByEmail ?? null,
|
||||
comment: incident.comment ?? null,
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
};
|
||||
this.incidents.push(full);
|
||||
return { ...full };
|
||||
}
|
||||
|
||||
async findById(incidentId: string, teamId: string): Promise<Incident> {
|
||||
const incident = this.incidents.find((i) => i.id === incidentId && i.teamId === teamId);
|
||||
if (!incident) {
|
||||
throw new Error(`Incident ${incidentId} not found`);
|
||||
}
|
||||
return { ...incident };
|
||||
}
|
||||
|
||||
async findActiveByIncidentId(incidentId: string, teamId: string): Promise<Incident | null> {
|
||||
const incident = this.incidents.find((i) => i.id === incidentId && i.teamId === teamId && i.status === true);
|
||||
return incident ? { ...incident } : null;
|
||||
}
|
||||
|
||||
async findActiveByMonitorId(monitorId: string, teamId: string): Promise<Incident | null> {
|
||||
const incident = this.incidents.find((i) => i.monitorId === monitorId && i.teamId === teamId && i.status === true);
|
||||
return incident ? { ...incident } : null;
|
||||
}
|
||||
|
||||
async findByTeamId(
|
||||
teamId: string,
|
||||
startDate: Date | undefined,
|
||||
page: number,
|
||||
rowsPerPage: number,
|
||||
sortOrder?: string,
|
||||
status?: boolean,
|
||||
monitorId?: string,
|
||||
resolutionType?: string
|
||||
): Promise<Incident[]> {
|
||||
let results = this.incidents.filter((i) => i.teamId === teamId);
|
||||
if (startDate) {
|
||||
results = results.filter((i) => new Date(i.startTime) >= startDate);
|
||||
}
|
||||
if (status !== undefined) {
|
||||
results = results.filter((i) => i.status === status);
|
||||
}
|
||||
if (monitorId) {
|
||||
results = results.filter((i) => i.monitorId === monitorId);
|
||||
}
|
||||
if (resolutionType) {
|
||||
results = results.filter((i) => i.resolutionType === resolutionType);
|
||||
}
|
||||
results.sort((a, b) => {
|
||||
const cmp = new Date(a.startTime).getTime() - new Date(b.startTime).getTime();
|
||||
return sortOrder === "desc" ? -cmp : cmp;
|
||||
});
|
||||
return results.slice(page * rowsPerPage, (page + 1) * rowsPerPage).map((i) => ({ ...i }));
|
||||
}
|
||||
|
||||
async findSummaryByTeamId(_teamId: string, _limit?: number): Promise<IncidentSummary> {
|
||||
throw new Error("Not implemented");
|
||||
}
|
||||
|
||||
async countByTeamId(teamId: string, startDate: Date | undefined, status?: boolean, monitorId?: string, resolutionType?: string): Promise<number> {
|
||||
let results = this.incidents.filter((i) => i.teamId === teamId);
|
||||
if (startDate) {
|
||||
results = results.filter((i) => new Date(i.startTime) >= startDate);
|
||||
}
|
||||
if (status !== undefined) {
|
||||
results = results.filter((i) => i.status === status);
|
||||
}
|
||||
if (monitorId) {
|
||||
results = results.filter((i) => i.monitorId === monitorId);
|
||||
}
|
||||
if (resolutionType) {
|
||||
results = results.filter((i) => i.resolutionType === resolutionType);
|
||||
}
|
||||
return results.length;
|
||||
}
|
||||
|
||||
async updateById(incidentId: string, teamId: string, updateData: Partial<Incident>): Promise<Incident> {
|
||||
const index = this.incidents.findIndex((i) => i.id === incidentId && i.teamId === teamId);
|
||||
if (index === -1) {
|
||||
throw new Error(`Incident ${incidentId} not found`);
|
||||
}
|
||||
const updated: Incident = {
|
||||
...this.incidents[index],
|
||||
...updateData,
|
||||
id: this.incidents[index].id,
|
||||
monitorId: this.incidents[index].monitorId,
|
||||
teamId: this.incidents[index].teamId,
|
||||
createdAt: this.incidents[index].createdAt,
|
||||
updatedAt: new Date().toISOString(),
|
||||
};
|
||||
this.incidents[index] = updated;
|
||||
return { ...updated };
|
||||
}
|
||||
|
||||
async deleteByMonitorId(monitorId: string, teamId: string): Promise<number> {
|
||||
const before = this.incidents.length;
|
||||
this.incidents = this.incidents.filter((i) => !(i.monitorId === monitorId && i.teamId === teamId));
|
||||
return before - this.incidents.length;
|
||||
}
|
||||
|
||||
async deleteByMonitorIdsNotIn(monitorIds: string[]): Promise<number> {
|
||||
const before = this.incidents.length;
|
||||
this.incidents = this.incidents.filter((i) => monitorIds.includes(i.monitorId));
|
||||
return before - this.incidents.length;
|
||||
}
|
||||
|
||||
// Test helpers — not part of the interface
|
||||
|
||||
getAll(): Incident[] {
|
||||
return this.incidents.map((i) => ({ ...i }));
|
||||
}
|
||||
|
||||
clear(): void {
|
||||
this.incidents = [];
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,116 @@
|
||||
import type { IMonitorsRepository, TeamQueryConfig, SummaryConfig } from "../../src/repositories/monitors/IMonitorsRepository.ts";
|
||||
import type { Monitor, MonitorsSummary } from "../../src/types/index.ts";
|
||||
|
||||
export class InMemoryMonitorsRepository implements IMonitorsRepository {
|
||||
private monitors: Monitor[] = [];
|
||||
|
||||
async create(monitor: Monitor, _teamId: string, _userId: string): Promise<Monitor> {
|
||||
this.monitors.push({ ...monitor });
|
||||
return { ...monitor };
|
||||
}
|
||||
|
||||
async createMonitors(monitors: Monitor[]): Promise<Monitor[]> {
|
||||
const created = monitors.map((m) => ({ ...m }));
|
||||
this.monitors.push(...created);
|
||||
return created;
|
||||
}
|
||||
|
||||
async findById(monitorId: string, teamId: string): Promise<Monitor> {
|
||||
const monitor = this.monitors.find((m) => m.id === monitorId && m.teamId === teamId);
|
||||
if (!monitor) {
|
||||
throw new Error(`Monitor ${monitorId} not found`);
|
||||
}
|
||||
return { ...monitor };
|
||||
}
|
||||
|
||||
async findAll(): Promise<Monitor[]> {
|
||||
return this.monitors.map((m) => ({ ...m }));
|
||||
}
|
||||
|
||||
async findByTeamId(_teamId: string, _config: TeamQueryConfig): Promise<Monitor[]> {
|
||||
return this.monitors.filter((m) => m.teamId === _teamId).map((m) => ({ ...m }));
|
||||
}
|
||||
|
||||
async findByIds(monitorIds: string[]): Promise<Monitor[]> {
|
||||
return this.monitors.filter((m) => monitorIds.includes(m.id)).map((m) => ({ ...m }));
|
||||
}
|
||||
|
||||
async findByIdsWithChecks(monitorIds: string[], _checksCount?: number): Promise<Monitor[]> {
|
||||
return this.findByIds(monitorIds);
|
||||
}
|
||||
|
||||
async updateById(monitorId: string, teamId: string, updates: Partial<Monitor>): Promise<Monitor> {
|
||||
const index = this.monitors.findIndex((m) => m.id === monitorId && m.teamId === teamId);
|
||||
if (index === -1) {
|
||||
throw new Error(`Monitor ${monitorId} not found`);
|
||||
}
|
||||
const updated = { ...this.monitors[index], ...updates, id: this.monitors[index].id, teamId: this.monitors[index].teamId };
|
||||
this.monitors[index] = updated;
|
||||
return { ...updated };
|
||||
}
|
||||
|
||||
async togglePauseById(monitorId: string, teamId: string): Promise<Monitor> {
|
||||
const monitor = await this.findById(monitorId, teamId);
|
||||
const newStatus = monitor.status === "paused" ? "up" : "paused";
|
||||
return this.updateById(monitorId, teamId, { status: newStatus });
|
||||
}
|
||||
|
||||
async deleteById(monitorId: string, teamId: string): Promise<Monitor> {
|
||||
const index = this.monitors.findIndex((m) => m.id === monitorId && m.teamId === teamId);
|
||||
if (index === -1) {
|
||||
throw new Error(`Monitor ${monitorId} not found`);
|
||||
}
|
||||
const [deleted] = this.monitors.splice(index, 1);
|
||||
return { ...deleted };
|
||||
}
|
||||
|
||||
async deleteByTeamId(teamId: string): Promise<{ monitors: Monitor[]; deletedCount: number }> {
|
||||
const toDelete = this.monitors.filter((m) => m.teamId === teamId);
|
||||
this.monitors = this.monitors.filter((m) => m.teamId !== teamId);
|
||||
return { monitors: toDelete, deletedCount: toDelete.length };
|
||||
}
|
||||
|
||||
async findMonitorCountByTeamIdAndType(teamId: string, _config: TeamQueryConfig): Promise<number> {
|
||||
return this.monitors.filter((m) => m.teamId === teamId).length;
|
||||
}
|
||||
|
||||
async findMonitorsSummaryByTeamId(_teamId: string, _config?: SummaryConfig): Promise<MonitorsSummary> {
|
||||
throw new Error("Not implemented");
|
||||
}
|
||||
|
||||
async findGroupsByTeamId(_teamId: string): Promise<string[]> {
|
||||
throw new Error("Not implemented");
|
||||
}
|
||||
|
||||
async removeNotificationFromMonitors(_notificationId: string): Promise<void> {
|
||||
throw new Error("Not implemented");
|
||||
}
|
||||
|
||||
async updateNotifications(_teamId: string, _monitorIds: string[], _notificationIds: string[], _action: "add" | "remove" | "set"): Promise<number> {
|
||||
throw new Error("Not implemented");
|
||||
}
|
||||
|
||||
async deleteByTeamIdsNotIn(teamIds: string[]): Promise<number> {
|
||||
const before = this.monitors.length;
|
||||
this.monitors = this.monitors.filter((m) => teamIds.includes(m.teamId));
|
||||
return before - this.monitors.length;
|
||||
}
|
||||
|
||||
async findAllMonitorIds(): Promise<string[]> {
|
||||
return this.monitors.map((m) => m.id);
|
||||
}
|
||||
|
||||
// Test helpers
|
||||
|
||||
seed(monitor: Monitor): void {
|
||||
this.monitors.push({ ...monitor });
|
||||
}
|
||||
|
||||
getAll(): Monitor[] {
|
||||
return this.monitors.map((m) => ({ ...m }));
|
||||
}
|
||||
|
||||
clear(): void {
|
||||
this.monitors = [];
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,179 @@
|
||||
import { jest } from "@jest/globals";
|
||||
import { SuperSimpleQueueHelper } from "../../src/service/infrastructure/SuperSimpleQueue/SuperSimpleQueueHelper.ts";
|
||||
import { StatusService } from "../../src/service/infrastructure/statusService.ts";
|
||||
import { IncidentService } from "../../src/service/business/incidentService.ts";
|
||||
import { InMemoryMonitorsRepository } from "./InMemoryMonitorsRepository.ts";
|
||||
import { InMemoryIncidentsRepository } from "./InMemoryIncidentsRepository.ts";
|
||||
import { createMockLogger } from "./createMockLogger.ts";
|
||||
import type { Monitor } from "../../src/types/monitor.ts";
|
||||
import type { MonitorStatusResponse } from "../../src/types/network.ts";
|
||||
import type { Check } from "../../src/types/check.ts";
|
||||
import type { MaintenanceWindow } from "../../src/types/maintenanceWindow.ts";
|
||||
|
||||
let checkCounter = 0;
|
||||
|
||||
export const makeMonitor = (overrides?: Partial<Monitor>): Monitor =>
|
||||
({
|
||||
id: "mon-1",
|
||||
teamId: "team-1",
|
||||
name: "Test Monitor",
|
||||
type: "http",
|
||||
url: "https://example.com",
|
||||
status: "up",
|
||||
// Pre-fill window so threshold evaluation runs immediately.
|
||||
// Without this, the warm-up path sets monitor.status directly
|
||||
// from each check, bypassing the threshold logic.
|
||||
statusWindow: [true, true, true, true, true],
|
||||
statusWindowSize: 5,
|
||||
statusWindowThreshold: 60,
|
||||
recentChecks: [],
|
||||
notifications: [],
|
||||
...overrides,
|
||||
}) as Monitor;
|
||||
|
||||
export const makeStatusResponse = (status: boolean, code: number): MonitorStatusResponse => ({
|
||||
monitorId: "mon-1",
|
||||
teamId: "team-1",
|
||||
type: "http",
|
||||
status,
|
||||
code,
|
||||
message: status ? "OK" : "Service Unavailable",
|
||||
responseTime: status ? 150 : 0,
|
||||
});
|
||||
|
||||
export const makeCheck = (status: boolean, code: number): Check => {
|
||||
const now = new Date().toISOString();
|
||||
return {
|
||||
id: `check-${++checkCounter}`,
|
||||
metadata: { monitorId: "mon-1", teamId: "team-1", type: "http" },
|
||||
status,
|
||||
statusCode: code,
|
||||
responseTime: status ? 150 : 0,
|
||||
message: status ? "OK" : "Service Unavailable",
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
};
|
||||
};
|
||||
|
||||
const createStubMonitorStatsRepo = () => ({
|
||||
findByMonitorId: jest.fn().mockRejectedValue(new Error("no stats")),
|
||||
create: jest.fn().mockResolvedValue({}),
|
||||
updateByMonitorId: jest.fn().mockResolvedValue({}),
|
||||
deleteByMonitorId: jest.fn(),
|
||||
deleteByMonitorIds: jest.fn(),
|
||||
deleteByMonitorIdsNotIn: jest.fn(),
|
||||
});
|
||||
|
||||
const createStubChecksRepo = () => ({
|
||||
create: jest.fn(),
|
||||
createChecks: jest.fn(),
|
||||
findByMonitorId: jest.fn(),
|
||||
findByTeamId: jest.fn(),
|
||||
findLatestByMonitorIds: jest.fn(),
|
||||
findByDateRangeAndMonitorId: jest.fn(),
|
||||
findSummaryByTeamId: jest.fn(),
|
||||
deleteByMonitorId: jest.fn(),
|
||||
deleteByTeamId: jest.fn(),
|
||||
deleteByMonitorIdsNotIn: jest.fn(),
|
||||
deleteOlderThan: jest.fn(),
|
||||
});
|
||||
|
||||
export interface HeartbeatTestHarness {
|
||||
monitorsRepo: InMemoryMonitorsRepository;
|
||||
incidentsRepo: InMemoryIncidentsRepository;
|
||||
statusService: StatusService;
|
||||
incidentService: IncidentService;
|
||||
notificationsService: { handleNotifications: jest.Mock };
|
||||
networkService: { requestStatus: jest.Mock };
|
||||
bufferStub: { addToBuffer: jest.Mock; addGeoCheckToBuffer: jest.Mock; scheduleNextFlush: jest.Mock };
|
||||
maintenanceWindowsRepo: { findByMonitorId: jest.Mock };
|
||||
messageBuilder: { extractThresholdBreaches: jest.Mock };
|
||||
heartbeatJob: (monitor: Monitor) => Promise<void>;
|
||||
setNextResponse: (status: boolean, code: number) => void;
|
||||
setNextResponseFull: (response: MonitorStatusResponse) => void;
|
||||
}
|
||||
|
||||
export function createHeartbeatTestHarness(): HeartbeatTestHarness {
|
||||
checkCounter = 0;
|
||||
|
||||
const monitorsRepo = new InMemoryMonitorsRepository();
|
||||
const incidentsRepo = new InMemoryIncidentsRepository();
|
||||
const logger = createMockLogger() as any;
|
||||
const bufferStub = { addToBuffer: jest.fn(), addGeoCheckToBuffer: jest.fn(), scheduleNextFlush: jest.fn() };
|
||||
|
||||
const statusService = new StatusService(
|
||||
logger,
|
||||
bufferStub as any,
|
||||
monitorsRepo as any,
|
||||
createStubMonitorStatsRepo() as any,
|
||||
createStubChecksRepo() as any
|
||||
);
|
||||
|
||||
const messageBuilder = { extractThresholdBreaches: jest.fn().mockReturnValue([]) };
|
||||
const incidentService = new IncidentService(logger, incidentsRepo, monitorsRepo as any, { findById: jest.fn() } as any, messageBuilder as any);
|
||||
|
||||
const notificationsService = { handleNotifications: jest.fn().mockResolvedValue(true) };
|
||||
|
||||
let nextResponse: MonitorStatusResponse | null = null;
|
||||
let nextStatus = true;
|
||||
let nextCode = 200;
|
||||
const networkService = {
|
||||
requestStatus: jest.fn().mockImplementation(() => {
|
||||
if (nextResponse) {
|
||||
return Promise.resolve(nextResponse);
|
||||
}
|
||||
return Promise.resolve(makeStatusResponse(nextStatus, nextCode));
|
||||
}),
|
||||
};
|
||||
const checkService = {
|
||||
buildCheck: jest.fn().mockImplementation((response: MonitorStatusResponse) => {
|
||||
return makeCheck(response.status, response.code);
|
||||
}),
|
||||
};
|
||||
|
||||
const setNextResponse = (status: boolean, code: number) => {
|
||||
nextResponse = null;
|
||||
nextStatus = status;
|
||||
nextCode = code;
|
||||
};
|
||||
|
||||
const setNextResponseFull = (response: MonitorStatusResponse) => {
|
||||
nextResponse = response;
|
||||
};
|
||||
|
||||
const maintenanceWindowsRepo = { findByMonitorId: jest.fn().mockResolvedValue([]) };
|
||||
|
||||
const helper = new SuperSimpleQueueHelper(
|
||||
logger,
|
||||
networkService as any,
|
||||
statusService as any,
|
||||
notificationsService as any,
|
||||
checkService as any,
|
||||
{ getDBSettings: jest.fn() } as any,
|
||||
bufferStub as any,
|
||||
incidentService as any,
|
||||
maintenanceWindowsRepo as any,
|
||||
monitorsRepo as any,
|
||||
{ findAllTeamIds: jest.fn() } as any,
|
||||
createStubMonitorStatsRepo() as any,
|
||||
createStubChecksRepo() as any,
|
||||
incidentsRepo as any,
|
||||
{ buildGeoCheck: jest.fn() } as any,
|
||||
{ deleteByMonitorIdsNotIn: jest.fn() } as any
|
||||
);
|
||||
|
||||
return {
|
||||
monitorsRepo,
|
||||
incidentsRepo,
|
||||
statusService,
|
||||
incidentService,
|
||||
notificationsService,
|
||||
networkService,
|
||||
bufferStub,
|
||||
maintenanceWindowsRepo,
|
||||
messageBuilder,
|
||||
heartbeatJob: helper.getHeartbeatJob(),
|
||||
setNextResponse,
|
||||
setNextResponseFull,
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,76 @@
|
||||
import { describe, expect, it, beforeEach } from "@jest/globals";
|
||||
import { createHeartbeatTestHarness, makeMonitor, type HeartbeatTestHarness } from "../helpers/heartbeatTestHarness.ts";
|
||||
|
||||
describe("Heartbeat job: down detection", () => {
|
||||
let h: HeartbeatTestHarness;
|
||||
|
||||
beforeEach(() => {
|
||||
h = createHeartbeatTestHarness();
|
||||
});
|
||||
|
||||
it("transitions monitor to down and creates incident after threshold failures", async () => {
|
||||
// Monitor starts with full passing window: [true, true, true, true, true]
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// 2 failures: window = [true, true, true, false, false] = 40% < 60%
|
||||
h.setNextResponse(false, 503);
|
||||
await h.heartbeatJob(monitor);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
let storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("up");
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(0);
|
||||
|
||||
// 3rd failure: window = [true, true, false, false, false] = 60% >= 60%
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("down");
|
||||
|
||||
// Incident should have been created
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(1);
|
||||
expect(incidents[0].monitorId).toBe("mon-1");
|
||||
expect(incidents[0].status).toBe(true);
|
||||
expect(incidents[0].statusCode).toBe(503);
|
||||
|
||||
// Notification should have been triggered
|
||||
expect(h.notificationsService.handleNotifications).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not create duplicate incidents on continued failures", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
h.setNextResponse(false, 500);
|
||||
|
||||
// 3 failures trigger down, 4 more continue failing
|
||||
for (let i = 0; i < 7; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Only one incident despite 7 heartbeats
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("stays up when failures are below threshold", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// 2 failures: window = [true, true, true, false, false] = 40% < 60%
|
||||
h.setNextResponse(false, 500);
|
||||
await h.heartbeatJob(monitor);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// 3 passes: window = [false, false, true, true, true] = 40% < 60%
|
||||
h.setNextResponse(true, 200);
|
||||
await h.heartbeatJob(monitor);
|
||||
await h.heartbeatJob(monitor);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
const storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("up");
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,129 @@
|
||||
import { describe, expect, it, beforeEach } from "@jest/globals";
|
||||
import { createHeartbeatTestHarness, makeMonitor, type HeartbeatTestHarness } from "../helpers/heartbeatTestHarness.ts";
|
||||
import type { MaintenanceWindow } from "../../src/types/maintenanceWindow.ts";
|
||||
|
||||
const makeMaintenanceWindow = (overrides?: Partial<MaintenanceWindow>): MaintenanceWindow => {
|
||||
const now = new Date();
|
||||
const start = new Date(now.getTime() - 60 * 60 * 1000); // 1 hour ago
|
||||
const end = new Date(now.getTime() + 60 * 60 * 1000); // 1 hour from now
|
||||
return {
|
||||
id: "mw-1",
|
||||
monitorId: "mon-1",
|
||||
teamId: "team-1",
|
||||
active: true,
|
||||
name: "Test Maintenance",
|
||||
duration: 2,
|
||||
durationUnit: "hours",
|
||||
repeat: 0,
|
||||
start: start.toISOString(),
|
||||
end: end.toISOString(),
|
||||
createdAt: new Date().toISOString(),
|
||||
updatedAt: new Date().toISOString(),
|
||||
...overrides,
|
||||
};
|
||||
};
|
||||
|
||||
describe("Heartbeat job: maintenance windows", () => {
|
||||
let h: HeartbeatTestHarness;
|
||||
|
||||
beforeEach(() => {
|
||||
h = createHeartbeatTestHarness();
|
||||
});
|
||||
|
||||
it("skips checks and sets monitor to maintenance when window is active", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// Configure an active maintenance window covering the current time
|
||||
h.maintenanceWindowsRepo.findByMonitorId.mockResolvedValue([makeMaintenanceWindow()]);
|
||||
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// Monitor status should be set to maintenance
|
||||
const storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("maintenance");
|
||||
|
||||
// No network request should have been made
|
||||
expect(h.networkService.requestStatus).not.toHaveBeenCalled();
|
||||
|
||||
// No check should have been buffered
|
||||
expect(h.bufferStub.addToBuffer).not.toHaveBeenCalled();
|
||||
|
||||
// No incident should have been created
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(0);
|
||||
|
||||
// No notification should have been sent
|
||||
expect(h.notificationsService.handleNotifications).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does not update status if monitor is already in maintenance", async () => {
|
||||
const monitor = makeMonitor({ status: "maintenance" });
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
h.maintenanceWindowsRepo.findByMonitorId.mockResolvedValue([makeMaintenanceWindow()]);
|
||||
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// findById reads the stored monitor; updateById should not have been called
|
||||
// since status was already "maintenance"
|
||||
const storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("maintenance");
|
||||
expect(h.networkService.requestStatus).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("resumes normal checks when maintenance window expires", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// First heartbeat: inside maintenance window
|
||||
h.maintenanceWindowsRepo.findByMonitorId.mockResolvedValue([makeMaintenanceWindow()]);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
let storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("maintenance");
|
||||
|
||||
// Second heartbeat: maintenance window expired (no windows returned)
|
||||
h.maintenanceWindowsRepo.findByMonitorId.mockResolvedValue([]);
|
||||
h.setNextResponse(true, 200);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// Network request should have been made this time
|
||||
expect(h.networkService.requestStatus).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Check should have been buffered
|
||||
expect(h.bufferStub.addToBuffer).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("skips checks when window is inactive (active: false)", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// Inactive maintenance window — should be ignored
|
||||
h.maintenanceWindowsRepo.findByMonitorId.mockResolvedValue([makeMaintenanceWindow({ active: false })]);
|
||||
|
||||
h.setNextResponse(true, 200);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// Normal check should proceed since window is inactive
|
||||
expect(h.networkService.requestStatus).toHaveBeenCalledTimes(1);
|
||||
expect(h.bufferStub.addToBuffer).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("skips checks when window is in the past (expired)", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// Window ended an hour ago
|
||||
const pastEnd = new Date(Date.now() - 60 * 60 * 1000);
|
||||
const pastStart = new Date(pastEnd.getTime() - 2 * 60 * 60 * 1000);
|
||||
h.maintenanceWindowsRepo.findByMonitorId.mockResolvedValue([
|
||||
makeMaintenanceWindow({ start: pastStart.toISOString(), end: pastEnd.toISOString() }),
|
||||
]);
|
||||
|
||||
h.setNextResponse(true, 200);
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// Window is in the past — normal check should proceed
|
||||
expect(h.networkService.requestStatus).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,61 @@
|
||||
import { describe, expect, it, beforeEach } from "@jest/globals";
|
||||
import { createHeartbeatTestHarness, makeMonitor, type HeartbeatTestHarness } from "../helpers/heartbeatTestHarness.ts";
|
||||
|
||||
describe("Heartbeat job: notification failure isolation", () => {
|
||||
let h: HeartbeatTestHarness;
|
||||
|
||||
beforeEach(() => {
|
||||
h = createHeartbeatTestHarness();
|
||||
});
|
||||
|
||||
it("creates incident even when notification dispatch throws", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// Make notifications throw
|
||||
h.notificationsService.handleNotifications.mockRejectedValue(new Error("SMTP connection refused"));
|
||||
|
||||
// Drive monitor down
|
||||
h.setNextResponse(false, 503);
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Incident should still have been created
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(1);
|
||||
expect(incidents[0].status).toBe(true);
|
||||
expect(incidents[0].statusCode).toBe(503);
|
||||
|
||||
// Monitor should still be marked down
|
||||
const storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("down");
|
||||
});
|
||||
|
||||
it("auto-resolves incident even when recovery notification throws", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// Drive monitor down (notifications succeed here)
|
||||
h.setNextResponse(false, 503);
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
expect(h.incidentsRepo.getAll()[0].status).toBe(true);
|
||||
|
||||
// Now make notifications throw for the recovery
|
||||
h.notificationsService.handleNotifications.mockRejectedValue(new Error("Slack API timeout"));
|
||||
|
||||
// Recover
|
||||
h.setNextResponse(true, 200);
|
||||
for (let i = 0; i < 5; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Incident should still be resolved despite notification failure
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(1);
|
||||
expect(incidents[0].status).toBe(false);
|
||||
expect(incidents[0].resolutionType).toBe("automatic");
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,77 @@
|
||||
import { describe, expect, it, beforeEach } from "@jest/globals";
|
||||
import { createHeartbeatTestHarness, makeMonitor, type HeartbeatTestHarness } from "../helpers/heartbeatTestHarness.ts";
|
||||
|
||||
describe("Heartbeat job: recovery", () => {
|
||||
let h: HeartbeatTestHarness;
|
||||
|
||||
beforeEach(() => {
|
||||
h = createHeartbeatTestHarness();
|
||||
});
|
||||
|
||||
it("recovers monitor and auto-resolves incident when checks pass", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// 3 failures to trigger down (60% threshold)
|
||||
h.setNextResponse(false, 503);
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(1);
|
||||
expect(h.incidentsRepo.getAll()[0].status).toBe(true);
|
||||
|
||||
// Send passing checks to push failure rate below threshold
|
||||
// Window after 3 fails: [false, false, false, false, false] (prior trues shifted out)
|
||||
// Need enough passes to drop below 60%
|
||||
h.setNextResponse(true, 200);
|
||||
for (let i = 0; i < 5; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Monitor should be back up
|
||||
const storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("up");
|
||||
|
||||
// Incident should be auto-resolved
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(1);
|
||||
expect(incidents[0].status).toBe(false);
|
||||
expect(incidents[0].resolutionType).toBe("automatic");
|
||||
expect(incidents[0].endTime).not.toBeNull();
|
||||
});
|
||||
|
||||
it("creates a new incident after recovery if monitor goes down again", async () => {
|
||||
const monitor = makeMonitor();
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// First outage: 3 failures to trigger (window shifts from all-true)
|
||||
h.setNextResponse(false, 503);
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Recovery: enough passes to drop below threshold
|
||||
h.setNextResponse(true, 200);
|
||||
for (let i = 0; i < 5; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Second outage: 3 more failures to trigger again
|
||||
h.setNextResponse(false, 502);
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(2);
|
||||
|
||||
// First incident resolved
|
||||
expect(incidents[0].status).toBe(false);
|
||||
expect(incidents[0].resolutionType).toBe("automatic");
|
||||
expect(incidents[0].statusCode).toBe(503);
|
||||
|
||||
// Second incident active
|
||||
expect(incidents[1].status).toBe(true);
|
||||
expect(incidents[1].statusCode).toBe(502);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,153 @@
|
||||
import { describe, expect, it, beforeEach } from "@jest/globals";
|
||||
import { createHeartbeatTestHarness, makeMonitor, type HeartbeatTestHarness } from "../helpers/heartbeatTestHarness.ts";
|
||||
import type { MonitorStatusResponse } from "../../src/types/network.ts";
|
||||
import type { HardwareStatusPayload } from "../../src/types/network.ts";
|
||||
|
||||
const makeHardwareResponse = (cpuUsage: number): MonitorStatusResponse<HardwareStatusPayload> => ({
|
||||
monitorId: "mon-1",
|
||||
teamId: "team-1",
|
||||
type: "hardware",
|
||||
status: true,
|
||||
code: 200,
|
||||
message: "OK",
|
||||
responseTime: 50,
|
||||
payload: {
|
||||
data: {
|
||||
cpu: { usage_percent: cpuUsage, temperature: [] },
|
||||
memory: { usage_percent: 0.3 },
|
||||
disk: [{ usage_percent: 0.2 }],
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
describe("Heartbeat job: hardware threshold breach", () => {
|
||||
let h: HeartbeatTestHarness;
|
||||
|
||||
beforeEach(() => {
|
||||
h = createHeartbeatTestHarness();
|
||||
});
|
||||
|
||||
it("transitions to breached and creates incident after alert counter hits zero", async () => {
|
||||
// Hardware monitor with CPU threshold at 80% (stored as 80, compared as 80/100 = 0.8)
|
||||
// Alert counters start at 5, decrement each breach, trigger at 0
|
||||
const monitor = makeMonitor({
|
||||
type: "hardware",
|
||||
status: "up",
|
||||
cpuAlertThreshold: 80,
|
||||
cpuAlertCounter: 5,
|
||||
memoryAlertThreshold: 100,
|
||||
memoryAlertCounter: 5,
|
||||
diskAlertThreshold: 100,
|
||||
diskAlertCounter: 5,
|
||||
tempAlertThreshold: 100,
|
||||
tempAlertCounter: 5,
|
||||
selectedDisks: [],
|
||||
});
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// CPU at 90% (0.9) exceeds threshold of 80% (0.8)
|
||||
h.setNextResponseFull(makeHardwareResponse(0.9));
|
||||
|
||||
// Configure message builder for when incident is created
|
||||
h.messageBuilder.extractThresholdBreaches.mockReturnValue([{ metric: "cpu", formattedValue: "90%", threshold: 80, unit: "%" }]);
|
||||
|
||||
// 4 heartbeats: counter goes 5→4→3→2→1, status still "up"
|
||||
for (let i = 0; i < 4; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
let storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("up");
|
||||
expect(storedMonitor.cpuAlertCounter).toBe(1);
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(0);
|
||||
|
||||
// 5th heartbeat: counter hits 0, status transitions to "breached"
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("breached");
|
||||
expect(storedMonitor.cpuAlertCounter).toBe(0);
|
||||
|
||||
// Incident created with statusCode 9999 and threshold message
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(1);
|
||||
expect(incidents[0].statusCode).toBe(9999);
|
||||
expect(incidents[0].message).toBe("CPU: 90% (threshold: 80%)");
|
||||
expect(incidents[0].status).toBe(true);
|
||||
|
||||
// Notification should have been triggered
|
||||
expect(h.notificationsService.handleNotifications).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("recovers from breached to up when metrics return to normal", async () => {
|
||||
// Start with a monitor already in breached state with counter at 0
|
||||
const monitor = makeMonitor({
|
||||
type: "hardware",
|
||||
status: "breached",
|
||||
cpuAlertThreshold: 80,
|
||||
cpuAlertCounter: 0,
|
||||
memoryAlertThreshold: 100,
|
||||
memoryAlertCounter: 5,
|
||||
diskAlertThreshold: 100,
|
||||
diskAlertCounter: 5,
|
||||
tempAlertThreshold: 100,
|
||||
tempAlertCounter: 5,
|
||||
selectedDisks: [],
|
||||
});
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
// Also seed an active incident for this monitor
|
||||
await h.incidentsRepo.create({
|
||||
monitorId: "mon-1",
|
||||
teamId: "team-1",
|
||||
status: true,
|
||||
statusCode: 9999,
|
||||
message: "CPU: 90% (threshold: 80%)",
|
||||
});
|
||||
|
||||
// CPU at 50% (0.5) — below threshold of 80% (0.8)
|
||||
h.setNextResponseFull(makeHardwareResponse(0.5));
|
||||
|
||||
await h.heartbeatJob(monitor);
|
||||
|
||||
// Monitor should recover to "up"
|
||||
const storedMonitor = await h.monitorsRepo.findById("mon-1", "team-1");
|
||||
expect(storedMonitor.status).toBe("up");
|
||||
// Counter resets to 5 when not breaching
|
||||
expect(storedMonitor.cpuAlertCounter).toBe(5);
|
||||
|
||||
// Incident should be auto-resolved
|
||||
const incidents = h.incidentsRepo.getAll();
|
||||
expect(incidents).toHaveLength(1);
|
||||
expect(incidents[0].status).toBe(false);
|
||||
expect(incidents[0].resolutionType).toBe("automatic");
|
||||
});
|
||||
|
||||
it("does not create duplicate incidents while remaining breached", async () => {
|
||||
const monitor = makeMonitor({
|
||||
type: "hardware",
|
||||
status: "up",
|
||||
cpuAlertThreshold: 80,
|
||||
cpuAlertCounter: 5,
|
||||
memoryAlertThreshold: 100,
|
||||
memoryAlertCounter: 5,
|
||||
diskAlertThreshold: 100,
|
||||
diskAlertCounter: 5,
|
||||
tempAlertThreshold: 100,
|
||||
tempAlertCounter: 5,
|
||||
selectedDisks: [],
|
||||
});
|
||||
h.monitorsRepo.seed(monitor);
|
||||
|
||||
h.setNextResponseFull(makeHardwareResponse(0.9));
|
||||
h.messageBuilder.extractThresholdBreaches.mockReturnValue([{ metric: "cpu", formattedValue: "90%", threshold: 80, unit: "%" }]);
|
||||
|
||||
// 5 heartbeats to trigger breach, then 5 more while still breaching
|
||||
for (let i = 0; i < 10; i++) {
|
||||
await h.heartbeatJob(monitor);
|
||||
}
|
||||
|
||||
// Only one incident despite 10 heartbeats
|
||||
expect(h.incidentsRepo.getAll()).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,216 @@
|
||||
import { describe, expect, it, jest, beforeEach } from "@jest/globals";
|
||||
import { IncidentService } from "../../src/service/business/incidentService.ts";
|
||||
import { InMemoryIncidentsRepository } from "../helpers/InMemoryIncidentsRepository.ts";
|
||||
import { createMockLogger } from "../helpers/createMockLogger.ts";
|
||||
import type { IMonitorsRepository, IUsersRepository } from "../../src/repositories/index.ts";
|
||||
import type { INotificationMessageBuilder } from "../../src/service/infrastructure/notificationMessageBuilder.ts";
|
||||
import type { Monitor } from "../../src/types/monitor.ts";
|
||||
import type { MonitorActionDecision } from "../../src/service/infrastructure/SuperSimpleQueue/SuperSimpleQueueHelper.ts";
|
||||
|
||||
// ── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
const makeMonitor = (overrides?: Partial<Monitor>): Monitor =>
|
||||
({
|
||||
id: "mon-1",
|
||||
teamId: "team-1",
|
||||
name: "Test Monitor",
|
||||
type: "http",
|
||||
url: "https://example.com",
|
||||
status: "down",
|
||||
...overrides,
|
||||
}) as Monitor;
|
||||
|
||||
const makeDecision = (overrides?: Partial<MonitorActionDecision>): MonitorActionDecision => ({
|
||||
shouldCreateIncident: false,
|
||||
shouldResolveIncident: false,
|
||||
shouldSendNotification: false,
|
||||
incidentReason: null,
|
||||
notificationReason: null,
|
||||
...overrides,
|
||||
});
|
||||
|
||||
// ── Test suite ───────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Integration-style tests for IncidentService wired to the in-memory
 * incidents repository: creation, duplicate suppression, automatic and
 * manual resolution, threshold breaches, no-op decisions, a full
 * lifecycle, and isolation between monitors.
 *
 * Only the incidents repo is real storage; the monitors/users repos and
 * the message builder are bare jest mocks.
 */
describe("Incident lifecycle (integration)", () => {
	let repo: InMemoryIncidentsRepository;
	let service: IncidentService;
	let monitorsRepo: jest.Mocked<IMonitorsRepository>;
	let usersRepo: jest.Mocked<IUsersRepository>;
	let messageBuilder: jest.Mocked<INotificationMessageBuilder>;

	beforeEach(() => {
		// Fresh repository + service per test so no incident leaks across cases.
		repo = new InMemoryIncidentsRepository();
		monitorsRepo = { findById: jest.fn() } as unknown as jest.Mocked<IMonitorsRepository>;
		usersRepo = { findById: jest.fn() } as unknown as jest.Mocked<IUsersRepository>;
		messageBuilder = { extractThresholdBreaches: jest.fn() } as unknown as jest.Mocked<INotificationMessageBuilder>;
		service = new IncidentService(createMockLogger() as any, repo, monitorsRepo, usersRepo, messageBuilder);
	});

	// ── Creation ─────────────────────────────────────────────────────────────

	it("creates an incident when monitor goes down", async () => {
		const monitor = makeMonitor({ status: "down" });
		const decision = makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" });

		const incident = await service.handleIncident(monitor, 503, decision);

		// A freshly created incident is active: status true, open-ended, unresolved.
		expect(incident).not.toBeNull();
		expect(incident!.monitorId).toBe("mon-1");
		expect(incident!.teamId).toBe("team-1");
		expect(incident!.status).toBe(true);
		expect(incident!.statusCode).toBe(503);
		expect(incident!.endTime).toBeNull();
		expect(incident!.resolutionType).toBeNull();

		const stored = repo.getAll();
		expect(stored).toHaveLength(1);
		expect(stored[0].id).toBe(incident!.id);
	});

	// ── Idempotency ──────────────────────────────────────────────────────────

	it("does not create a duplicate incident for the same monitor", async () => {
		const monitor = makeMonitor({ status: "down" });
		const decision = makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" });

		const first = await service.handleIncident(monitor, 503, decision);
		// The second identical call must return the already-active incident.
		const second = await service.handleIncident(monitor, 503, decision);

		expect(second!.id).toBe(first!.id);
		expect(repo.getAll()).toHaveLength(1);
	});

	// ── Auto-resolve ─────────────────────────────────────────────────────────

	it("auto-resolves an incident when monitor recovers", async () => {
		const monitor = makeMonitor({ status: "down" });
		const createDecision = makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" });
		const created = await service.handleIncident(monitor, 503, createDecision);

		const resolveDecision = makeDecision({ shouldResolveIncident: true });
		const resolved = await service.handleIncident(monitor, 200, resolveDecision);

		// Resolution closes the same incident (same id), marks it automatic,
		// and stamps an end time.
		expect(resolved).not.toBeNull();
		expect(resolved!.id).toBe(created!.id);
		expect(resolved!.status).toBe(false);
		expect(resolved!.endTime).toBeDefined();
		expect(resolved!.resolutionType).toBe("automatic");
	});

	// ── Resolve with nothing active ──────────────────────────────────────────

	it("returns null when resolving with no active incident", async () => {
		const monitor = makeMonitor({ status: "up" });
		const decision = makeDecision({ shouldResolveIncident: true });

		const result = await service.handleIncident(monitor, 200, decision);

		expect(result).toBeNull();
	});

	// ── Manual resolution ────────────────────────────────────────────────────

	it("manually resolves an active incident with comment", async () => {
		const monitor = makeMonitor({ status: "down" });
		const decision = makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" });
		const created = await service.handleIncident(monitor, 500, decision);

		const resolved = await service.resolveIncident(created!.id, "user-1", "team-1", "Root cause identified", "user@test.com");

		// Manual resolution records resolver id, email, and the comment.
		expect(resolved.status).toBe(false);
		expect(resolved.resolutionType).toBe("manual");
		expect(resolved.resolvedBy).toBe("user-1");
		expect(resolved.resolvedByEmail).toBe("user@test.com");
		expect(resolved.comment).toBe("Root cause identified");
		expect(resolved.endTime).toBeDefined();
	});

	it("throws when manually resolving an already-resolved incident", async () => {
		const monitor = makeMonitor({ status: "down" });
		const decision = makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" });
		const created = await service.handleIncident(monitor, 500, decision);

		await service.resolveIncident(created!.id, "user-1", "team-1");

		// NOTE(review): the second attempt is expected to surface as
		// "Incident not found" — presumably because resolved incidents are
		// no longer looked up as active; confirm against the service.
		await expect(service.resolveIncident(created!.id, "user-1", "team-1")).rejects.toThrow("Incident not found");
	});

	// ── Threshold breach ─────────────────────────────────────────────────────

	it("creates a threshold breach incident with statusCode 9999 and descriptive message", async () => {
		const monitor = makeMonitor({ status: "breached", type: "hardware" });
		(messageBuilder.extractThresholdBreaches as jest.Mock).mockReturnValue([
			{ metric: "cpu", formattedValue: "92%", threshold: 80, unit: "%" },
			{ metric: "memory", formattedValue: "88%", threshold: 85, unit: "%" },
		]);
		const decision = makeDecision({ shouldCreateIncident: true, incidentReason: "threshold_breach" });

		const incident = await service.handleIncident(monitor, 200, decision, { monitorId: "mon-1" } as any);

		// Threshold breaches use the sentinel statusCode 9999 with one
		// "METRIC: value (threshold: N%)" fragment per breach, comma-joined.
		expect(incident!.statusCode).toBe(9999);
		expect(incident!.message).toBe("CPU: 92% (threshold: 80%), MEMORY: 88% (threshold: 85%)");
	});

	// ── No action ────────────────────────────────────────────────────────────

	it("returns null when no action is needed", async () => {
		const monitor = makeMonitor({ status: "up" });
		const decision = makeDecision();

		const result = await service.handleIncident(monitor, 200, decision);

		expect(result).toBeNull();
		expect(repo.getAll()).toHaveLength(0);
	});

	// ── Full lifecycle ───────────────────────────────────────────────────────

	it("handles a complete lifecycle: create -> manual resolve -> new incident -> auto-resolve", async () => {
		const monitor = makeMonitor({ status: "down" });

		// First outage
		const first = await service.handleIncident(monitor, 503, makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" }));
		expect(first!.status).toBe(true);

		// Manually resolved
		await service.resolveIncident(first!.id, "user-1", "team-1", "Restarted server");

		// Second outage — new incident since previous was resolved
		const second = await service.handleIncident(monitor, 502, makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" }));
		expect(second!.id).not.toBe(first!.id);
		expect(second!.statusCode).toBe(502);

		// Auto-resolve
		const resolved = await service.handleIncident(monitor, 200, makeDecision({ shouldResolveIncident: true }));
		expect(resolved!.id).toBe(second!.id);
		expect(resolved!.status).toBe(false);
		expect(resolved!.resolutionType).toBe("automatic");

		// Both incidents stored, both resolved
		const all = repo.getAll();
		expect(all).toHaveLength(2);
		expect(all.every((i) => i.status === false)).toBe(true);
	});

	// ── Cross-monitor isolation ──────────────────────────────────────────────

	it("incidents for different monitors do not interfere", async () => {
		const monitorA = makeMonitor({ id: "mon-a", status: "down" });
		const monitorB = makeMonitor({ id: "mon-b", status: "down" });
		const decision = makeDecision({ shouldCreateIncident: true, incidentReason: "status_down" });

		const incidentA = await service.handleIncident(monitorA, 500, decision);
		const incidentB = await service.handleIncident(monitorB, 502, decision);

		// Each monitor gets its own incident record.
		expect(incidentA!.id).not.toBe(incidentB!.id);
		expect(repo.getAll()).toHaveLength(2);

		// Resolving monitor A does not affect monitor B
		const resolved = await service.handleIncident(monitorA, 200, makeDecision({ shouldResolveIncident: true }));
		expect(resolved!.id).toBe(incidentA!.id);

		const bStillActive = await repo.findActiveByMonitorId("mon-b", "team-1");
		expect(bStillActive).not.toBeNull();
		expect(bStillActive!.status).toBe(true);
	});
});
|
||||
Reference in New Issue
Block a user