Compare commits

..

16 Commits

Author SHA1 Message Date
Matti Nannt
d49517be91 fix(ci): backport release tag validation fix to release/4.0 (#6609) 2025-09-26 09:41:06 +02:00
Victor Hugo dos Santos
7aedb73378 chore: backport jwt and script fix (#6607)
Co-authored-by: Anshuman Pandey <54475686+pandeymangg@users.noreply.github.com>
2025-09-25 11:53:45 -03:00
Dhruwang Jariwala
4112722a88 fix: replace button with div in IdBadge to prevent hydration issues (backport) (#6602) 2025-09-25 10:42:29 -03:00
Piyush Gupta
0eddeb46c1 fix: logicfallback cleanup backport (#6603) 2025-09-25 18:49:55 +05:30
Piyush Gupta
774f45b109 chore: reverted translations 2025-09-25 18:21:51 +05:30
Piyush Gupta
3c65c002bb chore: reverted translations 2025-09-25 18:19:28 +05:30
Piyush Gupta
65539e85df fix: logicFallback cleanup backport 2025-09-25 17:42:55 +05:30
Anshuman Pandey
91dab12a81 fix: backports migration script changes (#6583) 2025-09-22 15:30:36 +02:00
Matti Nannt
1c5244e030 fix: s3 storage configured flag fails on minimum setup (#6573) 2025-09-22 09:11:58 +02:00
Dhruwang Jariwala
8b3c0f1547 fix: Backport/follow ups toast to 4.0 (#6569) 2025-09-19 15:04:24 +02:00
Dhruwang Jariwala
07370ac765 fix: (backport) synced translations (#6567) 2025-09-19 12:26:48 +02:00
Anshuman Pandey
0f699405bb fix: backports the formbricks.sh script and adds the migration script (#6564) 2025-09-19 12:25:03 +02:00
Anshuman Pandey
422f05b386 feat: support IAM role authentication for S3 storage (#6560) 2025-09-19 15:32:30 +05:30
Dhruwang Jariwala
bdfbc4b0f6 fix: Backport/critical fixes to 4.0 (#6563)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-09-19 11:02:54 +02:00
Matti Nannt
b1828a2f27 feat: support IAM role authentication for S3 storage
- Update IS_STORAGE_CONFIGURED to only require S3_REGION and S3_BUCKET_NAME
- Make S3_ACCESS_KEY and S3_SECRET_KEY optional in S3 client creation
- Allow AWS SDK to use IAM roles, instance profiles, and credential chains
- Maintain backward compatibility with explicit credentials and MinIO
- Update tests to reflect new IAM role authentication behavior

BREAKING CHANGE: IS_STORAGE_CONFIGURED now returns true with only region and bucket configured
2025-09-18 12:24:58 +02:00
Matti Nannt
3ba6dd9ada chore(backport): updated release workflow (#6557) 2025-09-17 11:53:15 +02:00
96 changed files with 2206 additions and 2764 deletions

View File

@@ -33,7 +33,7 @@ jobs:
timeout-minutes: 60
services:
postgres:
image: pgvector/pgvector@sha256:9ae02a756ba16a2d69dd78058e25915e36e189bb36ddf01ceae86390d7ed786a
image: pgvector/pgvector:pg17
env:
POSTGRES_DB: postgres
POSTGRES_USER: postgres
@@ -166,12 +166,6 @@ jobs:
cd apps/web && pnpm vitest run modules/core/rate-limit/rate-limit-load.test.ts
shell: bash
- name: Run Cache Integration Tests
run: |
echo "Running cache integration tests with Redis/Valkey..."
cd packages/cache && pnpm vitest run src/cache-integration.test.ts
shell: bash
- name: Check for Enterprise License
run: |
LICENSE_KEY=$(grep '^ENTERPRISE_LICENSE_KEY=' .env | cut -d'=' -f2-)
@@ -245,4 +239,4 @@ jobs:
- name: Output App Logs
if: failure()
run: cat app.log
run: cat app.log

View File

@@ -4,7 +4,7 @@ on:
workflow_call:
inputs:
release_tag:
description: "The release tag name (e.g., v1.2.3)"
description: "The release tag name (e.g., 1.2.3)"
required: true
type: string
commit_sha:
@@ -53,8 +53,8 @@ jobs:
set -euo pipefail
# Validate release tag format
if [[ ! "$RELEASE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
echo "❌ Error: Invalid release tag format. Expected format: v1.2.3, v1.2.3-alpha"
if [[ ! "$RELEASE_TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
echo "❌ Error: Invalid release tag format. Expected format: 1.2.3, 1.2.3-alpha"
echo "Provided: $RELEASE_TAG"
exit 1
fi

View File

@@ -1 +0,0 @@
export { GET } from "@/modules/api/v2/health/route";

View File

@@ -1,9 +1,9 @@
import { AuthenticationMethod } from "@/app/middleware/endpoint-validator";
import * as Sentry from "@sentry/nextjs";
import { NextRequest } from "next/server";
import { Mock, beforeEach, describe, expect, test, vi } from "vitest";
import { logger } from "@formbricks/logger";
import { TAuthenticationApiKey } from "@formbricks/types/auth";
import { AuthenticationMethod } from "@/app/middleware/endpoint-validator";
import { responses } from "./response";
// Mocks
@@ -14,10 +14,6 @@ vi.mock("@/modules/ee/audit-logs/lib/handler", () => ({
vi.mock("@sentry/nextjs", () => ({
captureException: vi.fn(),
withScope: vi.fn((callback) => {
callback(mockSentryScope);
return mockSentryScope;
}),
}));
// Define these outside the mock factory so they can be referenced in tests and reset by clearAllMocks.
@@ -25,14 +21,6 @@ const mockContextualLoggerError = vi.fn();
const mockContextualLoggerWarn = vi.fn();
const mockContextualLoggerInfo = vi.fn();
// Mock Sentry scope that can be referenced in tests
const mockSentryScope = {
setTag: vi.fn(),
setExtra: vi.fn(),
setContext: vi.fn(),
setLevel: vi.fn(),
};
vi.mock("@formbricks/logger", () => {
const mockWithContextInstance = vi.fn(() => ({
error: mockContextualLoggerError,
@@ -122,12 +110,6 @@ describe("withV1ApiWrapper", () => {
}));
vi.clearAllMocks();
// Reset mock Sentry scope calls
mockSentryScope.setTag.mockClear();
mockSentryScope.setExtra.mockClear();
mockSentryScope.setContext.mockClear();
mockSentryScope.setLevel.mockClear();
});
test("logs and audits on error response with API key authentication", async () => {
@@ -179,9 +161,10 @@ describe("withV1ApiWrapper", () => {
organizationId: "org-1",
})
);
expect(Sentry.withScope).toHaveBeenCalled();
expect(mockSentryScope.setExtra).toHaveBeenCalledWith("originalError", undefined);
expect(Sentry.captureException).toHaveBeenCalledWith(expect.any(Error));
expect(Sentry.captureException).toHaveBeenCalledWith(
expect.any(Error),
expect.objectContaining({ extra: expect.objectContaining({ correlationId: "abc-123" }) })
);
});
test("does not log Sentry if not 500", async () => {
@@ -286,8 +269,10 @@ describe("withV1ApiWrapper", () => {
organizationId: "org-1",
})
);
expect(Sentry.withScope).toHaveBeenCalled();
expect(Sentry.captureException).toHaveBeenCalledWith(expect.any(Error));
expect(Sentry.captureException).toHaveBeenCalledWith(
expect.any(Error),
expect.objectContaining({ extra: expect.objectContaining({ correlationId: "err-1" }) })
);
});
test("does not log on success response but still audits", async () => {

View File

@@ -1,8 +1,3 @@
import * as Sentry from "@sentry/nextjs";
import { Session, getServerSession } from "next-auth";
import { NextRequest } from "next/server";
import { logger } from "@formbricks/logger";
import { TAuthenticationApiKey } from "@formbricks/types/auth";
import { authenticateRequest } from "@/app/api/v1/auth";
import { responses } from "@/app/lib/api/response";
import {
@@ -19,6 +14,11 @@ import { rateLimitConfigs } from "@/modules/core/rate-limit/rate-limit-configs";
import { TRateLimitConfig } from "@/modules/core/rate-limit/types/rate-limit";
import { queueAuditEvent } from "@/modules/ee/audit-logs/lib/handler";
import { TAuditAction, TAuditTarget, UNKNOWN_DATA } from "@/modules/ee/audit-logs/types/audit-log";
import * as Sentry from "@sentry/nextjs";
import { Session, getServerSession } from "next-auth";
import { NextRequest } from "next/server";
import { logger } from "@formbricks/logger";
import { TAuthenticationApiKey } from "@formbricks/types/auth";
export type TApiAuditLog = Parameters<typeof queueAuditEvent>[0];
export type TApiV1Authentication = TAuthenticationApiKey | Session | null;
@@ -173,21 +173,8 @@ const logErrorDetails = (res: Response, req: NextRequest, correlationId: string,
logger.withContext(logContext).error("V1 API Error Details");
if (SENTRY_DSN && IS_PRODUCTION && res.status >= 500) {
// Set correlation ID as a tag for easy filtering
Sentry.withScope((scope) => {
scope.setTag("correlationId", correlationId);
scope.setLevel("error");
// If we have an actual error, capture it with full stacktrace
// Otherwise, create a generic error with context
if (error instanceof Error) {
Sentry.captureException(error);
} else {
scope.setExtra("originalError", error);
const genericError = new Error(`API V1 error, id: ${correlationId}`);
Sentry.captureException(genericError);
}
});
const err = new Error(`API V1 error, id: ${correlationId}`);
Sentry.captureException(err, { extra: { error, correlationId } });
}
};

View File

@@ -114,7 +114,10 @@ export const MAX_FILE_UPLOAD_SIZES = {
standard: 1024 * 1024 * 10, // 10MB
big: 1024 * 1024 * 1024, // 1GB
} as const;
export const IS_STORAGE_CONFIGURED = Boolean(S3_ACCESS_KEY && S3_SECRET_KEY && S3_REGION && S3_BUCKET_NAME);
// Storage is considered configured if we have the minimum required settings:
// - S3_REGION and S3_BUCKET_NAME are always required
// - S3_ACCESS_KEY and S3_SECRET_KEY are optional (for IAM role-based authentication)
export const IS_STORAGE_CONFIGURED = Boolean(S3_BUCKET_NAME);
// Colors for Survey Bg
export const SURVEY_BG_COLORS = [

View File

@@ -1,6 +1,7 @@
import { env } from "@/lib/env";
import jwt from "jsonwebtoken";
import { beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import * as crypto from "@/lib/crypto";
import {
createEmailChangeToken,
createEmailToken,
@@ -14,12 +15,69 @@ import {
verifyTokenForLinkSurvey,
} from "./jwt";
const TEST_ENCRYPTION_KEY = "0".repeat(32); // 32-byte key for AES-256-GCM
const TEST_NEXTAUTH_SECRET = "test-nextauth-secret";
const DIFFERENT_SECRET = "different-secret";
// Error message constants
const NEXTAUTH_SECRET_ERROR = "NEXTAUTH_SECRET is not set";
const ENCRYPTION_KEY_ERROR = "ENCRYPTION_KEY is not set";
// Helper function to test error cases for missing secrets/keys
const testMissingSecretsError = async (
testFn: (...args: any[]) => any,
args: any[],
options: {
testNextAuthSecret?: boolean;
testEncryptionKey?: boolean;
isAsync?: boolean;
} = {}
) => {
const { testNextAuthSecret = true, testEncryptionKey = true, isAsync = false } = options;
if (testNextAuthSecret) {
const constants = await import("@/lib/constants");
const originalSecret = (constants as any).NEXTAUTH_SECRET;
(constants as any).NEXTAUTH_SECRET = undefined;
if (isAsync) {
await expect(testFn(...args)).rejects.toThrow(NEXTAUTH_SECRET_ERROR);
} else {
expect(() => testFn(...args)).toThrow(NEXTAUTH_SECRET_ERROR);
}
// Restore
(constants as any).NEXTAUTH_SECRET = originalSecret;
}
if (testEncryptionKey) {
const constants = await import("@/lib/constants");
const originalKey = (constants as any).ENCRYPTION_KEY;
(constants as any).ENCRYPTION_KEY = undefined;
if (isAsync) {
await expect(testFn(...args)).rejects.toThrow(ENCRYPTION_KEY_ERROR);
} else {
expect(() => testFn(...args)).toThrow(ENCRYPTION_KEY_ERROR);
}
// Restore
(constants as any).ENCRYPTION_KEY = originalKey;
}
};
// Mock environment variables
vi.mock("@/lib/env", () => ({
env: {
ENCRYPTION_KEY: "0".repeat(32), // 32-byte key for AES-256-GCM
ENCRYPTION_KEY: "0".repeat(32),
NEXTAUTH_SECRET: "test-nextauth-secret",
} as typeof env,
},
}));
// Mock constants
vi.mock("@/lib/constants", () => ({
NEXTAUTH_SECRET: "test-nextauth-secret",
ENCRYPTION_KEY: "0".repeat(32),
}));
// Mock prisma
@@ -31,22 +89,65 @@ vi.mock("@formbricks/database", () => ({
},
}));
describe("JWT Functions", () => {
// Mock logger
vi.mock("@formbricks/logger", () => ({
logger: {
error: vi.fn(),
warn: vi.fn(),
info: vi.fn(),
},
}));
describe("JWT Functions - Comprehensive Security Tests", () => {
const mockUser = {
id: "test-user-id",
email: "test@example.com",
};
let mockSymmetricEncrypt: any;
let mockSymmetricDecrypt: any;
beforeEach(() => {
vi.clearAllMocks();
// Setup default crypto mocks
mockSymmetricEncrypt = vi
.spyOn(crypto, "symmetricEncrypt")
.mockImplementation((text: string) => `encrypted_${text}`);
mockSymmetricDecrypt = vi
.spyOn(crypto, "symmetricDecrypt")
.mockImplementation((encryptedText: string) => encryptedText.replace("encrypted_", ""));
(prisma.user.findUnique as any).mockResolvedValue(mockUser);
});
describe("createToken", () => {
test("should create a valid token", () => {
const token = createToken(mockUser.id, mockUser.email);
test("should create a valid token with encrypted user ID", () => {
const token = createToken(mockUser.id);
expect(token).toBeDefined();
expect(typeof token).toBe("string");
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(mockUser.id, TEST_ENCRYPTION_KEY);
});
test("should accept custom options", () => {
const customOptions = { expiresIn: "1h" };
const token = createToken(mockUser.id, customOptions);
expect(token).toBeDefined();
// Verify the token contains the expected expiration
const decoded = jwt.decode(token) as any;
expect(decoded.exp).toBeDefined();
expect(decoded.iat).toBeDefined();
// Should expire in approximately 1 hour (3600 seconds)
expect(decoded.exp - decoded.iat).toBe(3600);
});
test("should throw error if NEXTAUTH_SECRET is not set", async () => {
await testMissingSecretsError(createToken, [mockUser.id], {
testNextAuthSecret: true,
testEncryptionKey: false,
});
});
});
@@ -56,6 +157,18 @@ describe("JWT Functions", () => {
const token = createTokenForLinkSurvey(surveyId, mockUser.email);
expect(token).toBeDefined();
expect(typeof token).toBe("string");
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(mockUser.email, TEST_ENCRYPTION_KEY);
});
test("should include surveyId in payload", () => {
const surveyId = "test-survey-id";
const token = createTokenForLinkSurvey(surveyId, mockUser.email);
const decoded = jwt.decode(token) as any;
expect(decoded.surveyId).toBe(surveyId);
});
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
await testMissingSecretsError(createTokenForLinkSurvey, ["survey-id", mockUser.email]);
});
});
@@ -64,24 +177,30 @@ describe("JWT Functions", () => {
const token = createEmailToken(mockUser.email);
expect(token).toBeDefined();
expect(typeof token).toBe("string");
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(mockUser.email, TEST_ENCRYPTION_KEY);
});
test("should throw error if NEXTAUTH_SECRET is not set", () => {
const originalSecret = env.NEXTAUTH_SECRET;
try {
(env as any).NEXTAUTH_SECRET = undefined;
expect(() => createEmailToken(mockUser.email)).toThrow("NEXTAUTH_SECRET is not set");
} finally {
(env as any).NEXTAUTH_SECRET = originalSecret;
}
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
await testMissingSecretsError(createEmailToken, [mockUser.email]);
});
});
describe("getEmailFromEmailToken", () => {
test("should extract email from valid token", () => {
const token = createEmailToken(mockUser.email);
const extractedEmail = getEmailFromEmailToken(token);
expect(extractedEmail).toBe(mockUser.email);
describe("createEmailChangeToken", () => {
test("should create a valid email change token with 1 day expiration", () => {
const token = createEmailChangeToken(mockUser.id, mockUser.email);
expect(token).toBeDefined();
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(mockUser.id, TEST_ENCRYPTION_KEY);
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(mockUser.email, TEST_ENCRYPTION_KEY);
const decoded = jwt.decode(token) as any;
expect(decoded.exp).toBeDefined();
expect(decoded.iat).toBeDefined();
// Should expire in approximately 1 day (86400 seconds)
expect(decoded.exp - decoded.iat).toBe(86400);
});
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
await testMissingSecretsError(createEmailChangeToken, [mockUser.id, mockUser.email]);
});
});
@@ -91,6 +210,50 @@ describe("JWT Functions", () => {
const token = createInviteToken(inviteId, mockUser.email);
expect(token).toBeDefined();
expect(typeof token).toBe("string");
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(inviteId, TEST_ENCRYPTION_KEY);
expect(mockSymmetricEncrypt).toHaveBeenCalledWith(mockUser.email, TEST_ENCRYPTION_KEY);
});
test("should accept custom options", () => {
const inviteId = "test-invite-id";
const customOptions = { expiresIn: "24h" };
const token = createInviteToken(inviteId, mockUser.email, customOptions);
expect(token).toBeDefined();
const decoded = jwt.decode(token) as any;
expect(decoded.exp).toBeDefined();
expect(decoded.iat).toBeDefined();
// Should expire in approximately 24 hours (86400 seconds)
expect(decoded.exp - decoded.iat).toBe(86400);
});
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
await testMissingSecretsError(createInviteToken, ["invite-id", mockUser.email]);
});
});
describe("getEmailFromEmailToken", () => {
test("should extract email from valid token", () => {
const token = createEmailToken(mockUser.email);
const extractedEmail = getEmailFromEmailToken(token);
expect(extractedEmail).toBe(mockUser.email);
expect(mockSymmetricDecrypt).toHaveBeenCalledWith(`encrypted_${mockUser.email}`, TEST_ENCRYPTION_KEY);
});
test("should fall back to original email if decryption fails", () => {
mockSymmetricDecrypt.mockImplementationOnce(() => {
throw new Error("Decryption failed");
});
// Create token manually with unencrypted email for legacy compatibility
const legacyToken = jwt.sign({ email: mockUser.email }, TEST_NEXTAUTH_SECRET);
const extractedEmail = getEmailFromEmailToken(legacyToken);
expect(extractedEmail).toBe(mockUser.email);
});
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
const token = jwt.sign({ email: "test@example.com" }, TEST_NEXTAUTH_SECRET);
await testMissingSecretsError(getEmailFromEmailToken, [token]);
});
});
@@ -106,23 +269,194 @@ describe("JWT Functions", () => {
const result = verifyTokenForLinkSurvey("invalid-token", "test-survey-id");
expect(result).toBeNull();
});
test("should return null if NEXTAUTH_SECRET is not set", async () => {
const constants = await import("@/lib/constants");
const originalSecret = (constants as any).NEXTAUTH_SECRET;
(constants as any).NEXTAUTH_SECRET = undefined;
const result = verifyTokenForLinkSurvey("any-token", "test-survey-id");
expect(result).toBeNull();
// Restore
(constants as any).NEXTAUTH_SECRET = originalSecret;
});
test("should return null if surveyId doesn't match", () => {
const surveyId = "test-survey-id";
const differentSurveyId = "different-survey-id";
const token = createTokenForLinkSurvey(surveyId, mockUser.email);
const result = verifyTokenForLinkSurvey(token, differentSurveyId);
expect(result).toBeNull();
});
test("should return null if email is missing from payload", () => {
const tokenWithoutEmail = jwt.sign({ surveyId: "test-survey-id" }, TEST_NEXTAUTH_SECRET);
const result = verifyTokenForLinkSurvey(tokenWithoutEmail, "test-survey-id");
expect(result).toBeNull();
});
test("should fall back to original email if decryption fails", () => {
mockSymmetricDecrypt.mockImplementationOnce(() => {
throw new Error("Decryption failed");
});
// Create legacy token with unencrypted email
const legacyToken = jwt.sign(
{
email: mockUser.email,
surveyId: "test-survey-id",
},
TEST_NEXTAUTH_SECRET
);
const result = verifyTokenForLinkSurvey(legacyToken, "test-survey-id");
expect(result).toBe(mockUser.email);
});
test("should fall back to original email if ENCRYPTION_KEY is not set", async () => {
const constants = await import("@/lib/constants");
const originalKey = (constants as any).ENCRYPTION_KEY;
(constants as any).ENCRYPTION_KEY = undefined;
// Create a token with unencrypted email (as it would be if ENCRYPTION_KEY was not set during creation)
const token = jwt.sign(
{
email: mockUser.email,
surveyId: "survey-id",
},
TEST_NEXTAUTH_SECRET
);
const result = verifyTokenForLinkSurvey(token, "survey-id");
expect(result).toBe(mockUser.email);
// Restore
(constants as any).ENCRYPTION_KEY = originalKey;
});
test("should verify legacy survey tokens with surveyId-based secret", async () => {
const surveyId = "test-survey-id";
// Create legacy token with old format (NEXTAUTH_SECRET + surveyId)
const legacyToken = jwt.sign({ email: `encrypted_${mockUser.email}` }, TEST_NEXTAUTH_SECRET + surveyId);
const result = verifyTokenForLinkSurvey(legacyToken, surveyId);
expect(result).toBe(mockUser.email);
});
test("should reject survey tokens that fail both new and legacy verification", async () => {
const surveyId = "test-survey-id";
const invalidToken = jwt.sign({ email: "encrypted_test@example.com" }, "wrong-secret");
const result = verifyTokenForLinkSurvey(invalidToken, surveyId);
expect(result).toBeNull();
// Verify error logging
const { logger } = await import("@formbricks/logger");
expect(logger.error).toHaveBeenCalledWith(expect.any(Error), "Survey link token verification failed");
});
test("should reject legacy survey tokens for wrong survey", () => {
const correctSurveyId = "correct-survey-id";
const wrongSurveyId = "wrong-survey-id";
// Create legacy token for one survey
const legacyToken = jwt.sign(
{ email: `encrypted_${mockUser.email}` },
TEST_NEXTAUTH_SECRET + correctSurveyId
);
// Try to verify with different survey ID
const result = verifyTokenForLinkSurvey(legacyToken, wrongSurveyId);
expect(result).toBeNull();
});
});
describe("verifyToken", () => {
test("should verify valid token", async () => {
const token = createToken(mockUser.id, mockUser.email);
const token = createToken(mockUser.id);
const verified = await verifyToken(token);
expect(verified).toEqual({
id: mockUser.id,
id: mockUser.id, // Returns the decrypted user ID
email: mockUser.email,
});
});
test("should throw error if user not found", async () => {
(prisma.user.findUnique as any).mockResolvedValue(null);
const token = createToken(mockUser.id, mockUser.email);
const token = createToken(mockUser.id);
await expect(verifyToken(token)).rejects.toThrow("User not found");
});
test("should throw error if NEXTAUTH_SECRET is not set", async () => {
await testMissingSecretsError(verifyToken, ["any-token"], {
testNextAuthSecret: true,
testEncryptionKey: false,
isAsync: true,
});
});
test("should throw error for invalid token signature", async () => {
const invalidToken = jwt.sign({ id: "test-id" }, DIFFERENT_SECRET);
await expect(verifyToken(invalidToken)).rejects.toThrow("Invalid token");
});
test("should throw error if token payload is missing id", async () => {
const tokenWithoutId = jwt.sign({ email: mockUser.email }, TEST_NEXTAUTH_SECRET);
await expect(verifyToken(tokenWithoutId)).rejects.toThrow("Invalid token");
});
test("should return raw id from payload", async () => {
// Create token with unencrypted id
const token = jwt.sign({ id: mockUser.id }, TEST_NEXTAUTH_SECRET);
const verified = await verifyToken(token);
expect(verified).toEqual({
id: mockUser.id, // Returns the raw ID from payload
email: mockUser.email,
});
});
test("should verify legacy tokens with email-based secret", async () => {
// Create legacy token with old format (NEXTAUTH_SECRET + userEmail)
const legacyToken = jwt.sign({ id: `encrypted_${mockUser.id}` }, TEST_NEXTAUTH_SECRET + mockUser.email);
const verified = await verifyToken(legacyToken);
expect(verified).toEqual({
id: mockUser.id, // Returns the decrypted user ID
email: mockUser.email,
});
});
test("should prioritize new tokens over legacy tokens", async () => {
// Create both new and legacy tokens for the same user
const newToken = createToken(mockUser.id);
const legacyToken = jwt.sign({ id: `encrypted_${mockUser.id}` }, TEST_NEXTAUTH_SECRET + mockUser.email);
// New token should verify without triggering legacy path
const verifiedNew = await verifyToken(newToken);
expect(verifiedNew.id).toBe(mockUser.id); // Returns decrypted user ID
// Legacy token should trigger legacy path
const verifiedLegacy = await verifyToken(legacyToken);
expect(verifiedLegacy.id).toBe(mockUser.id); // Returns decrypted user ID
});
test("should reject tokens that fail both new and legacy verification", async () => {
const invalidToken = jwt.sign({ id: "encrypted_test-id" }, "wrong-secret");
await expect(verifyToken(invalidToken)).rejects.toThrow("Invalid token");
// Verify both methods were attempted
const { logger } = await import("@formbricks/logger");
expect(logger.error).toHaveBeenCalledWith(
expect.any(Error),
"Token verification failed with new method"
);
expect(logger.error).toHaveBeenCalledWith(
expect.any(Error),
"Token verification failed with legacy method"
);
});
});
describe("verifyInviteToken", () => {
@@ -139,6 +473,53 @@ describe("JWT Functions", () => {
test("should throw error for invalid token", () => {
expect(() => verifyInviteToken("invalid-token")).toThrow("Invalid or expired invite token");
});
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
await testMissingSecretsError(verifyInviteToken, ["any-token"]);
});
test("should throw error if inviteId is missing", () => {
const tokenWithoutInviteId = jwt.sign({ email: mockUser.email }, TEST_NEXTAUTH_SECRET);
expect(() => verifyInviteToken(tokenWithoutInviteId)).toThrow("Invalid or expired invite token");
});
test("should throw error if email is missing", () => {
const tokenWithoutEmail = jwt.sign({ inviteId: "test-invite-id" }, TEST_NEXTAUTH_SECRET);
expect(() => verifyInviteToken(tokenWithoutEmail)).toThrow("Invalid or expired invite token");
});
test("should fall back to original values if decryption fails", () => {
mockSymmetricDecrypt.mockImplementation(() => {
throw new Error("Decryption failed");
});
const inviteId = "test-invite-id";
const legacyToken = jwt.sign(
{
inviteId,
email: mockUser.email,
},
TEST_NEXTAUTH_SECRET
);
const verified = verifyInviteToken(legacyToken);
expect(verified).toEqual({
inviteId,
email: mockUser.email,
});
});
test("should throw error for token with wrong signature", () => {
const invalidToken = jwt.sign(
{
inviteId: "test-invite-id",
email: mockUser.email,
},
DIFFERENT_SECRET
);
expect(() => verifyInviteToken(invalidToken)).toThrow("Invalid or expired invite token");
});
});
describe("verifyEmailChangeToken", () => {
@@ -150,22 +531,478 @@ describe("JWT Functions", () => {
expect(result).toEqual({ id: userId, email });
});
test("should throw error if NEXTAUTH_SECRET or ENCRYPTION_KEY is not set", async () => {
await testMissingSecretsError(verifyEmailChangeToken, ["any-token"], { isAsync: true });
});
test("should throw error if token is invalid or missing fields", async () => {
// Create a token with missing fields
const jwt = await import("jsonwebtoken");
const token = jwt.sign({ foo: "bar" }, env.NEXTAUTH_SECRET as string);
const token = jwt.sign({ foo: "bar" }, TEST_NEXTAUTH_SECRET);
await expect(verifyEmailChangeToken(token)).rejects.toThrow(
"Token is invalid or missing required fields"
);
});
test("should throw error if id is missing", async () => {
const token = jwt.sign({ email: "test@example.com" }, TEST_NEXTAUTH_SECRET);
await expect(verifyEmailChangeToken(token)).rejects.toThrow(
"Token is invalid or missing required fields"
);
});
test("should throw error if email is missing", async () => {
const token = jwt.sign({ id: "test-id" }, TEST_NEXTAUTH_SECRET);
await expect(verifyEmailChangeToken(token)).rejects.toThrow(
"Token is invalid or missing required fields"
);
});
test("should return original id/email if decryption fails", async () => {
// Create a token with non-encrypted id/email
const jwt = await import("jsonwebtoken");
mockSymmetricDecrypt.mockImplementation(() => {
throw new Error("Decryption failed");
});
const payload = { id: "plain-id", email: "plain@example.com" };
const token = jwt.sign(payload, env.NEXTAUTH_SECRET as string);
const token = jwt.sign(payload, TEST_NEXTAUTH_SECRET);
const result = await verifyEmailChangeToken(token);
expect(result).toEqual(payload);
});
test("should throw error for token with wrong signature", async () => {
const invalidToken = jwt.sign(
{
id: "test-id",
email: "test@example.com",
},
DIFFERENT_SECRET
);
await expect(verifyEmailChangeToken(invalidToken)).rejects.toThrow();
});
});
// SECURITY SCENARIO TESTS
describe("Security Scenarios", () => {
describe("Algorithm Confusion Attack Prevention", () => {
test("should reject 'none' algorithm tokens in verifyToken", async () => {
// Create malicious token with "none" algorithm
const maliciousToken =
Buffer.from(
JSON.stringify({
alg: "none",
typ: "JWT",
})
).toString("base64url") +
"." +
Buffer.from(
JSON.stringify({
id: "encrypted_malicious-id",
})
).toString("base64url") +
".";
await expect(verifyToken(maliciousToken)).rejects.toThrow("Invalid token");
});
test("should reject 'none' algorithm tokens in verifyTokenForLinkSurvey", () => {
const maliciousToken =
Buffer.from(
JSON.stringify({
alg: "none",
typ: "JWT",
})
).toString("base64url") +
"." +
Buffer.from(
JSON.stringify({
email: "encrypted_attacker@evil.com",
surveyId: "test-survey-id",
})
).toString("base64url") +
".";
const result = verifyTokenForLinkSurvey(maliciousToken, "test-survey-id");
expect(result).toBeNull();
});
test("should reject 'none' algorithm tokens in verifyInviteToken", () => {
const maliciousToken =
Buffer.from(
JSON.stringify({
alg: "none",
typ: "JWT",
})
).toString("base64url") +
"." +
Buffer.from(
JSON.stringify({
inviteId: "encrypted_malicious-invite",
email: "encrypted_attacker@evil.com",
})
).toString("base64url") +
".";
expect(() => verifyInviteToken(maliciousToken)).toThrow("Invalid or expired invite token");
});
test("should reject 'none' algorithm tokens in verifyEmailChangeToken", async () => {
const maliciousToken =
Buffer.from(
JSON.stringify({
alg: "none",
typ: "JWT",
})
).toString("base64url") +
"." +
Buffer.from(
JSON.stringify({
id: "encrypted_malicious-id",
email: "encrypted_attacker@evil.com",
})
).toString("base64url") +
".";
await expect(verifyEmailChangeToken(maliciousToken)).rejects.toThrow();
});
test("should reject RS256 algorithm tokens (HS256/RS256 confusion)", async () => {
// Create malicious token with RS256 algorithm header but HS256 signature
const maliciousHeader = Buffer.from(
JSON.stringify({
alg: "RS256",
typ: "JWT",
})
).toString("base64url");
const maliciousPayload = Buffer.from(
JSON.stringify({
id: "encrypted_malicious-id",
})
).toString("base64url");
// Create signature using HMAC (as if it were HS256)
const crypto = require("crypto");
const signature = crypto
.createHmac("sha256", TEST_NEXTAUTH_SECRET)
.update(`${maliciousHeader}.${maliciousPayload}`)
.digest("base64url");
const maliciousToken = `${maliciousHeader}.${maliciousPayload}.${signature}`;
await expect(verifyToken(maliciousToken)).rejects.toThrow("Invalid token");
});
test("should only accept HS256 algorithm", async () => {
// Test that other valid algorithms are rejected
const otherAlgorithms = ["HS384", "HS512", "RS256", "RS384", "RS512", "ES256", "ES384", "ES512"];
for (const alg of otherAlgorithms) {
const maliciousHeader = Buffer.from(
JSON.stringify({
alg,
typ: "JWT",
})
).toString("base64url");
const maliciousPayload = Buffer.from(
JSON.stringify({
id: "encrypted_test-id",
})
).toString("base64url");
const maliciousToken = `${maliciousHeader}.${maliciousPayload}.fake-signature`;
await expect(verifyToken(maliciousToken)).rejects.toThrow("Invalid token");
}
});
});
describe("Token Tampering", () => {
  test("should reject tokens with modified payload", async () => {
    const original = createToken(mockUser.id);
    const [headerPart, payloadPart, signaturePart] = original.split(".");
    // Swap the user id inside the payload while keeping the original signature.
    const claims = JSON.parse(Buffer.from(payloadPart, "base64url").toString());
    claims.id = "malicious-id";
    const forgedPayload = Buffer.from(JSON.stringify(claims)).toString("base64url");
    const forgedToken = [headerPart, forgedPayload, signaturePart].join(".");
    await expect(verifyToken(forgedToken)).rejects.toThrow("Invalid token");
  });
  test("should reject tokens with modified signature", async () => {
    const original = createToken(mockUser.id);
    const [headerPart, payloadPart] = original.split(".");
    const forgedToken = `${headerPart}.${payloadPart}.tamperedsignature`;
    await expect(verifyToken(forgedToken)).rejects.toThrow("Invalid token");
  });
  test("should reject malformed tokens", async () => {
    // Structurally broken inputs: wrong segment counts, empty, and bad base64.
    const badTokens = [
      "not.a.jwt",
      "only.two.parts",
      "too.many.parts.here.invalid",
      "",
      "invalid-base64",
    ];
    for (const badToken of badTokens) {
      await expect(verifyToken(badToken)).rejects.toThrow();
    }
  });
});
describe("Cross-Survey Token Reuse", () => {
  test("should reject survey tokens used for different surveys", () => {
    // A link-survey token minted for one survey must not validate against another.
    const issuedForSurvey = "survey-1";
    const presentedToSurvey = "survey-2";
    const surveyToken = createTokenForLinkSurvey(issuedForSurvey, mockUser.email);
    expect(verifyTokenForLinkSurvey(surveyToken, presentedToSurvey)).toBeNull();
  });
});
describe("Expired Tokens", () => {
  // JWT `exp` is in seconds since epoch; this is one hour in the past.
  const oneHourAgo = () => Math.floor(Date.now() / 1000) - 3600;
  test("should reject expired tokens", async () => {
    const staleToken = jwt.sign({ id: "encrypted_test-id", exp: oneHourAgo() }, TEST_NEXTAUTH_SECRET);
    await expect(verifyToken(staleToken)).rejects.toThrow("Invalid token");
  });
  test("should reject expired email change tokens", async () => {
    const staleToken = jwt.sign(
      {
        id: "encrypted_test-id",
        email: "encrypted_test@example.com",
        exp: oneHourAgo(),
      },
      TEST_NEXTAUTH_SECRET
    );
    await expect(verifyEmailChangeToken(staleToken)).rejects.toThrow();
  });
});
describe("Encryption Key Attacks", () => {
// With a wrong key, decryption throws, so the raw ciphertext is used as the
// user ID; no DB row matches it, and verification must fail with "User not found".
test("should fail gracefully with wrong encryption key", async () => {
mockSymmetricDecrypt.mockImplementation(() => {
throw new Error("Authentication tag verification failed");
});
// Mock findUnique to only return user for correct decrypted ID, not ciphertext
(prisma.user.findUnique as any).mockImplementation(({ where }: { where: { id: string } }) => {
if (where.id === mockUser.id) {
return Promise.resolve(mockUser);
}
return Promise.resolve(null); // Return null for ciphertext IDs
});
const token = createToken(mockUser.id);
// Should fail because ciphertext passed as userId won't match any user in DB
await expect(verifyToken(token)).rejects.toThrow(/User not found/i);
});
// With no ENCRYPTION_KEY configured, link-survey verification should still
// return the plain-text email instead of crashing.
test("should handle encryption key not set gracefully", async () => {
const constants = await import("@/lib/constants");
const originalKey = (constants as any).ENCRYPTION_KEY;
// Temporarily unset the key for this test only; restored below.
(constants as any).ENCRYPTION_KEY = undefined;
const token = jwt.sign(
{
email: "test@example.com",
surveyId: "test-survey-id",
},
TEST_NEXTAUTH_SECRET
);
const result = verifyTokenForLinkSurvey(token, "test-survey-id");
expect(result).toBe("test@example.com");
// Restore
(constants as any).ENCRYPTION_KEY = originalKey;
});
});
describe("SQL Injection Attempts", () => {
test("should safely handle malicious user IDs", async () => {
// Hostile strings that would be dangerous if interpolated into raw SQL/HTML/paths.
const maliciousIds = [
"'; DROP TABLE users; --",
"1' OR '1'='1",
"admin'/*",
"<script>alert('xss')</script>",
"../../etc/passwd",
];
for (const maliciousId of maliciousIds) {
// One queued decrypt result per iteration — the *Once queue order is load-bearing.
mockSymmetricDecrypt.mockReturnValueOnce(maliciousId);
const token = jwt.sign({ id: "encrypted_malicious" }, TEST_NEXTAUTH_SECRET);
// The function should look up the user safely
await verifyToken(token);
// The hostile string must be passed verbatim as the `where.id` value,
// not interpolated into a query string.
expect(prisma.user.findUnique).toHaveBeenCalledWith({
where: { id: maliciousId },
});
}
});
});
describe("Token Reuse and Replay Attacks", () => {
  test("should allow legitimate token reuse within validity period", async () => {
    // The same token may be verified any number of times while valid;
    // each verification resolves to the decrypted user ID.
    const reusableToken = createToken(mockUser.id);
    const firstResult = await verifyToken(reusableToken);
    expect(firstResult.id).toBe(mockUser.id);
    const secondResult = await verifyToken(reusableToken);
    expect(secondResult.id).toBe(mockUser.id);
  });
});
describe("Legacy Token Compatibility", () => {
test("should handle legacy unencrypted tokens gracefully", async () => {
// Legacy token with plain text data
const legacyToken = jwt.sign({ id: mockUser.id }, TEST_NEXTAUTH_SECRET);
const result = await verifyToken(legacyToken);
expect(result.id).toBe(mockUser.id); // Returns raw ID from payload
expect(result.email).toBe(mockUser.email);
});
test("should handle mixed encrypted/unencrypted fields", async () => {
// Queued results: first decrypt call (id) succeeds, second (email) throws.
// The order of these mockImplementationOnce calls is load-bearing.
mockSymmetricDecrypt
.mockImplementationOnce(() => mockUser.id) // id decrypts successfully
.mockImplementationOnce(() => {
throw new Error("Email not encrypted");
}); // email fails
const token = jwt.sign(
{
id: "encrypted_test-id",
email: "plain-email@example.com",
},
TEST_NEXTAUTH_SECRET
);
const result = await verifyEmailChangeToken(token);
expect(result.id).toBe(mockUser.id);
// Email falls back to the raw payload value when decryption fails.
expect(result.email).toBe("plain-email@example.com");
});
test("should verify old format user tokens with email-based secrets", async () => {
// Simulate old token format with per-user secret
const oldFormatToken = jwt.sign(
{ id: `encrypted_${mockUser.id}` },
TEST_NEXTAUTH_SECRET + mockUser.email
);
const result = await verifyToken(oldFormatToken);
expect(result.id).toBe(mockUser.id); // Returns decrypted user ID
expect(result.email).toBe(mockUser.email);
});
test("should verify old format survey tokens with survey-based secrets", () => {
const surveyId = "legacy-survey-id";
// Simulate old survey token format
const oldFormatSurveyToken = jwt.sign(
{ email: `encrypted_${mockUser.email}` },
TEST_NEXTAUTH_SECRET + surveyId
);
const result = verifyTokenForLinkSurvey(oldFormatSurveyToken, surveyId);
expect(result).toBe(mockUser.email);
});
test("should gracefully handle database errors during legacy verification", async () => {
// Create token that will fail new method
const legacyToken = jwt.sign(
{ id: `encrypted_${mockUser.id}` },
TEST_NEXTAUTH_SECRET + mockUser.email
);
// Make database lookup fail
(prisma.user.findUnique as any).mockRejectedValueOnce(new Error("DB connection lost"));
// The DB error must surface to the caller rather than being swallowed.
await expect(verifyToken(legacyToken)).rejects.toThrow("DB connection lost");
});
});
describe("Edge Cases and Error Handling", () => {
test("should handle database connection errors gracefully", async () => {
// A failing user lookup must propagate its original error message.
(prisma.user.findUnique as any).mockRejectedValue(new Error("Database connection failed"));
const token = createToken(mockUser.id);
await expect(verifyToken(token)).rejects.toThrow("Database connection failed");
});
test("should handle crypto module errors", () => {
// Encryption failure during token creation should surface unchanged.
mockSymmetricEncrypt.mockImplementation(() => {
throw new Error("Crypto module error");
});
expect(() => createToken(mockUser.id)).toThrow("Crypto module error");
});
test("should validate email format in tokens", () => {
const invalidEmails = ["", "not-an-email", "missing@", "@missing-local.com", "spaces in@email.com"];
invalidEmails.forEach((invalidEmail) => {
expect(() => createEmailToken(invalidEmail)).not.toThrow();
// Note: JWT functions don't validate email format, they just encrypt/decrypt
// Email validation should happen at a higher level
});
});
test("should handle extremely long inputs", () => {
// 10k characters — exercises encryption and signing on oversized values.
const longString = "a".repeat(10000);
expect(() => createToken(longString)).not.toThrow();
expect(() => createEmailToken(longString)).not.toThrow();
});
test("should handle special characters in user data", () => {
const specialChars = "!@#$%^&*()_+-=[]{}|;:'\",.<>?/~`";
expect(() => createToken(specialChars)).not.toThrow();
expect(() => createEmailToken(specialChars)).not.toThrow();
});
});
describe("Performance and Resource Exhaustion", () => {
  test("should handle rapid token creation without memory leaks", () => {
    // Mint 1000 tokens back-to-back; each must come back as a string.
    const issued = Array.from({ length: 1000 }, (_, index) => createToken(`user-${index}`));
    expect(issued.length).toBe(1000);
    expect(issued.every((value) => typeof value === "string")).toBe(true);
  });
  test("should handle rapid token verification", async () => {
    const sharedToken = createToken(mockUser.id);
    // Kick off 100 concurrent verifications of the same token.
    const inFlight = Array.from({ length: 100 }, () => verifyToken(sharedToken));
    const settled = await Promise.all(inFlight);
    expect(settled.length).toBe(100);
    // Every verification resolves to the decrypted user ID.
    expect(settled.every((outcome: any) => outcome.id === mockUser.id)).toBe(true);
  });
});
});
});

View File

@@ -1,43 +1,64 @@
import { symmetricDecrypt, symmetricEncrypt } from "@/lib/crypto";
import { env } from "@/lib/env";
import jwt, { JwtPayload } from "jsonwebtoken";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { ENCRYPTION_KEY, NEXTAUTH_SECRET } from "@/lib/constants";
import { symmetricDecrypt, symmetricEncrypt } from "@/lib/crypto";
export const createToken = (userId: string, userEmail: string, options = {}): string => {
const encryptedUserId = symmetricEncrypt(userId, env.ENCRYPTION_KEY);
return jwt.sign({ id: encryptedUserId }, env.NEXTAUTH_SECRET + userEmail, options);
};
export const createTokenForLinkSurvey = (surveyId: string, userEmail: string): string => {
const encryptedEmail = symmetricEncrypt(userEmail, env.ENCRYPTION_KEY);
return jwt.sign({ email: encryptedEmail }, env.NEXTAUTH_SECRET + surveyId);
// Helper function to decrypt with fallback to plain text
const decryptWithFallback = (encryptedText: string, key: string): string => {
try {
return symmetricDecrypt(encryptedText, key);
} catch {
return encryptedText; // Return as-is if decryption fails (legacy format)
}
};
export const verifyEmailChangeToken = async (token: string): Promise<{ id: string; email: string }> => {
if (!env.NEXTAUTH_SECRET) {
export const createToken = (userId: string, options = {}): string => {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
const payload = jwt.verify(token, env.NEXTAUTH_SECRET) as { id: string; email: string };
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const encryptedUserId = symmetricEncrypt(userId, ENCRYPTION_KEY);
return jwt.sign({ id: encryptedUserId }, NEXTAUTH_SECRET, options);
};
export const createTokenForLinkSurvey = (surveyId: string, userEmail: string): string => {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const encryptedEmail = symmetricEncrypt(userEmail, ENCRYPTION_KEY);
return jwt.sign({ email: encryptedEmail, surveyId }, NEXTAUTH_SECRET);
};
export const verifyEmailChangeToken = async (token: string): Promise<{ id: string; email: string }> => {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const payload = jwt.verify(token, NEXTAUTH_SECRET, { algorithms: ["HS256"] }) as {
id: string;
email: string;
};
if (!payload?.id || !payload?.email) {
throw new Error("Token is invalid or missing required fields");
}
let decryptedId: string;
let decryptedEmail: string;
try {
decryptedId = symmetricDecrypt(payload.id, env.ENCRYPTION_KEY);
} catch {
decryptedId = payload.id;
}
try {
decryptedEmail = symmetricDecrypt(payload.email, env.ENCRYPTION_KEY);
} catch {
decryptedEmail = payload.email;
}
// Decrypt both fields with fallback
const decryptedId = decryptWithFallback(payload.id, ENCRYPTION_KEY);
const decryptedEmail = decryptWithFallback(payload.email, ENCRYPTION_KEY);
return {
id: decryptedId,
@@ -46,127 +67,230 @@ export const verifyEmailChangeToken = async (token: string): Promise<{ id: strin
};
export const createEmailChangeToken = (userId: string, email: string): string => {
const encryptedUserId = symmetricEncrypt(userId, env.ENCRYPTION_KEY);
const encryptedEmail = symmetricEncrypt(email, env.ENCRYPTION_KEY);
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const encryptedUserId = symmetricEncrypt(userId, ENCRYPTION_KEY);
const encryptedEmail = symmetricEncrypt(email, ENCRYPTION_KEY);
const payload = {
id: encryptedUserId,
email: encryptedEmail,
};
return jwt.sign(payload, env.NEXTAUTH_SECRET as string, {
return jwt.sign(payload, NEXTAUTH_SECRET, {
expiresIn: "1d",
});
};
export const createEmailToken = (email: string): string => {
if (!env.NEXTAUTH_SECRET) {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
const encryptedEmail = symmetricEncrypt(email, env.ENCRYPTION_KEY);
return jwt.sign({ email: encryptedEmail }, env.NEXTAUTH_SECRET);
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const encryptedEmail = symmetricEncrypt(email, ENCRYPTION_KEY);
return jwt.sign({ email: encryptedEmail }, NEXTAUTH_SECRET);
};
export const getEmailFromEmailToken = (token: string): string => {
if (!env.NEXTAUTH_SECRET) {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
const payload = jwt.verify(token, env.NEXTAUTH_SECRET) as JwtPayload;
try {
// Try to decrypt first (for newer tokens)
const decryptedEmail = symmetricDecrypt(payload.email, env.ENCRYPTION_KEY);
return decryptedEmail;
} catch {
// If decryption fails, return the original email (for older tokens)
return payload.email;
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const payload = jwt.verify(token, NEXTAUTH_SECRET, { algorithms: ["HS256"] }) as JwtPayload & {
email: string;
};
return decryptWithFallback(payload.email, ENCRYPTION_KEY);
};
export const createInviteToken = (inviteId: string, email: string, options = {}): string => {
if (!env.NEXTAUTH_SECRET) {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
const encryptedInviteId = symmetricEncrypt(inviteId, env.ENCRYPTION_KEY);
const encryptedEmail = symmetricEncrypt(email, env.ENCRYPTION_KEY);
return jwt.sign({ inviteId: encryptedInviteId, email: encryptedEmail }, env.NEXTAUTH_SECRET, options);
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
const encryptedInviteId = symmetricEncrypt(inviteId, ENCRYPTION_KEY);
const encryptedEmail = symmetricEncrypt(email, ENCRYPTION_KEY);
return jwt.sign({ inviteId: encryptedInviteId, email: encryptedEmail }, NEXTAUTH_SECRET, options);
};
export const verifyTokenForLinkSurvey = (token: string, surveyId: string): string | null => {
if (!NEXTAUTH_SECRET) {
return null;
}
try {
const { email } = jwt.verify(token, env.NEXTAUTH_SECRET + surveyId) as JwtPayload;
let payload: JwtPayload & { email: string; surveyId?: string };
// Try primary method first (consistent secret)
try {
// Try to decrypt first (for newer tokens)
if (!env.ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
payload = jwt.verify(token, NEXTAUTH_SECRET, { algorithms: ["HS256"] }) as JwtPayload & {
email: string;
surveyId: string;
};
} catch (primaryError) {
logger.error(primaryError, "Token verification failed with primary method");
// Fallback to legacy method (surveyId-based secret)
try {
payload = jwt.verify(token, NEXTAUTH_SECRET + surveyId, { algorithms: ["HS256"] }) as JwtPayload & {
email: string;
};
} catch (legacyError) {
logger.error(legacyError, "Token verification failed with legacy method");
throw new Error("Invalid token");
}
const decryptedEmail = symmetricDecrypt(email, env.ENCRYPTION_KEY);
return decryptedEmail;
} catch {
// If decryption fails, return the original email (for older tokens)
return email;
}
} catch (err) {
// Verify the surveyId matches if present in payload (new format)
if (payload.surveyId && payload.surveyId !== surveyId) {
return null;
}
const { email } = payload;
if (!email) {
return null;
}
// Decrypt email with fallback to plain text
if (!ENCRYPTION_KEY) {
return email; // Return as-is if encryption key not set
}
return decryptWithFallback(email, ENCRYPTION_KEY);
} catch (error) {
logger.error(error, "Survey link token verification failed");
return null;
}
};
export const verifyToken = async (token: string): Promise<JwtPayload> => {
// First decode to get the ID
const decoded = jwt.decode(token);
const payload: JwtPayload = decoded as JwtPayload;
// Helper function to get user email for legacy verification
const getUserEmailForLegacyVerification = async (
token: string,
userId?: string
): Promise<{ userId: string; userEmail: string }> => {
if (!userId) {
const decoded = jwt.decode(token);
if (!payload) {
throw new Error("Token is invalid");
// Validate decoded token structure before using it
if (
!decoded ||
typeof decoded !== "object" ||
!decoded.id ||
typeof decoded.id !== "string" ||
decoded.id.trim() === ""
) {
logger.error("Invalid token: missing or invalid user ID");
throw new Error("Invalid token");
}
userId = decoded.id;
}
const { id } = payload;
if (!id) {
throw new Error("Token missing required field: id");
const decryptedId = decryptWithFallback(userId, ENCRYPTION_KEY);
// Validate decrypted ID before database query
if (!decryptedId || typeof decryptedId !== "string" || decryptedId.trim() === "") {
logger.error("Invalid token: missing or invalid user ID");
throw new Error("Invalid token");
}
// Try to decrypt the ID (for newer tokens), if it fails use the ID as-is (for older tokens)
let decryptedId: string;
try {
decryptedId = symmetricDecrypt(id, env.ENCRYPTION_KEY);
} catch {
decryptedId = id;
}
// If no email provided, look up the user
const foundUser = await prisma.user.findUnique({
where: { id: decryptedId },
});
if (!foundUser) {
throw new Error("User not found");
const errorMessage = "User not found";
logger.error(errorMessage);
throw new Error(errorMessage);
}
const userEmail = foundUser.email;
return { userId: decryptedId, userEmail: foundUser.email };
};
return { id: decryptedId, email: userEmail };
export const verifyToken = async (token: string): Promise<JwtPayload> => {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
let payload: JwtPayload & { id: string };
let userData: { userId: string; userEmail: string } | null = null;
// Try new method first, with smart fallback to legacy
try {
payload = jwt.verify(token, NEXTAUTH_SECRET, { algorithms: ["HS256"] }) as JwtPayload & {
id: string;
};
} catch (newMethodError) {
logger.error(newMethodError, "Token verification failed with new method");
// Get user email for legacy verification
userData = await getUserEmailForLegacyVerification(token);
// Try legacy verification with email-based secret
try {
payload = jwt.verify(token, NEXTAUTH_SECRET + userData.userEmail, {
algorithms: ["HS256"],
}) as JwtPayload & {
id: string;
};
} catch (legacyMethodError) {
logger.error(legacyMethodError, "Token verification failed with legacy method");
throw new Error("Invalid token");
}
}
if (!payload?.id) {
throw new Error("Invalid token");
}
// Get user email if we don't have it yet
userData ??= await getUserEmailForLegacyVerification(token, payload.id);
return { id: userData.userId, email: userData.userEmail };
};
export const verifyInviteToken = (token: string): { inviteId: string; email: string } => {
if (!NEXTAUTH_SECRET) {
throw new Error("NEXTAUTH_SECRET is not set");
}
if (!ENCRYPTION_KEY) {
throw new Error("ENCRYPTION_KEY is not set");
}
try {
const decoded = jwt.decode(token);
const payload: JwtPayload = decoded as JwtPayload;
const payload = jwt.verify(token, NEXTAUTH_SECRET, { algorithms: ["HS256"] }) as JwtPayload & {
inviteId: string;
email: string;
};
const { inviteId, email } = payload;
const { inviteId: encryptedInviteId, email: encryptedEmail } = payload;
let decryptedInviteId: string;
let decryptedEmail: string;
try {
// Try to decrypt first (for newer tokens)
decryptedInviteId = symmetricDecrypt(inviteId, env.ENCRYPTION_KEY);
decryptedEmail = symmetricDecrypt(email, env.ENCRYPTION_KEY);
} catch {
// If decryption fails, use original values (for older tokens)
decryptedInviteId = inviteId;
decryptedEmail = email;
if (!encryptedInviteId || !encryptedEmail) {
throw new Error("Invalid token");
}
// Decrypt both fields with fallback to original values
const decryptedInviteId = decryptWithFallback(encryptedInviteId, ENCRYPTION_KEY);
const decryptedEmail = decryptWithFallback(encryptedEmail, ENCRYPTION_KEY);
return {
inviteId: decryptedInviteId,
email: decryptedEmail,

View File

@@ -262,9 +262,7 @@
"membership_not_found": "Mitgliedschaft nicht gefunden",
"metadata": "Metadaten",
"minimum": "Minimum",
"mobile_overlay_app_works_best_on_desktop": "Formbricks funktioniert am besten auf einem größeren Bildschirm. Um Umfragen zu verwalten oder zu erstellen, wechsle zu einem anderen Gerät.",
"mobile_overlay_surveys_look_good": "Keine Sorge deine Umfragen sehen auf jedem Gerät und jeder Bildschirmgröße großartig aus!",
"mobile_overlay_title": "Oops, Bildschirm zu klein erkannt!",
"mobile_overlay_text": "Formbricks ist für Geräte mit kleineren Auflösungen nicht verfügbar.",
"move_down": "Nach unten bewegen",
"move_up": "Nach oben bewegen",
"multiple_languages": "Mehrsprachigkeit",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "Zugriffskontrolle",
"add_api_key": "API-Schlüssel hinzufügen",
"api_key": "API-Schlüssel",
"api_key_copied_to_clipboard": "API-Schlüssel in die Zwischenablage kopiert",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "Bewerte die wahrgenommene Benutzerfreundlichkeit, indem du die Nutzer bittest, ihre Erfahrung mit deinem Produkt mittels eines standardisierten 10-Fragen-Fragebogens zu bewerten.",
"usability_score_name": "System Usability Score Survey (SUS)"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "Membership not found",
"metadata": "Metadata",
"minimum": "Minimum",
"mobile_overlay_app_works_best_on_desktop": "Formbricks works best on a bigger screen. To manage or build surveys, switch to another device.",
"mobile_overlay_surveys_look_good": "Don't worry your surveys look great on every device and screen size!",
"mobile_overlay_title": "Oops, tiny screen detected!",
"mobile_overlay_text": "Formbricks is not available for devices with smaller resolutions.",
"move_down": "Move down",
"move_up": "Move up",
"multiple_languages": "Multiple languages",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "Access Control",
"add_api_key": "Add API Key",
"api_key": "API Key",
"api_key_copied_to_clipboard": "API key copied to clipboard",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "Measure perceived usability by asking users to rate their experience with your product using a standardized 10-question survey.",
"usability_score_name": "System Usability Score (SUS)"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "Abonnement non trouvé",
"metadata": "Métadonnées",
"minimum": "Min",
"mobile_overlay_app_works_best_on_desktop": "Formbricks fonctionne mieux sur un écran plus grand. Pour gérer ou créer des sondages, passez à un autre appareil.",
"mobile_overlay_surveys_look_good": "Ne t'inquiète pas tes enquêtes sont superbes sur tous les appareils et tailles d'écran!",
"mobile_overlay_title": "Oups, écran minuscule détecté!",
"mobile_overlay_text": "Formbricks n'est pas disponible pour les appareils avec des résolutions plus petites.",
"move_down": "Déplacer vers le bas",
"move_up": "Déplacer vers le haut",
"multiple_languages": "Plusieurs langues",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "Contrôle d'accès",
"add_api_key": "Ajouter une clé API",
"api_key": "Clé API",
"api_key_copied_to_clipboard": "Clé API copiée dans le presse-papiers",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "Mesurez la convivialité perçue en demandant aux utilisateurs d'évaluer leur expérience avec votre produit via un sondage standardisé de 10 questions.",
"usability_score_name": "Score d'Utilisabilité du Système (SUS)"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "メンバーシップが見つかりません",
"metadata": "メタデータ",
"minimum": "最小",
"mobile_overlay_app_works_best_on_desktop": "Formbricks は より 大きな 画面 で最適に 作動します。 フォーム を 管理または 構築する には、 別の デバイス に 切り替える 必要が あります。",
"mobile_overlay_surveys_look_good": "ご安心ください - お使い の デバイス や 画面 サイズ に 関係なく、 フォーム は 素晴らしく 見えます!",
"mobile_overlay_title": "おっと、 小さな 画面 が 検出されました!",
"mobile_overlay_text": "Formbricksは、解像度の小さいデバイスでは利用できません。",
"move_down": "下に移動",
"move_up": "上に移動",
"multiple_languages": "多言語",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "アクセス制御",
"add_api_key": "APIキーを追加",
"api_key": "APIキー",
"api_key_copied_to_clipboard": "APIキーをクリップボードにコピーしました",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "標準化された10の質問アンケートを使用して、製品に対するユーザーの体験を評価し、知覚された使いやすさを測定する。",
"usability_score_name": "システムユーザビリティスコアSUS"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "Assinatura não encontrada",
"metadata": "metadados",
"minimum": "Mínimo",
"mobile_overlay_app_works_best_on_desktop": "Formbricks funciona melhor em uma tela maior. Para gerenciar ou criar pesquisas, mude para outro dispositivo.",
"mobile_overlay_surveys_look_good": "Não se preocupe suas pesquisas ficam ótimas em qualquer dispositivo e tamanho de tela!",
"mobile_overlay_title": "Eita, tela pequena detectada!",
"mobile_overlay_text": "O Formbricks não está disponível para dispositivos com resoluções menores.",
"move_down": "Descer",
"move_up": "Subir",
"multiple_languages": "Vários idiomas",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "Controle de Acesso",
"add_api_key": "Adicionar Chave API",
"api_key": "Chave de API",
"api_key_copied_to_clipboard": "Chave da API copiada para a área de transferência",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "Meça a usabilidade percebida perguntando aos usuários para avaliar sua experiência com seu produto usando uma pesquisa padronizada de 10 perguntas.",
"usability_score_name": "Pontuação de Usabilidade do Sistema (SUS)"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "Associação não encontrada",
"metadata": "Metadados",
"minimum": "Mínimo",
"mobile_overlay_app_works_best_on_desktop": "Formbricks funciona melhor num ecrã maior. Para gerir ou criar inquéritos, mude de dispositivo.",
"mobile_overlay_surveys_look_good": "Não se preocupe os seus inquéritos têm uma ótima aparência em todos os dispositivos e tamanhos de ecrã!",
"mobile_overlay_title": "Oops, ecrã pequeno detectado!",
"mobile_overlay_text": "O Formbricks não está disponível para dispositivos com resoluções menores.",
"move_down": "Mover para baixo",
"move_up": "Mover para cima",
"multiple_languages": "Várias línguas",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "Controlo de Acesso",
"add_api_key": "Adicionar Chave API",
"api_key": "Chave API",
"api_key_copied_to_clipboard": "Chave API copiada para a área de transferência",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "Meça a usabilidade percebida ao solicitar que os utilizadores avaliem a sua experiência com o seu produto usando um questionário padronizado de 10 perguntas.",
"usability_score_name": "Pontuação de Usabilidade do Sistema (SUS)"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "Apartenența nu a fost găsită",
"metadata": "Metadate",
"minimum": "Minim",
"mobile_overlay_app_works_best_on_desktop": "Formbricks funcționează cel mai bine pe un ecran mai mare. Pentru a gestiona sau crea chestionare, treceți la un alt dispozitiv.",
"mobile_overlay_surveys_look_good": "Nu vă faceți griji chestionarele dumneavoastră arată grozav pe orice dispozitiv și dimensiune a ecranului!",
"mobile_overlay_title": "Ups, ecran mic detectat!",
"mobile_overlay_text": "Formbricks nu este disponibil pentru dispozitive cu rezoluții mai mici.",
"move_down": "Mută în jos",
"move_up": "Mută sus",
"multiple_languages": "Mai multe limbi",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "Control acces",
"add_api_key": "Adaugă Cheie API",
"api_key": "Cheie API",
"api_key_copied_to_clipboard": "Cheia API a fost copiată în clipboard",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "Măsurați uzabilitatea percepută cerând utilizatorilor să își evalueze experiența cu produsul dumneavoastră folosind un chestionar standardizat din 10 întrebări.",
"usability_score_name": "Scor de Uzabilitate al Sistemului (SUS)"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "未找到会员资格",
"metadata": "元数据",
"minimum": "最低",
"mobile_overlay_app_works_best_on_desktop": "Formbricks 在 更大 的 屏幕 上 效果 最佳。 若 需要 管理 或 构建 调查, 请 切换 到 其他 设备",
"mobile_overlay_surveys_look_good": "别 担心 您 的 调查 在 每 一 种 设备 和 屏幕 尺寸 上 看起来 都 很 棒!",
"mobile_overlay_title": "噢, 检测 到 小 屏幕!",
"mobile_overlay_text": "Formbricks 不 适用 于 分辨率 较小 的 设备",
"move_down": "下移",
"move_up": "上移",
"multiple_languages": "多种 语言",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "访问控制",
"add_api_key": "添加 API 密钥",
"api_key": "API Key",
"api_key_copied_to_clipboard": "API 密钥 已复制到 剪贴板",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "通过要求用户使用标准化的 10 问 调查 来 评价 他们对您产品的体验,以 测量 感知 的 可用性。",
"usability_score_name": "系统 可用性 得分 ( SUS )"
}
}
}

View File

@@ -262,9 +262,7 @@
"membership_not_found": "找不到成員資格",
"metadata": "元數據",
"minimum": "最小值",
"mobile_overlay_app_works_best_on_desktop": "Formbricks 適合在大螢幕上使用。若要管理或建立問卷,請切換到其他裝置。",
"mobile_overlay_surveys_look_good": "別擔心 -你的 問卷 在每個 裝置 和 螢幕尺寸 上 都 很出色!",
"mobile_overlay_title": "糟糕 ,偵測到小螢幕!",
"mobile_overlay_text": "Formbricks 不適用於較小解析度的裝置。",
"move_down": "下移",
"move_up": "上移",
"multiple_languages": "多種語言",
@@ -752,6 +750,7 @@
},
"project": {
"api_keys": {
"access_control": "存取控制",
"add_api_key": "新增 API 金鑰",
"api_key": "API 金鑰",
"api_key_copied_to_clipboard": "API 金鑰已複製到剪貼簿",
@@ -2888,4 +2887,4 @@
"usability_rating_description": "透過使用標準化的 十個問題 問卷,要求使用者評估他們對 您 產品的使用體驗,來衡量感知的 可用性。",
"usability_score_name": "系統 可用性 分數 (SUS)"
}
}
}

View File

@@ -1,101 +0,0 @@
import { getCacheService } from "@formbricks/cache";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { Result, err, ok } from "@formbricks/types/error-handlers";
import { type OverallHealthStatus } from "@/modules/api/v2/health/types/health-status";
import { type ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
/**
 * Probe the main database for reachability.
 *
 * Runs a trivial `SELECT 1` through Prisma; any thrown error is logged with
 * health-check context and converted into an internal_server_error result.
 * @returns Promise<Result<boolean, ApiErrorResponseV2>> - ok(true) when reachable
 */
export const checkDatabaseHealth = async (): Promise<Result<boolean, ApiErrorResponseV2>> => {
  try {
    // A trivial raw query is enough to prove connectivity.
    await prisma.$queryRaw`SELECT 1`;
    return ok(true);
  } catch (error) {
    const context = { component: "health_check", check_type: "main_database", error };
    logger.withContext(context).error("Database health check failed");
    return err({
      type: "internal_server_error",
      details: [{ field: "main_database", issue: "Database health check failed" }],
    });
  }
};
/**
 * Probe the Redis cache for reachability.
 *
 * Resolves the cache service first; if that fails, or Redis reports itself
 * unavailable, an internal_server_error result is returned. Unexpected
 * exceptions are logged with health-check context.
 * @returns Promise<Result<boolean, ApiErrorResponseV2>> - ok(true) when Redis responds
 */
export const checkCacheHealth = async (): Promise<Result<boolean, ApiErrorResponseV2>> => {
  try {
    const cacheServiceResult = await getCacheService();
    if (!cacheServiceResult.ok) {
      return err({
        type: "internal_server_error",
        details: [{ field: "cache_database", issue: "Cache service not available" }],
      });
    }
    const redisIsUp = await cacheServiceResult.data.isRedisAvailable();
    if (!redisIsUp) {
      return err({
        type: "internal_server_error",
        details: [{ field: "cache_database", issue: "Redis not available" }],
      });
    }
    return ok(true);
  } catch (error) {
    logger
      .withContext({ component: "health_check", check_type: "cache_database", error })
      .error("Redis health check failed");
    return err({
      type: "internal_server_error",
      details: [{ field: "cache_database", issue: "Redis health check failed" }],
    });
  }
};
/**
 * Run every dependency health check and aggregate the results.
 *
 * Individual dependency failures are reported as `false` flags in the payload;
 * the function itself only returns err() when the aggregation step fails.
 * @returns Promise<Result<OverallHealthStatus, ApiErrorResponseV2>> - aggregated status
 */
export const performHealthChecks = async (): Promise<Result<OverallHealthStatus, ApiErrorResponseV2>> => {
  try {
    // Probe both dependencies concurrently.
    const [databaseResult, cacheResult] = await Promise.all([checkDatabaseHealth(), checkCacheHealth()]);
    const overall: OverallHealthStatus = {
      main_database: databaseResult.ok && databaseResult.data,
      cache_database: cacheResult.ok && cacheResult.data,
    };
    // Always ok(): dependency outages live in the boolean flags above.
    return ok(overall);
  } catch (error) {
    // Only reached when the health-check machinery itself breaks.
    logger.withContext({ component: "health_check", error }).error("Health check endpoint failed");
    return err({
      type: "internal_server_error",
      details: [{ field: "health", issue: "Failed to perform health checks" }],
    });
  }
};

View File

@@ -1,29 +0,0 @@
import { ZOverallHealthStatus } from "@/modules/api/v2/health/types/health-status";
import { makePartialSchema } from "@/modules/api/v2/types/openapi-response";
import { ZodOpenApiOperationObject } from "zod-openapi";
export const healthCheckEndpoint: ZodOpenApiOperationObject = {
tags: ["Health"],
summary: "Health Check",
description: "Check the health status of critical application dependencies including database and cache.",
requestParams: {},
operationId: "healthCheck",
security: [],
responses: {
"200": {
description:
"Health check completed successfully. Check individual dependency status in response data.",
content: {
"application/json": {
schema: makePartialSchema(ZOverallHealthStatus),
},
},
},
},
};
export const healthPaths = {
"/health": {
get: healthCheckEndpoint,
},
};

View File

@@ -1,288 +0,0 @@
import { beforeEach, describe, expect, test, vi } from "vitest";
import { ErrorCode, getCacheService } from "@formbricks/cache";
import { prisma } from "@formbricks/database";
import { err, ok } from "@formbricks/types/error-handlers";
import { checkCacheHealth, checkDatabaseHealth, performHealthChecks } from "../health-checks";
// Mock dependencies
vi.mock("@formbricks/database", () => ({
prisma: {
$queryRaw: vi.fn(),
},
}));
vi.mock("@formbricks/cache", () => ({
getCacheService: vi.fn(),
ErrorCode: {
RedisConnectionError: "redis_connection_error",
},
}));
vi.mock("@formbricks/logger", () => ({
logger: {
error: vi.fn(),
info: vi.fn(),
withContext: vi.fn(() => ({
error: vi.fn(),
info: vi.fn(),
})),
},
}));
describe("Health Checks", () => {
beforeEach(() => {
vi.clearAllMocks();
});
// Helper function to create a mock CacheService
const createMockCacheService = (isRedisAvailable: boolean = true) => ({
getRedisClient: vi.fn(),
withTimeout: vi.fn(),
get: vi.fn(),
exists: vi.fn(),
set: vi.fn(),
del: vi.fn(),
keys: vi.fn(),
withCache: vi.fn(),
flush: vi.fn(),
tryGetCachedValue: vi.fn(),
trySetCache: vi.fn(),
isRedisAvailable: vi.fn().mockResolvedValue(isRedisAvailable),
});
describe("checkDatabaseHealth", () => {
test("should return healthy when database query succeeds", async () => {
vi.mocked(prisma.$queryRaw).mockResolvedValue([{ "?column?": 1 }]);
const result = await checkDatabaseHealth();
expect(result).toEqual({ ok: true, data: true });
expect(prisma.$queryRaw).toHaveBeenCalledWith(["SELECT 1"]);
});
test("should return unhealthy when database query fails", async () => {
const dbError = new Error("Database connection failed");
vi.mocked(prisma.$queryRaw).mockRejectedValue(dbError);
const result = await checkDatabaseHealth();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([
{ field: "main_database", issue: "Database health check failed" },
]);
}
});
test("should handle different types of database errors", async () => {
const networkError = new Error("ECONNREFUSED");
vi.mocked(prisma.$queryRaw).mockRejectedValue(networkError);
const result = await checkDatabaseHealth();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([
{ field: "main_database", issue: "Database health check failed" },
]);
}
});
});
describe("checkCacheHealth", () => {
test("should return healthy when Redis is available", async () => {
const mockCacheService = createMockCacheService(true);
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
const result = await checkCacheHealth();
expect(result).toEqual({ ok: true, data: true });
expect(getCacheService).toHaveBeenCalled();
expect(mockCacheService.isRedisAvailable).toHaveBeenCalled();
});
test("should return unhealthy when cache service fails to initialize", async () => {
const cacheError = { code: ErrorCode.RedisConnectionError };
vi.mocked(getCacheService).mockResolvedValue(err(cacheError));
const result = await checkCacheHealth();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([
{ field: "cache_database", issue: "Cache service not available" },
]);
}
});
test("should return unhealthy when Redis is not available", async () => {
const mockCacheService = createMockCacheService(false);
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
const result = await checkCacheHealth();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([{ field: "cache_database", issue: "Redis not available" }]);
}
expect(mockCacheService.isRedisAvailable).toHaveBeenCalled();
});
test("should handle Redis availability check exceptions", async () => {
const mockCacheService = createMockCacheService(true);
mockCacheService.isRedisAvailable.mockRejectedValue(new Error("Redis ping failed"));
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
const result = await checkCacheHealth();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([
{ field: "cache_database", issue: "Redis health check failed" },
]);
}
});
test("should handle cache service initialization exceptions", async () => {
const serviceException = new Error("Cache service unavailable");
vi.mocked(getCacheService).mockRejectedValue(serviceException);
const result = await checkCacheHealth();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([
{ field: "cache_database", issue: "Redis health check failed" },
]);
}
});
test("should verify isRedisAvailable is called asynchronously", async () => {
const mockCacheService = createMockCacheService(true);
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
await checkCacheHealth();
// Verify the async method was called
expect(mockCacheService.isRedisAvailable).toHaveBeenCalledTimes(1);
expect(mockCacheService.isRedisAvailable).toReturnWith(Promise.resolve(true));
});
});
describe("performHealthChecks", () => {
test("should return all healthy when both checks pass", async () => {
// Mock successful database check
vi.mocked(prisma.$queryRaw).mockResolvedValue([{ "?column?": 1 }]);
// Mock successful cache check
const mockCacheService = createMockCacheService(true);
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
const result = await performHealthChecks();
expect(result).toEqual({
ok: true,
data: {
main_database: true,
cache_database: true,
},
});
});
test("should return mixed results when only database is healthy", async () => {
// Mock successful database check
vi.mocked(prisma.$queryRaw).mockResolvedValue([{ "?column?": 1 }]);
// Mock failed cache check
vi.mocked(getCacheService).mockResolvedValue(err({ code: ErrorCode.RedisConnectionError }));
const result = await performHealthChecks();
expect(result).toEqual({
ok: true,
data: {
main_database: true,
cache_database: false,
},
});
});
test("should return mixed results when only cache is healthy", async () => {
// Mock failed database check
vi.mocked(prisma.$queryRaw).mockRejectedValue(new Error("DB Error"));
// Mock successful cache check
const mockCacheService = createMockCacheService(true);
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
const result = await performHealthChecks();
expect(result).toEqual({
ok: true,
data: {
main_database: false,
cache_database: true,
},
});
});
test("should return all unhealthy when both checks fail", async () => {
// Mock failed database check
vi.mocked(prisma.$queryRaw).mockRejectedValue(new Error("DB Error"));
// Mock failed cache check
vi.mocked(getCacheService).mockResolvedValue(err({ code: ErrorCode.RedisConnectionError }));
const result = await performHealthChecks();
expect(result).toEqual({
ok: true,
data: {
main_database: false,
cache_database: false,
},
});
});
test("should run both checks in parallel", async () => {
const dbPromise = new Promise((resolve) => setTimeout(() => resolve([{ "?column?": 1 }]), 100));
const redisPromise = new Promise((resolve) => setTimeout(() => resolve(true), 100));
vi.mocked(prisma.$queryRaw).mockReturnValue(dbPromise as any);
const mockCacheService = createMockCacheService(true);
mockCacheService.isRedisAvailable.mockReturnValue(redisPromise as any);
vi.mocked(getCacheService).mockResolvedValue(ok(mockCacheService as any));
const startTime = Date.now();
await performHealthChecks();
const endTime = Date.now();
// Should complete in roughly 100ms (parallel) rather than 200ms (sequential)
expect(endTime - startTime).toBeLessThan(150);
});
test("should return error only on catastrophic failure (endpoint itself fails)", async () => {
// Mock a catastrophic failure in Promise.all itself
const originalPromiseAll = Promise.all;
vi.spyOn(Promise, "all").mockRejectedValue(new Error("Catastrophic system failure"));
const result = await performHealthChecks();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.type).toBe("internal_server_error");
expect(result.error.details).toEqual([{ field: "health", issue: "Failed to perform health checks" }]);
}
// Restore original Promise.all
Promise.all = originalPromiseAll;
});
});
});

View File

@@ -1,15 +0,0 @@
import { responses } from "@/modules/api/v2/lib/response";
import { performHealthChecks } from "./lib/health-checks";
export const GET = async () => {
const healthStatusResult = await performHealthChecks();
if (!healthStatusResult.ok) {
return responses.serviceUnavailableResponse({
details: healthStatusResult.error.details,
});
}
return responses.successResponse({
data: healthStatusResult.data,
});
};

View File

@@ -1,22 +0,0 @@
import { z } from "zod";
import { extendZodWithOpenApi } from "zod-openapi";
extendZodWithOpenApi(z);
export const ZOverallHealthStatus = z
.object({
main_database: z.boolean().openapi({
description: "Main database connection status - true if database is reachable and running",
example: true,
}),
cache_database: z.boolean().openapi({
description: "Cache database connection status - true if cache database is reachable and running",
example: true,
}),
})
.openapi({
title: "Health Check Response",
description: "Health check status for critical application dependencies",
});
export type OverallHealthStatus = z.infer<typeof ZOverallHealthStatus>;

View File

@@ -232,35 +232,6 @@ const internalServerErrorResponse = ({
);
};
const serviceUnavailableResponse = ({
details = [],
cors = false,
cache = "private, no-store",
}: {
details?: ApiErrorDetails;
cors?: boolean;
cache?: string;
} = {}) => {
const headers = {
...(cors && corsHeaders),
"Cache-Control": cache,
};
return Response.json(
{
error: {
code: 503,
message: "Service Unavailable",
details,
},
},
{
status: 503,
headers,
}
);
};
const successResponse = ({
data,
meta,
@@ -354,7 +325,6 @@ export const responses = {
unprocessableEntityResponse,
tooManyRequestsResponse,
internalServerErrorResponse,
serviceUnavailableResponse,
successResponse,
createdResponse,
multiStatusResponse,

View File

@@ -1,8 +1,8 @@
import { ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
import * as Sentry from "@sentry/nextjs";
import { describe, expect, test, vi } from "vitest";
import { ZodError } from "zod";
import { logger } from "@formbricks/logger";
import { ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
import { formatZodError, handleApiError, logApiError, logApiRequest } from "../utils";
const mockRequest = new Request("http://localhost");
@@ -12,15 +12,6 @@ mockRequest.headers.set("x-request-id", "123");
vi.mock("@sentry/nextjs", () => ({
captureException: vi.fn(),
withScope: vi.fn((callback: (scope: any) => void) => {
const mockScope = {
setTag: vi.fn(),
setContext: vi.fn(),
setLevel: vi.fn(),
setExtra: vi.fn(),
};
callback(mockScope);
}),
}));
// Mock SENTRY_DSN constant
@@ -241,7 +232,7 @@ describe("utils", () => {
});
// Verify error was called on the child logger
expect(errorMock).toHaveBeenCalledWith("API V2 Error Details");
expect(errorMock).toHaveBeenCalledWith("API Error Details");
// Restore the original method
logger.withContext = originalWithContext;
@@ -275,7 +266,7 @@ describe("utils", () => {
});
// Verify error was called on the child logger
expect(errorMock).toHaveBeenCalledWith("API V2 Error Details");
expect(errorMock).toHaveBeenCalledWith("API Error Details");
// Restore the original method
logger.withContext = originalWithContext;
@@ -312,7 +303,7 @@ describe("utils", () => {
});
// Verify error was called on the child logger
expect(errorMock).toHaveBeenCalledWith("API V2 Error Details");
expect(errorMock).toHaveBeenCalledWith("API Error Details");
// Verify Sentry.captureException was called
expect(Sentry.captureException).toHaveBeenCalled();

View File

@@ -1,8 +1,8 @@
// Function is this file can be used in edge runtime functions, like api routes.
import * as Sentry from "@sentry/nextjs";
import { logger } from "@formbricks/logger";
import { IS_PRODUCTION, SENTRY_DSN } from "@/lib/constants";
import { ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
import * as Sentry from "@sentry/nextjs";
import { logger } from "@formbricks/logger";
export const logApiErrorEdge = (request: Request, error: ApiErrorResponseV2): void => {
const correlationId = request.headers.get("x-request-id") ?? "";
@@ -10,14 +10,14 @@ export const logApiErrorEdge = (request: Request, error: ApiErrorResponseV2): vo
// Send the error to Sentry if the DSN is set and the error type is internal_server_error
// This is useful for tracking down issues without overloading Sentry with errors
if (SENTRY_DSN && IS_PRODUCTION && error.type === "internal_server_error") {
// Use Sentry scope to add correlation ID as a tag for easy filtering
Sentry.withScope((scope) => {
scope.setTag("correlationId", correlationId);
scope.setLevel("error");
const err = new Error(`API V2 error, id: ${correlationId}`);
scope.setExtra("originalError", error);
const err = new Error(`API V2 error, id: ${correlationId}`);
Sentry.captureException(err);
Sentry.captureException(err, {
extra: {
details: error.details,
type: error.type,
correlationId,
},
});
}
@@ -26,5 +26,5 @@ export const logApiErrorEdge = (request: Request, error: ApiErrorResponseV2): vo
correlationId,
error,
})
.error("API V2 Error Details");
.error("API Error Details");
};

View File

@@ -1,18 +1,17 @@
// @ts-nocheck // We can remove this when we update the prisma client and the typescript version
// if we don't add this we get build errors with prisma due to type-nesting
import { ZodCustomIssue, ZodIssue } from "zod";
import { logger } from "@formbricks/logger";
import { TApiAuditLog } from "@/app/lib/api/with-api-logging";
import { AUDIT_LOG_ENABLED } from "@/lib/constants";
import { responses } from "@/modules/api/v2/lib/response";
import { ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
import { queueAuditEvent } from "@/modules/ee/audit-logs/lib/handler";
import { ZodCustomIssue, ZodIssue } from "zod";
import { logger } from "@formbricks/logger";
import { logApiErrorEdge } from "./utils-edge";
export const handleApiError = (
request: Request,
err: ApiErrorResponseV2,
auditLog?: TApiAuditLog
auditLog?: ApiAuditLog
): Response => {
logApiError(request, err, auditLog);
@@ -56,7 +55,7 @@ export const formatZodError = (error: { issues: (ZodIssue | ZodCustomIssue)[] })
});
};
export const logApiRequest = (request: Request, responseStatus: number, auditLog?: TApiAuditLog): void => {
export const logApiRequest = (request: Request, responseStatus: number, auditLog?: ApiAuditLog): void => {
const method = request.method;
const url = new URL(request.url);
const path = url.pathname;
@@ -83,13 +82,13 @@ export const logApiRequest = (request: Request, responseStatus: number, auditLog
logAuditLog(request, auditLog);
};
export const logApiError = (request: Request, error: ApiErrorResponseV2, auditLog?: TApiAuditLog): void => {
export const logApiError = (request: Request, error: ApiErrorResponseV2, auditLog?: ApiAuditLog): void => {
logApiErrorEdge(request, error);
logAuditLog(request, auditLog);
};
const logAuditLog = (request: Request, auditLog?: TApiAuditLog): void => {
const logAuditLog = (request: Request, auditLog?: ApiAuditLog): void => {
if (AUDIT_LOG_ENABLED && auditLog) {
const correlationId = request.headers.get("x-request-id") ?? "";
queueAuditEvent({

View File

@@ -1,5 +1,3 @@
import { healthPaths } from "@/modules/api/v2/health/lib/openapi";
import { ZOverallHealthStatus } from "@/modules/api/v2/health/types/health-status";
import { contactAttributeKeyPaths } from "@/modules/api/v2/management/contact-attribute-keys/lib/openapi";
import { responsePaths } from "@/modules/api/v2/management/responses/lib/openapi";
import { surveyContactLinksBySegmentPaths } from "@/modules/api/v2/management/surveys/[surveyId]/contact-links/segments/lib/openapi";
@@ -37,7 +35,6 @@ const document = createDocument({
version: "2.0.0",
},
paths: {
...healthPaths,
...rolePaths,
...mePaths,
...responsePaths,
@@ -58,10 +55,6 @@ const document = createDocument({
},
],
tags: [
{
name: "Health",
description: "Operations for checking critical application dependencies health status.",
},
{
name: "Roles",
description: "Operations for managing roles.",
@@ -121,7 +114,6 @@ const document = createDocument({
},
},
schemas: {
health: ZOverallHealthStatus,
role: ZRoles,
me: ZApiKeyData,
response: ZResponse,

View File

@@ -1,12 +1,12 @@
import { randomBytes } from "crypto";
import { Provider } from "next-auth/providers/index";
import { afterEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { EMAIL_VERIFICATION_DISABLED } from "@/lib/constants";
import { createToken } from "@/lib/jwt";
// Import mocked rate limiting functions
import { applyIPRateLimit } from "@/modules/core/rate-limit/helpers";
import { rateLimitConfigs } from "@/modules/core/rate-limit/rate-limit-configs";
import { randomBytes } from "crypto";
import { Provider } from "next-auth/providers/index";
import { afterEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { authOptions } from "./authOptions";
import { mockUser } from "./mock-data";
import { hashPassword } from "./utils";
@@ -31,7 +31,7 @@ vi.mock("@/lib/constants", () => ({
SESSION_MAX_AGE: 86400,
NEXTAUTH_SECRET: "test-secret",
WEBAPP_URL: "http://localhost:3000",
ENCRYPTION_KEY: "test-encryption-key-32-chars-long",
ENCRYPTION_KEY: "12345678901234567890123456789012", // 32 bytes for AES-256
REDIS_URL: undefined,
AUDIT_LOG_ENABLED: false,
AUDIT_LOG_GET_USER_IP: false,
@@ -261,7 +261,7 @@ describe("authOptions", () => {
vi.mocked(applyIPRateLimit).mockResolvedValue(); // Rate limiting passes
vi.spyOn(prisma.user, "findUnique").mockResolvedValue(mockUser as any);
const credentials = { token: createToken(mockUser.id, mockUser.email) };
const credentials = { token: createToken(mockUser.id) };
await expect(tokenProvider.options.authorize(credentials, {})).rejects.toThrow(
"Email already verified"
@@ -280,7 +280,7 @@ describe("authOptions", () => {
groupId: null,
} as any);
const credentials = { token: createToken(mockUserId, mockUser.email) };
const credentials = { token: createToken(mockUserId) };
const result = await tokenProvider.options.authorize(credentials, {});
expect(result.email).toBe(mockUser.email);
@@ -303,7 +303,7 @@ describe("authOptions", () => {
groupId: null,
} as any);
const credentials = { token: createToken(mockUserId, mockUser.email) };
const credentials = { token: createToken(mockUserId) };
await tokenProvider.options.authorize(credentials, {});
@@ -315,7 +315,7 @@ describe("authOptions", () => {
new Error("Maximum number of requests reached. Please try again later.")
);
const credentials = { token: createToken(mockUserId, mockUser.email) };
const credentials = { token: createToken(mockUserId) };
await expect(tokenProvider.options.authorize(credentials, {})).rejects.toThrow(
"Maximum number of requests reached. Please try again later."
@@ -339,7 +339,7 @@ describe("authOptions", () => {
groupId: null,
} as any);
const credentials = { token: createToken(mockUserId, mockUser.email) };
const credentials = { token: createToken(mockUserId) };
await tokenProvider.options.authorize(credentials, {});

View File

@@ -2,7 +2,6 @@
import { Button } from "@/modules/ui/components/button";
import { Label } from "@/modules/ui/components/label";
import { TooltipRenderer } from "@/modules/ui/components/tooltip";
import { useTranslate } from "@tolgee/react";
import { CopyIcon, Trash2Icon } from "lucide-react";
import { TSurveyQuota } from "@formbricks/types/quota";
@@ -38,30 +37,26 @@ export const QuotaList = ({ quotas, onEdit, deleteQuota, duplicateQuota }: Quota
</div>
<div className="flex items-center gap-2">
<TooltipRenderer tooltipContent={t("common.delete")}>
<Button
variant="ghost"
size="sm"
onClick={(e) => {
e.stopPropagation();
deleteQuota(quota);
}}
className="h-8 w-8 p-0 text-slate-500">
<Trash2Icon className="h-4 w-4" />
</Button>
</TooltipRenderer>
<TooltipRenderer tooltipContent={t("common.duplicate")}>
<Button
variant="ghost"
size="sm"
onClick={(e) => {
e.stopPropagation();
duplicateQuota(quota);
}}
className="h-8 w-8 p-0 text-slate-500">
<CopyIcon className="h-4 w-4" />
</Button>
</TooltipRenderer>
<Button
variant="ghost"
size="sm"
onClick={(e) => {
e.stopPropagation();
deleteQuota(quota);
}}
className="h-8 w-8 p-0 text-slate-500">
<Trash2Icon className="h-4 w-4" />
</Button>
<Button
variant="ghost"
size="sm"
onClick={(e) => {
e.stopPropagation();
duplicateQuota(quota);
}}
className="h-8 w-8 p-0 text-slate-500">
<CopyIcon className="h-4 w-4" />
</Button>
</div>
</div>
))}

View File

@@ -1,3 +1,12 @@
import { render } from "@react-email/render";
import { createTransport } from "nodemailer";
import type SMTPTransport from "nodemailer/lib/smtp-transport";
import { logger } from "@formbricks/logger";
import type { TLinkSurveyEmailData } from "@formbricks/types/email";
import { InvalidInputError } from "@formbricks/types/errors";
import type { TResponse } from "@formbricks/types/responses";
import type { TSurvey } from "@formbricks/types/surveys/types";
import { TUserEmail, TUserLocale } from "@formbricks/types/user";
import {
DEBUG,
MAIL_FROM,
@@ -17,15 +26,6 @@ import { getOrganizationByEnvironmentId } from "@/lib/organization/service";
import NewEmailVerification from "@/modules/email/emails/auth/new-email-verification";
import { EmailCustomizationPreviewEmail } from "@/modules/email/emails/general/email-customization-preview-email";
import { getTranslate } from "@/tolgee/server";
import { render } from "@react-email/render";
import { createTransport } from "nodemailer";
import type SMTPTransport from "nodemailer/lib/smtp-transport";
import { logger } from "@formbricks/logger";
import type { TLinkSurveyEmailData } from "@formbricks/types/email";
import { InvalidInputError } from "@formbricks/types/errors";
import type { TResponse } from "@formbricks/types/responses";
import type { TSurvey } from "@formbricks/types/surveys/types";
import { TUserEmail, TUserLocale } from "@formbricks/types/user";
import { ForgotPasswordEmail } from "./emails/auth/forgot-password-email";
import { PasswordResetNotifyEmail } from "./emails/auth/password-reset-notify-email";
import { VerificationEmail } from "./emails/auth/verification-email";
@@ -111,7 +111,7 @@ export const sendVerificationEmail = async ({
}): Promise<boolean> => {
try {
const t = await getTranslate();
const token = createToken(id, email, {
const token = createToken(id, {
expiresIn: "1d",
});
const verifyLink = `${WEBAPP_URL}/auth/verify?token=${encodeURIComponent(token)}`;
@@ -136,7 +136,7 @@ export const sendForgotPasswordEmail = async (user: {
locale: TUserLocale;
}): Promise<boolean> => {
const t = await getTranslate();
const token = createToken(user.id, user.email, {
const token = createToken(user.id, {
expiresIn: "1d",
});
const verifyLink = `${WEBAPP_URL}/auth/forgot-password/reset?token=${encodeURIComponent(token)}`;

View File

@@ -1,5 +1,6 @@
"use client";
import { getOrganizationAccessKeyDisplayName } from "@/modules/organization/settings/api-keys/lib/utils";
import { TOrganizationProject } from "@/modules/organization/settings/api-keys/types/api-keys";
import { Alert, AlertTitle } from "@/modules/ui/components/alert";
import { Button } from "@/modules/ui/components/button";
@@ -23,7 +24,7 @@ import { Switch } from "@/modules/ui/components/switch";
import { ApiKeyPermission } from "@prisma/client";
import { useTranslate } from "@tolgee/react";
import { ChevronDownIcon, Trash2Icon } from "lucide-react";
import { useState } from "react";
import { Fragment, useState } from "react";
import { useForm } from "react-hook-form";
import { toast } from "react-hot-toast";
import { TOrganizationAccess } from "@formbricks/types/api-key";
@@ -219,10 +220,10 @@ export const AddApiKeyModal = ({
<Dialog open={open} onOpenChange={setOpen}>
<DialogContent>
<DialogHeader>
<DialogTitle className="px-1">{t("environments.project.api_keys.add_api_key")}</DialogTitle>
<DialogTitle>{t("environments.project.api_keys.add_api_key")}</DialogTitle>
</DialogHeader>
<form onSubmit={handleSubmit(submitAPIKey)} className="contents">
<DialogBody className="space-y-4 overflow-y-auto px-1 py-4">
<DialogBody className="space-y-4 overflow-y-auto py-4">
<div className="space-y-2">
<Label>{t("environments.project.api_keys.api_key_label")}</Label>
<Input
@@ -347,31 +348,43 @@ export const AddApiKeyModal = ({
</div>
<div className="space-y-4">
<Label>{t("environments.project.api_keys.organization_access")}</Label>
{Object.keys(selectedOrganizationAccess).map((key) => (
<div key={key} className="mt-2 flex items-center gap-6">
<div className="flex items-center gap-2">
<Label>Read</Label>
<Switch
data-testid={`organization-access-${key}-read`}
checked={selectedOrganizationAccess[key].read || selectedOrganizationAccess[key].write}
onCheckedChange={(newVal) => setSelectedOrganizationAccessValue(key, "read", newVal)}
disabled={selectedOrganizationAccess[key].write}
/>
</div>
<div className="flex items-center gap-2">
<Label>Write</Label>
<Switch
data-testid={`organization-access-${key}-write`}
checked={selectedOrganizationAccess[key].write}
onCheckedChange={(newVal) => setSelectedOrganizationAccessValue(key, "write", newVal)}
/>
</div>
<div>
<Label>{t("environments.project.api_keys.organization_access")}</Label>
<p className="text-sm text-slate-500">
{t("environments.project.api_keys.organization_access_description")}
</p>
</div>
<div className="space-y-2">
<div className="grid grid-cols-[auto_100px_100px] gap-4">
<div></div>
<span className="flex items-center justify-center text-sm font-medium">Read</span>
<span className="flex items-center justify-center text-sm font-medium">Write</span>
{Object.keys(selectedOrganizationAccess).map((key) => (
<Fragment key={key}>
<div className="py-1 text-sm">{getOrganizationAccessKeyDisplayName(key, t)}</div>
<div className="flex items-center justify-center py-1">
<Switch
data-testid={`organization-access-${key}-read`}
checked={selectedOrganizationAccess[key].read}
onCheckedChange={(newVal) =>
setSelectedOrganizationAccessValue(key, "read", newVal)
}
/>
</div>
<div className="flex items-center justify-center py-1">
<Switch
data-testid={`organization-access-${key}-write`}
checked={selectedOrganizationAccess[key].write}
onCheckedChange={(newVal) =>
setSelectedOrganizationAccessValue(key, "write", newVal)
}
/>
</div>
</Fragment>
))}
</div>
))}
<p className="text-sm text-slate-500">
{t("environments.project.api_keys.organization_access_description")}
</p>
</div>
</div>
<Alert variant="warning">
<AlertTitle>{t("environments.project.api_keys.api_key_security_warning")}</AlertTitle>

View File

@@ -1,6 +1,7 @@
import { ApiKeyPermission } from "@prisma/client";
import "@testing-library/jest-dom/vitest";
import { cleanup, render, screen } from "@testing-library/react";
import React from "react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TProject } from "@formbricks/types/project";
import { TApiKeyWithEnvironmentPermission } from "../types/api-keys";
@@ -103,8 +104,6 @@ describe("ViewPermissionModal", () => {
setOpen: vi.fn(),
projects: mockProjects,
apiKey: mockApiKey,
onSubmit: vi.fn(),
isUpdating: false,
};
test("renders the modal with correct title", () => {
@@ -155,7 +154,7 @@ describe("ViewPermissionModal", () => {
expect(screen.getByTestId("organization-access-accessControl-read")).toBeDisabled();
expect(screen.getByTestId("organization-access-accessControl-write")).not.toBeChecked();
expect(screen.getByTestId("organization-access-accessControl-write")).toBeDisabled();
expect(screen.getByTestId("organization-access-otherAccess-read")).toBeChecked();
expect(screen.getByTestId("organization-access-otherAccess-read")).not.toBeChecked();
expect(screen.getByTestId("organization-access-otherAccess-write")).toBeChecked();
});
});

View File

@@ -1,5 +1,6 @@
"use client";
import { getOrganizationAccessKeyDisplayName } from "@/modules/organization/settings/api-keys/lib/utils";
import {
TApiKeyUpdateInput,
TApiKeyWithEnvironmentPermission,
@@ -21,7 +22,7 @@ import { Label } from "@/modules/ui/components/label";
import { Switch } from "@/modules/ui/components/switch";
import { zodResolver } from "@hookform/resolvers/zod";
import { useTranslate } from "@tolgee/react";
import { useEffect } from "react";
import { Fragment, useEffect } from "react";
import { useForm } from "react-hook-form";
import { TOrganizationAccess } from "@formbricks/types/api-key";
@@ -167,28 +168,36 @@ export const ViewPermissionModal = ({
})}
</div>
</div>
<div className="space-y-4">
<div className="space-y-2">
<Label>{t("environments.project.api_keys.organization_access")}</Label>
{Object.keys(organizationAccess).map((key) => (
<div key={key} className="mb-2 flex items-center gap-6">
<div className="flex items-center gap-2">
<Label className="text-sm font-medium">Read</Label>
<Switch
disabled={true}
data-testid={`organization-access-${key}-read`}
checked={organizationAccess[key].read || organizationAccess[key].write}
/>
</div>
<div className="flex items-center gap-2">
<Label className="text-sm font-medium">Write</Label>
<Switch
disabled={true}
data-testid={`organization-access-${key}-write`}
checked={organizationAccess[key].write}
/>
</div>
<div className="space-y-2">
<div className="grid grid-cols-[auto_100px_100px] gap-4">
<div></div>
<span className="flex items-center justify-center text-sm font-medium">Read</span>
<span className="flex items-center justify-center text-sm font-medium">Write</span>
{Object.keys(organizationAccess).map((key) => (
<Fragment key={key}>
<div className="py-1 text-sm">{getOrganizationAccessKeyDisplayName(key, t)}</div>
<div className="flex items-center justify-center py-1">
<Switch
disabled={true}
data-testid={`organization-access-${key}-read`}
checked={organizationAccess[key].read}
/>
</div>
<div className="flex items-center justify-center py-1">
<Switch
disabled={true}
data-testid={`organization-access-${key}-write`}
checked={organizationAccess[key].write}
/>
</div>
</Fragment>
))}
</div>
))}
</div>
</div>
</div>
</form>

View File

@@ -1,6 +1,6 @@
import { describe, expect, test } from "vitest";
import { describe, expect, test, vi } from "vitest";
import { TAPIKeyEnvironmentPermission } from "@formbricks/types/auth";
import { hasPermission } from "./utils";
import { getOrganizationAccessKeyDisplayName, hasPermission } from "./utils";
describe("hasPermission", () => {
const envId = "env1";
@@ -83,3 +83,17 @@ describe("hasPermission", () => {
expect(hasPermission(permissions, "other", "GET")).toBe(false);
});
});
describe("getOrganizationAccessKeyDisplayName", () => {
test("returns tolgee string for accessControl", () => {
const t = vi.fn((k) => k);
expect(getOrganizationAccessKeyDisplayName("accessControl", t)).toBe(
"environments.project.api_keys.access_control"
);
expect(t).toHaveBeenCalledWith("environments.project.api_keys.access_control");
});
test("returns tolgee string for other keys", () => {
const t = vi.fn((k) => k);
expect(getOrganizationAccessKeyDisplayName("otherKey", t)).toBe("otherKey");
});
});

View File

@@ -1,3 +1,4 @@
import { TFnType } from "@tolgee/react";
import { OrganizationAccessType } from "@formbricks/types/api-key";
import { TAPIKeyEnvironmentPermission, TAuthenticationApiKey } from "@formbricks/types/auth";
@@ -42,6 +43,15 @@ export const hasPermission = (
}
};
export const getOrganizationAccessKeyDisplayName = (key: string, t: TFnType) => {
switch (key) {
case "accessControl":
return t("environments.project.api_keys.access_control");
default:
return key;
}
};
export const hasOrganizationAccess = (
authentication: TAuthenticationApiKey,
accessType: OrganizationAccessType

View File

@@ -1,19 +1,5 @@
"use client";
import { useAutoAnimate } from "@formkit/auto-animate/react";
import { createId } from "@paralleldrive/cuid2";
import { useTranslate } from "@tolgee/react";
import {
ArrowDownIcon,
ArrowUpIcon,
CopyIcon,
EllipsisVerticalIcon,
PlusIcon,
SplitIcon,
TrashIcon,
} from "lucide-react";
import { useEffect, useMemo } from "react";
import { TSurvey, TSurveyLogic, TSurveyQuestion } from "@formbricks/types/surveys/types";
import { duplicateLogicItem } from "@/lib/surveyLogic/utils";
import { replaceHeadlineRecall } from "@/lib/utils/recall";
import { LogicEditor } from "@/modules/survey/editor/components/logic-editor";
@@ -29,6 +15,20 @@ import {
DropdownMenuTrigger,
} from "@/modules/ui/components/dropdown-menu";
import { Label } from "@/modules/ui/components/label";
import { useAutoAnimate } from "@formkit/auto-animate/react";
import { createId } from "@paralleldrive/cuid2";
import { useTranslate } from "@tolgee/react";
import {
ArrowDownIcon,
ArrowUpIcon,
CopyIcon,
EllipsisVerticalIcon,
PlusIcon,
SplitIcon,
TrashIcon,
} from "lucide-react";
import { useEffect, useMemo } from "react";
import { TSurvey, TSurveyLogic, TSurveyQuestion } from "@formbricks/types/surveys/types";
interface ConditionalLogicProps {
localSurvey: TSurvey;

View File

@@ -1,10 +1,10 @@
"use client";
import { useTranslate } from "@tolgee/react";
import { TConditionGroup, TSurvey, TSurveyQuestion } from "@formbricks/types/surveys/types";
import { createSharedConditionsFactory } from "@/modules/survey/editor/lib/shared-conditions-factory";
import { getDefaultOperatorForQuestion } from "@/modules/survey/editor/lib/utils";
import { ConditionsEditor } from "@/modules/ui/components/conditions-editor";
import { useTranslate } from "@tolgee/react";
import { TConditionGroup, TSurvey, TSurveyQuestion } from "@formbricks/types/surveys/types";
interface LogicEditorConditionsProps {
conditions: TConditionGroup;

View File

@@ -1,3 +1,5 @@
import { LogicEditorActions } from "@/modules/survey/editor/components/logic-editor-actions";
import { LogicEditorConditions } from "@/modules/survey/editor/components/logic-editor-conditions";
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import {
@@ -6,8 +8,6 @@ import {
TSurveyQuestion,
TSurveyQuestionTypeEnum,
} from "@formbricks/types/surveys/types";
import { LogicEditorActions } from "@/modules/survey/editor/components/logic-editor-actions";
import { LogicEditorConditions } from "@/modules/survey/editor/components/logic-editor-conditions";
import { LogicEditor } from "./logic-editor";
// Mock the subcomponents to isolate the LogicEditor component

View File

@@ -1,9 +1,5 @@
"use client";
import { useTranslate } from "@tolgee/react";
import { ArrowRightIcon } from "lucide-react";
import { ReactElement, useMemo } from "react";
import { TSurvey, TSurveyLogic, TSurveyQuestion } from "@formbricks/types/surveys/types";
import { getLocalizedValue } from "@/lib/i18n/utils";
import { LogicEditorActions } from "@/modules/survey/editor/components/logic-editor-actions";
import { LogicEditorConditions } from "@/modules/survey/editor/components/logic-editor-conditions";
@@ -15,6 +11,10 @@ import {
SelectTrigger,
SelectValue,
} from "@/modules/ui/components/select";
import { useTranslate } from "@tolgee/react";
import { ArrowRightIcon } from "lucide-react";
import { ReactElement, useMemo } from "react";
import { TSurvey, TSurveyLogic, TSurveyQuestion } from "@formbricks/types/surveys/types";
interface LogicEditorProps {
localSurvey: TSurvey;

View File

@@ -8,7 +8,7 @@ import { ConfirmationModal } from "@/modules/ui/components/confirmation-modal";
import { TooltipRenderer } from "@/modules/ui/components/tooltip";
import { createId } from "@paralleldrive/cuid2";
import { useTranslate } from "@tolgee/react";
import { CopyIcon, Trash2Icon } from "lucide-react";
import { CopyPlusIcon, TrashIcon } from "lucide-react";
import { useCallback, useMemo, useState } from "react";
import { TSurveyFollowUp } from "@formbricks/database/types/survey-follow-up";
import { TSurvey, TSurveyQuestionTypeEnum } from "@formbricks/types/surveys/types";
@@ -154,7 +154,7 @@ export const FollowUpItem = ({
setDeleteFollowUpModalOpen(true);
}}
aria-label={t("common.delete")}>
<Trash2Icon className="h-4 w-4 text-slate-500" />
<TrashIcon className="h-4 w-4 text-slate-500" />
</Button>
</TooltipRenderer>
@@ -167,7 +167,7 @@ export const FollowUpItem = ({
duplicateFollowUp();
}}
aria-label={t("common.duplicate")}>
<CopyIcon className="h-4 w-4 text-slate-500" />
<CopyPlusIcon className="h-4 w-4 text-slate-500" />
</Button>
</TooltipRenderer>
</div>

View File

@@ -1,6 +1,3 @@
import { type Response } from "@prisma/client";
import { notFound } from "next/navigation";
import { TSurvey } from "@formbricks/types/surveys/types";
import {
IMPRINT_URL,
IS_FORMBRICKS_CLOUD,
@@ -19,6 +16,9 @@ import { PinScreen } from "@/modules/survey/link/components/pin-screen";
import { SurveyInactive } from "@/modules/survey/link/components/survey-inactive";
import { getEmailVerificationDetails } from "@/modules/survey/link/lib/helper";
import { getProjectByEnvironmentId } from "@/modules/survey/link/lib/project";
import { type Response } from "@prisma/client";
import { notFound } from "next/navigation";
import { TSurvey } from "@formbricks/types/surveys/types";
interface SurveyRendererProps {
survey: TSurvey;
@@ -59,7 +59,7 @@ export const renderSurvey = async ({
const isSpamProtectionEnabled = Boolean(IS_RECAPTCHA_CONFIGURED && survey.recaptcha?.enabled);
if (survey.status !== "inProgress") {
if (survey.status !== "inProgress" && !isPreview) {
const project = await getProjectByEnvironmentId(survey.environmentId);
return (
<SurveyInactive

View File

@@ -59,8 +59,8 @@ export const BadgeContent: React.FC<BadgeContentProps> = ({
};
const content = (
<button
type="button"
// eslint-disable-next-line jsx-a11y/no-noninteractive-element-interactions, jsx-a11y/prefer-tag-over-role, jsx-a11y/click-events-have-key-events, jsx-a11y/no-static-element-interactions
<div
role={isCopyEnabled ? "button" : undefined}
className={getButtonClasses()}
onClick={handleCopy}
@@ -69,7 +69,7 @@ export const BadgeContent: React.FC<BadgeContentProps> = ({
onMouseLeave={isCopyEnabled ? () => setIsHovered(false) : undefined}>
<span>{id}</span>
{renderIcon()}
</button>
</div>
);
const getTooltipContent = () => {

View File

@@ -96,7 +96,7 @@ describe("IdBadge", () => {
test("removes interactive elements when copy is disabled", () => {
const { container } = render(<IdBadge id="1734" copyDisabled={true} />);
const badge = container.querySelector("button");
const badge = container.querySelector("div");
// Should not have cursor-pointer class
expect(badge).not.toHaveClass("cursor-pointer");

View File

@@ -1,51 +1,38 @@
import "@testing-library/jest-dom/vitest";
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test } from "vitest";
import { afterEach, describe, expect, test, vi } from "vitest";
import { NoMobileOverlay } from "./index";
// Mock the tolgee translation
vi.mock("@tolgee/react", () => ({
useTranslate: () => ({
t: (key: string) =>
key === "common.mobile_overlay_text" ? "Please use desktop to access this section" : key,
}),
}));
describe("NoMobileOverlay", () => {
afterEach(() => {
cleanup();
});
test("renders title and paragraphs", () => {
test("renders overlay with correct text", () => {
render(<NoMobileOverlay />);
expect(
screen.getByRole("heading", { level: 1, name: "common.mobile_overlay_title" })
).toBeInTheDocument();
expect(screen.getByText("common.mobile_overlay_app_works_best_on_desktop")).toBeInTheDocument();
expect(screen.getByText("common.mobile_overlay_surveys_look_good")).toBeInTheDocument();
expect(screen.getByText("Please use desktop to access this section")).toBeInTheDocument();
});
test("has proper overlay classes (z-index and responsive hide)", () => {
test("has proper z-index for overlay", () => {
render(<NoMobileOverlay />);
const overlay = document.querySelector("div.fixed");
expect(overlay).toBeInTheDocument();
const overlay = screen.getByText("Please use desktop to access this section").closest("div.fixed");
expect(overlay).toHaveClass("z-[9999]");
});
test("has responsive layout with sm:hidden class", () => {
render(<NoMobileOverlay />);
const overlay = screen.getByText("Please use desktop to access this section").closest("div.fixed");
expect(overlay).toHaveClass("sm:hidden");
});
test("renders learn more link with correct href", () => {
render(<NoMobileOverlay />);
const link = screen.getByRole("link", { name: "common.learn_more" });
expect(link).toBeInTheDocument();
expect(link).toHaveAttribute("href", "https://formbricks.com/docs/xm-and-surveys/overview");
});
test("stacks icons with maximize centered inside smartphone", () => {
const { container } = render(<NoMobileOverlay />);
const wrapper = container.querySelector("div.relative.h-16.w-16");
expect(wrapper).toBeInTheDocument();
const phoneSvg = wrapper?.querySelector("svg.h-16.w-16");
expect(phoneSvg).toBeInTheDocument();
const expandSvg = wrapper?.querySelector("svg.absolute");
expect(expandSvg).toBeInTheDocument();
expect(expandSvg).toHaveClass("left-1/2", "top-1/3", "-translate-x-1/2", "-translate-y-1/3");
});
});

View File

@@ -1,35 +1,20 @@
"use client";
import { useTranslate } from "@tolgee/react";
import { ExternalLinkIcon, Maximize2Icon, SmartphoneIcon } from "lucide-react";
import { Button } from "@/modules/ui/components/button";
import { SmartphoneIcon, XIcon } from "lucide-react";
export const NoMobileOverlay = () => {
const { t } = useTranslate();
return (
<div className="fixed inset-0 z-[9999] sm:hidden">
<div className="absolute inset-0 bg-slate-50"></div>
<div className="relative mx-auto flex h-full max-w-xl flex-col items-center justify-center py-16 text-center">
<div className="relative h-16 w-16">
<SmartphoneIcon className="text-muted-foreground h-16 w-16" />
<Maximize2Icon className="text-muted-foreground absolute left-1/2 top-1/3 h-5 w-5 -translate-x-1/2 -translate-y-1/3" />
<>
<div className="fixed inset-0 z-[9999] flex items-center justify-center sm:hidden">
<div className="relative h-full w-full bg-slate-50"></div>
<div className="bg-slate-850 absolute mx-8 flex flex-col items-center gap-6 rounded-lg px-8 py-10 text-center">
<XIcon className="absolute top-14 h-8 w-8 text-slate-500" />
<SmartphoneIcon className="h-16 w-16 text-slate-500" />
<p className="text-slate-500">{t("common.mobile_overlay_text")}</p>
</div>
<h1 className="mt-2 text-2xl font-bold text-zinc-900 dark:text-white">
{t("common.mobile_overlay_title")}
</h1>
<p className="mt-2 text-base text-zinc-600 dark:text-zinc-400">
{t("common.mobile_overlay_app_works_best_on_desktop")}
</p>
<p className="mt-2 text-base text-zinc-600 dark:text-zinc-400">
{t("common.mobile_overlay_surveys_look_good")}
</p>
<Button variant="default" asChild className="mt-8">
<a href="https://formbricks.com/docs/xm-and-surveys/overview">
{t("common.learn_more")}
<ExternalLinkIcon />
</a>
</Button>
</div>
</div>
</>
);
};

View File

@@ -3,7 +3,6 @@ export const SURVEYS_API_URL = `/api/v1/management/surveys`;
export const WEBHOOKS_API_URL = `/api/v2/management/webhooks`;
export const ROLES_API_URL = `/api/v2/roles`;
export const ME_API_URL = `/api/v2/me`;
export const HEALTH_API_URL = `/api/v2/health`;
export const TEAMS_API_URL = (organizationId: string) => `/api/v2/organizations/${organizationId}/teams`;
export const PROJECT_TEAMS_API_URL = (organizationId: string) =>

View File

@@ -1,135 +0,0 @@
import { expect } from "@playwright/test";
import { logger } from "@formbricks/logger";
import { test } from "../lib/fixtures";
import { HEALTH_API_URL } from "./constants";
test.describe("API Tests for Health Endpoint", () => {
test("Health check returns 200 with dependency status", async ({ request }) => {
try {
// Make request to health endpoint (no authentication required)
const response = await request.get(HEALTH_API_URL);
// Should always return 200 if the health check endpoint can execute
expect(response.status()).toBe(200);
const responseBody = await response.json();
// Verify response structure
expect(responseBody).toHaveProperty("data");
expect(responseBody.data).toHaveProperty("main_database");
expect(responseBody.data).toHaveProperty("cache_database");
// Verify data types are boolean
expect(typeof responseBody.data.main_database).toBe("boolean");
expect(typeof responseBody.data.cache_database).toBe("boolean");
// Log the health status for debugging
logger.info(
{
main_database: responseBody.data.main_database,
cache_database: responseBody.data.cache_database,
},
"Health check status"
);
// In a healthy system, we expect both to be true
// But we don't fail the test if they're false - that's what the health check is for
if (!responseBody.data.main_database) {
logger.warn("Main database is unhealthy");
}
if (!responseBody.data.cache_database) {
logger.warn("Cache database is unhealthy");
}
} catch (error) {
logger.error(error, "Error during health check API test");
throw error;
}
});
test("Health check response time is reasonable", async ({ request }) => {
try {
const startTime = Date.now();
const response = await request.get(HEALTH_API_URL);
const endTime = Date.now();
const responseTime = endTime - startTime;
expect(response.status()).toBe(200);
// Health check should respond within 5 seconds
expect(responseTime).toBeLessThan(5000);
logger.info({ responseTime }, "Health check response time");
} catch (error) {
logger.error(error, "Error during health check performance test");
throw error;
}
});
test("Health check is accessible without authentication", async ({ request }) => {
try {
// Make request without any headers or authentication
const response = await request.get(HEALTH_API_URL, {
headers: {
// Explicitly no x-api-key or other auth headers
},
});
// Should be accessible without authentication
expect(response.status()).toBe(200);
const responseBody = await response.json();
expect(responseBody).toHaveProperty("data");
} catch (error) {
logger.error(error, "Error during unauthenticated health check test");
throw error;
}
});
test("Health check handles CORS properly", async ({ request }) => {
try {
// Test with OPTIONS request (preflight)
const optionsResponse = await request.fetch(HEALTH_API_URL, {
method: "OPTIONS",
});
// OPTIONS should succeed or at least not be a server error
expect(optionsResponse.status()).not.toBe(500);
// Test regular GET request
const getResponse = await request.get(HEALTH_API_URL);
expect(getResponse.status()).toBe(200);
} catch (error) {
logger.error(error, "Error during CORS health check test");
throw error;
}
});
test("Health check OpenAPI schema compliance", async ({ request }) => {
try {
const response = await request.get(HEALTH_API_URL);
expect(response.status()).toBe(200);
const responseBody = await response.json();
// Verify it matches our OpenAPI schema
expect(responseBody).toMatchObject({
data: {
main_database: expect.any(Boolean),
cache_database: expect.any(Boolean),
},
});
// Ensure no extra properties in the response data
const dataKeys = Object.keys(responseBody.data);
expect(dataKeys).toHaveLength(2);
expect(dataKeys).toContain("main_database");
expect(dataKeys).toContain("cache_database");
} catch (error) {
logger.error(error, "Error during OpenAPI schema compliance test");
throw error;
}
});
});

View File

@@ -15,7 +15,7 @@ Before you proceed, make sure you have the following:
Copy and paste the following command into your terminal:
```bash
/bin/sh -c "$(curl -fsSL https://raw.githubusercontent.com/formbricks/formbricks/stable/docker/formbricks.sh)"
/bin/sh -c "$(curl -fsSL https://raw.githubusercontent.com/formbricks/formbricks/main/docker/formbricks.sh)"
```
The script will prompt you for the following information:

View File

@@ -424,12 +424,20 @@ EOT
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
fi
# Step 3: Build service snippets and inject them BEFORE the volumes section (robust, no sed -i multiline)
# Step 3: Build service snippets and inject them BEFORE the volumes section (non-destructive: skip if service exists)
services_snippet_file="services_snippet.yml"
: > "$services_snippet_file"
insert_traefik="y"
if grep -q "^ traefik:" docker-compose.yml; then insert_traefik="n"; fi
if [[ $minio_storage == "y" ]]; then
cat > "$services_snippet_file" << EOF
insert_minio="y"; insert_minio_init="y"
if grep -q "^ minio:" docker-compose.yml; then insert_minio="n"; fi
if grep -q "^ minio-init:" docker-compose.yml; then insert_minio_init="n"; fi
if [[ $insert_minio == "y" ]]; then
cat >> "$services_snippet_file" << EOF
minio:
restart: always
@@ -458,6 +466,11 @@ EOT
- "traefik.http.middlewares.minio-cors.headers.addvaryheader=true"
- "traefik.http.middlewares.minio-ratelimit.ratelimit.average=100"
- "traefik.http.middlewares.minio-ratelimit.ratelimit.burst=200"
EOF
fi
if [[ $insert_minio_init == "y" ]]; then
cat >> "$services_snippet_file" << EOF
minio-init:
image: minio/mc@sha256:95b5f3f7969a5c5a9f3a700ba72d5c84172819e13385aaf916e237cf111ab868
depends_on:
@@ -471,7 +484,11 @@ EOT
entrypoint: ["/bin/sh", "/tmp/minio-init.sh"]
volumes:
- ./minio-init.sh:/tmp/minio-init.sh:ro
EOF
fi
if [[ $insert_traefik == "y" ]]; then
cat >> "$services_snippet_file" << EOF
traefik:
image: "traefik:v2.7"
restart: always
@@ -488,6 +505,7 @@ EOT
- ./acme.json:/acme.json
- /var/run/docker.sock:/var/run/docker.sock:ro
EOF
fi
# Downgrade MinIO router to plain HTTP when HTTPS is not configured
if [[ $https_setup != "y" ]]; then
@@ -497,7 +515,8 @@ EOF
sed -i "s|accesscontrolalloworiginlist=https://$domain_name|accesscontrolalloworiginlist=http://$domain_name|" "$services_snippet_file"
fi
else
cat > "$services_snippet_file" << EOF
if [[ $insert_traefik == "y" ]]; then
cat > "$services_snippet_file" << EOF
traefik:
image: "traefik:v2.7"
@@ -514,6 +533,9 @@ EOF
- ./acme.json:/acme.json
- /var/run/docker.sock:/var/run/docker.sock:ro
EOF
else
: > "$services_snippet_file"
fi
fi
awk '
@@ -529,24 +551,51 @@ EOF
rm -f "$services_snippet_file"
# Deterministically rewrite the volumes section to include required volumes
awk -v add_minio="$minio_storage" '
BEGIN { in_vol=0 }
/^volumes:/ {
print "volumes:";
print " postgres:";
print " driver: local";
print " uploads:";
print " driver: local";
if (add_minio == "y") {
print " minio-data:";
print " driver: local";
}
in_vol=1; skip=1; next
}
# Skip original volumes block lines until EOF (we already printed ours)
{ if (!skip) print }
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
# Ensure required volumes exist without removing user-defined volumes
if grep -q '^volumes:' docker-compose.yml; then
# Ensure postgres
if ! awk '/^volumes:/{invol=1; next} invol && (/^[^[:space:]]/ || NF==0){invol=0} invol{ if($1=="postgres:") found=1 } END{ exit(found?0:1) }' docker-compose.yml; then
awk '
/^volumes:/ { print; invol=1; next }
invol && /^[^[:space:]]/ { if(!added){ print " postgres:"; print " driver: local"; added=1 } ; invol=0 }
{ print }
END { if (invol && !added) { print " postgres:"; print " driver: local" } }
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
fi
# Ensure redis
if ! awk '/^volumes:/{invol=1; next} invol && (/^[^[:space:]]/ || NF==0){invol=0} invol{ if($1=="redis:") found=1 } END{ exit(found?0:1) }' docker-compose.yml; then
awk '
/^volumes:/ { print; invol=1; next }
invol && /^[^[:space:]]/ { if(!added){ print " redis:"; print " driver: local"; added=1 } ; invol=0 }
{ print }
END { if (invol && !added) { print " redis:"; print " driver: local" } }
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
fi
# Ensure minio-data if needed
if [[ $minio_storage == "y" ]]; then
if ! awk '/^volumes:/{invol=1; next} invol && (/^[^[:space:]]/ || NF==0){invol=0} invol{ if($1=="minio-data:") found=1 } END{ exit(found?0:1) }' docker-compose.yml; then
awk '
/^volumes:/ { print; invol=1; next }
invol && /^[^[:space:]]/ { if(!added){ print " minio-data:"; print " driver: local"; added=1 } ; invol=0 }
{ print }
END { if (invol && !added) { print " minio-data:"; print " driver: local" } }
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
fi
fi
else
{
echo ""
echo "volumes:"
echo " postgres:"
echo " driver: local"
echo " redis:"
echo " driver: local"
if [[ $minio_storage == "y" ]]; then
echo " minio-data:"
echo " driver: local"
fi
} >> docker-compose.yml
fi
# Create minio-init script outside heredoc to avoid variable expansion issues
if [[ $minio_storage == "y" ]]; then
@@ -618,105 +667,6 @@ MINIO_SCRIPT_EOF
docker compose up -d
if [[ $minio_storage == "y" ]]; then
echo " Waiting for MinIO to be ready..."
attempts=0
max_attempts=30
until docker run --rm --network $(basename "$PWD")_default --entrypoint /bin/sh \
minio/mc@sha256:95b5f3f7969a5c5a9f3a700ba72d5c84172819e13385aaf916e237cf111ab868 -lc \
"mc alias set minio http://minio:9000 '$minio_root_user' '$minio_root_password' >/dev/null 2>&1 && mc admin info minio >/dev/null 2>&1"; do
attempts=$((attempts+1))
if [ $attempts -ge $max_attempts ]; then
echo "❌ MinIO did not become ready in time. Proceeding, but subsequent steps may fail."
break
fi
echo "...attempt $attempts/$max_attempts"
sleep 5
done
echo " Ensuring bucket exists..."
docker run --rm --network $(basename "$PWD")_default \
-e MINIO_ROOT_USER="$minio_root_user" \
-e MINIO_ROOT_PASSWORD="$minio_root_password" \
-e MINIO_BUCKET_NAME="$minio_bucket_name" \
--entrypoint /bin/sh \
minio/mc@sha256:95b5f3f7969a5c5a9f3a700ba72d5c84172819e13385aaf916e237cf111ab868 -lc '
mc alias set minio http://minio:9000 "$minio_root_user" "$minio_root_password" >/dev/null 2>&1;
mc mb minio/"$minio_bucket_name" --ignore-existing
'
echo " Ensuring service user and policy exist (idempotent)..."
docker run --rm --network $(basename "$PWD")_default \
-e MINIO_ROOT_USER="$minio_root_user" \
-e MINIO_ROOT_PASSWORD="$minio_root_password" \
-e MINIO_SERVICE_USER="$minio_service_user" \
-e MINIO_SERVICE_PASSWORD="$minio_service_password" \
-e MINIO_BUCKET_NAME="$minio_bucket_name" \
--entrypoint /bin/sh \
minio/mc@sha256:95b5f3f7969a5c5a9f3a700ba72d5c84172819e13385aaf916e237cf111ab868 -lc '
mc alias set minio http://minio:9000 "$minio_root_user" "$minio_root_password" >/dev/null 2>&1;
if ! mc admin policy info minio formbricks-policy >/dev/null 2>&1; then
cat > /tmp/formbricks-policy.json << EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Effect": "Allow", "Action": ["s3:DeleteObject", "s3:GetObject", "s3:PutObject"], "Resource": ["arn:aws:s3:::$minio_bucket_name/*"] },
{ "Effect": "Allow", "Action": ["s3:ListBucket"], "Resource": ["arn:aws:s3:::$minio_bucket_name"] }
]
}
EOF
mc admin policy create minio formbricks-policy /tmp/formbricks-policy.json >/dev/null 2>&1 || true
fi;
if ! mc admin user info minio "$minio_service_user" >/dev/null 2>&1; then
mc admin user add minio "$minio_service_user" "$minio_service_password" >/dev/null 2>&1 || true
fi;
mc admin policy attach minio formbricks-policy --user "$minio_service_user" >/dev/null 2>&1 || true
'
fi
if [[ $minio_storage == "y" ]]; then
echo "⏳ Finalizing MinIO setup..."
attempts=0; max_attempts=60
while cid=$(docker compose ps -q minio-init 2>/dev/null); do
status=$(docker inspect -f '{{.State.Status}}' "$cid" 2>/dev/null || echo "")
if [ "$status" = "exited" ] || [ -z "$status" ]; then
break
fi
attempts=$((attempts+1))
if [ $attempts -ge $max_attempts ]; then
echo "⚠️ minio-init still running after wait; proceeding with cleanup anyway."
break
fi
sleep 2
done
echo "🧹 Cleaning up minio-init service and references..."
awk '
BEGIN{skip=0}
/^services:[[:space:]]*$/ { print; next }
/^ minio-init:/ { skip=1; next }
/^ [A-Za-z0-9_-]+:/ { if (skip) skip=0 }
{ if (!skip) print }
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
# Remove list-style "- minio-init" lines under depends_on (if any)
sed -E -i '/^[[:space:]]*-[[:space:]]*minio-init[[:space:]]*$/d' docker-compose.yml
# Remove the minio-init mapping and its condition line
sed -i '/^[[:space:]]*minio-init:[[:space:]]*$/,/^[[:space:]]*condition:[[:space:]]*service_completed_successfully[[:space:]]*$/d' docker-compose.yml
# Remove any stopped minio-init container and restart without orphans
docker compose rm -f -s minio-init >/dev/null 2>&1 || true
docker compose up -d --remove-orphans
# Clean up the temporary minio-init script
rm -f minio-init.sh
echo "✅ MinIO one-time init cleaned up."
fi
echo "🔗 To edit more variables and deeper config, go to the formbricks/docker-compose.yml, edit the file, and restart the container!"
echo "🚨 Make sure you have set up the DNS records as well as inbound rules for the domain name and IP address of this instance."
@@ -780,6 +730,40 @@ get_logs() {
sudo docker compose logs
}
cleanup_minio_init() {
echo "🧹 Cleaning up MinIO init service and references..."
cd formbricks
# Remove minio-init service block from docker-compose.yml
awk '
BEGIN{skip=0}
/^services:[[:space:]]*$/ { print; next }
/^ minio-init:/ { skip=1; next }
/^ [A-Za-z0-9_-]+:/ { if (skip) skip=0 }
{ if (!skip) print }
' docker-compose.yml > tmp.yml && mv tmp.yml docker-compose.yml
# Remove list-style "- minio-init" lines under depends_on (if any)
if sed --version >/dev/null 2>&1; then
sed -E -i '/^[[:space:]]*-[[:space:]]*minio-init[[:space:]]*$/d' docker-compose.yml
else
sed -E -i '' '/^[[:space:]]*-[[:space:]]*minio-init[[:space:]]*$/d' docker-compose.yml
fi
# Remove the minio-init mapping and its condition line (mapping style depends_on)
if sed --version >/dev/null 2>&1; then
sed -i '/^[[:space:]]*minio-init:[[:space:]]*$/,/^[[:space:]]*condition:[[:space:]]*service_completed_successfully[[:space:]]*$/d' docker-compose.yml
else
sed -i '' '/^[[:space:]]*minio-init:[[:space:]]*$/,/^[[:space:]]*condition:[[:space:]]*service_completed_successfully[[:space:]]*$/d' docker-compose.yml
fi
# Remove any stopped minio-init container and restart without orphans
docker compose rm -f -s minio-init >/dev/null 2>&1 || true
docker compose up -d --remove-orphans
echo "✅ MinIO init cleanup complete."
}
case "$1" in
install)
install_formbricks
@@ -796,6 +780,9 @@ restart)
logs)
get_logs
;;
cleanup-minio-init)
cleanup_minio_init
;;
uninstall)
uninstall_formbricks
;;

0
docker/migrate-to-v4.sh Executable file → Normal file
View File

View File

@@ -5675,7 +5675,7 @@
},
"/api/v1/management/storage": {
"post": {
"description": "API endpoint for uploading public files. Uploaded files are public and accessible by anyone. This endpoint requires authentication. It accepts a JSON body with fileName, fileType, environmentId, and optionally allowedFileExtensions to restrict file types. On success, it returns a signed URL for uploading the file to S3.",
"description": "API endpoint for uploading public files. Uploaded files are public and accessible by anyone. This endpoint requires authentication. It accepts a JSON body with fileName, fileType, environmentId, and optionally allowedFileExtensions to restrict file types. On success, it returns a signed URL for uploading the file to S3 along with a local upload URL.",
"parameters": [
{
"example": "{{apiKey}}",
@@ -5732,15 +5732,8 @@
"example": {
"data": {
"fileUrl": "http://localhost:3000/storage/cm1ubebtj000614kqe4hs3c67/public/profile--fid--abc123.png",
"presignedFields": {
"Policy": "base64EncodedPolicy",
"X-Amz-Algorithm": "AWS4-HMAC-SHA256",
"X-Amz-Credential": "your-credential",
"X-Amz-Date": "20250312T000000Z",
"X-Amz-Signature": "your-signature",
"key": "uploads/public/profile--fid--abc123.png"
},
"signedUrl": "https://s3.example.com/your-bucket",
"localUrl": "http://localhost:3000/storage/cm1ubebtj000614kqe4hs3c67/public/profile.png",
"signedUrl": "http://localhost:3000/api/v1/client/cm1ubebtj000614kqe4hs3c67/storage/public",
"updatedFileName": "profile--fid--abc123.png"
}
},
@@ -5752,12 +5745,9 @@
"description": "URL where the uploaded file can be accessed.",
"type": "string"
},
"presignedFields": {
"additionalProperties": {
"type": "string"
},
"description": "Form fields to include in the multipart/form-data POST to S3.",
"type": "object"
"localUrl": {
"description": "URL for uploading the file to local storage.",
"type": "string"
},
"signedUrl": {
"description": "Signed URL for uploading the file to S3.",
@@ -5775,7 +5765,7 @@
}
}
},
"description": "OK - Returns the signed URL, presigned fields, updated file name, and file URL."
"description": "OK - Returns the signed URL, updated file name, and file URL."
},
"400": {
"content": {
@@ -5839,6 +5829,187 @@
"tags": ["Management API - Storage"]
}
},
"/api/v1/management/storage/local": {
"post": {
"description": "Management API endpoint for uploading public files to local storage. This endpoint requires authentication. File metadata is provided via headers (X-File-Type, X-File-Name, X-Environment-ID, X-Signature, X-UUID, X-Timestamp) and the file is provided as a multipart/form-data file field named \"file\". The \"Content-Type\" header must be set to a valid MIME type.",
"parameters": [
{
"example": "{{apiKey}}",
"in": "header",
"name": "x-api-key",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "MIME type of the file. Must be a valid MIME type.",
"in": "header",
"name": "X-File-Type",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "URI encoded file name.",
"in": "header",
"name": "X-File-Name",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "ID of the environment.",
"in": "header",
"name": "X-Environment-ID",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "Signature for verifying the request.",
"in": "header",
"name": "X-Signature",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "Unique identifier for the signed upload.",
"in": "header",
"name": "X-UUID",
"required": true,
"schema": {
"type": "string"
}
},
{
"description": "Timestamp used for the signature.",
"in": "header",
"name": "X-Timestamp",
"required": true,
"schema": {
"type": "string"
}
}
],
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"properties": {
"file": {
"description": "The file to be uploaded as a valid file object (buffer).",
"format": "binary",
"type": "string"
}
},
"required": ["file"],
"type": "object"
}
}
},
"required": true
},
"responses": {
"200": {
"content": {
"application/json": {
"example": {
"data": {
"message": "File uploaded successfully"
}
},
"schema": {
"properties": {
"data": {
"properties": {
"message": {
"description": "Success message.",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
}
}
},
"description": "OK - File uploaded successfully."
},
"400": {
"content": {
"application/json": {
"example": {
"error": "fileType is required"
},
"schema": {
"properties": {
"error": {
"description": "Detailed error message.",
"type": "string"
}
},
"type": "object"
}
}
},
"description": "Bad Request - Missing required fields, invalid header values, or file issues."
},
"401": {
"content": {
"application/json": {
"example": {
"error": "Not authenticated"
},
"schema": {
"properties": {
"error": {
"description": "Detailed error message.",
"type": "string"
}
},
"type": "object"
}
}
},
"description": "Unauthorized - Authentication failed, invalid signature, or user not authorized."
},
"500": {
"content": {
"application/json": {
"example": {
"error": "File upload failed"
},
"schema": {
"properties": {
"error": {
"description": "Detailed error message.",
"type": "string"
}
},
"type": "object"
}
}
},
"description": "Internal Server Error - File upload failed due to server error."
}
},
"servers": [
{
"description": "Formbricks API Server",
"url": "https://app.formbricks.com/api/v1"
}
],
"summary": "Upload Public File to Local Storage",
"tags": ["Management API - Storage"]
}
},
"/api/v1/management/surveys": {
"get": {
"description": "Fetches all existing surveys",

View File

@@ -7,8 +7,6 @@ servers:
- url: https://app.formbricks.com/api/v2
description: Formbricks Cloud
tags:
- name: Health
description: Operations for checking critical application dependencies health status.
- name: Roles
description: Operations for managing roles.
- name: Me
@@ -393,36 +391,6 @@ paths:
servers:
- url: https://app.formbricks.com/api/v2
description: Formbricks API Server
/health:
get:
tags:
- Health
summary: Health Check
description: Check the health status of critical application dependencies
including database and cache.
operationId: healthCheck
security: []
responses:
"200":
description: Health check completed successfully. Check individual dependency
status in response data.
content:
application/json:
schema:
type: object
properties:
main_database:
type: boolean
description: Main database connection status - true if database is reachable and
running
example: true
cache_database:
type: boolean
description: Cache database connection status - true if cache database is
reachable and running
example: true
title: Health Check Response
description: Health check status for critical application dependencies
/roles:
get:
operationId: getRoles
@@ -3532,24 +3500,6 @@ components:
name: x-api-key
description: Use your Formbricks x-api-key to authenticate.
schemas:
health:
type: object
properties:
main_database:
type: boolean
description: Main database connection status - true if database is reachable and
running
example: true
cache_database:
type: boolean
description: Cache database connection status - true if cache database is
reachable and running
example: true
required:
- main_database
- cache_database
title: Health Check Response
description: Health check status for critical application dependencies
role:
type: object
properties:
@@ -3885,6 +3835,8 @@ components:
type: string
enum:
- link
- web
- website
- app
description: The type of the survey
status:
@@ -4394,6 +4346,7 @@ components:
- createdBy
- environmentId
- endings
- thankYouCard
- hiddenFields
- variables
- displayOption
@@ -4411,6 +4364,7 @@ components:
- isSingleResponsePerEmailEnabled
- inlineTriggers
- isBackButtonHidden
- verifyEmail
- recaptcha
- metadata
- displayPercentage

View File

@@ -5,11 +5,6 @@
"light": "#00C4B8",
"primary": "#00C4B8"
},
"errors": {
"404": {
"redirect": true
}
},
"favicon": "/images/favicon.svg",
"footer": {
"socials": {
@@ -74,6 +69,7 @@
"xm-and-surveys/surveys/general-features/multi-language-surveys",
"xm-and-surveys/surveys/general-features/partial-submissions",
"xm-and-surveys/surveys/general-features/recall",
"xm-and-surveys/surveys/general-features/schedule-start-end-dates",
"xm-and-surveys/surveys/general-features/metadata",
"xm-and-surveys/surveys/general-features/variables",
"xm-and-surveys/surveys/general-features/hide-back-button",
@@ -229,7 +225,6 @@
"self-hosting/configuration/custom-ssl",
"self-hosting/configuration/environment-variables",
"self-hosting/configuration/smtp",
"self-hosting/configuration/file-uploads",
"self-hosting/configuration/domain-configuration",
{
"group": "Auth & SSO",
@@ -398,358 +393,447 @@
"redirects": [
{
"destination": "/docs/overview/what-is-formbricks",
"permanent": true,
"source": "/docs/introduction/what-is-formbricks"
},
{
"destination": "/docs/overview/open-source",
"permanent": true,
"source": "/docs/introduction/why-open-source"
},
{
"destination": "/docs/xm-and-surveys/overview",
"permanent": true,
"source": "/docs/introduction/how-it-works"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/contact-form",
"permanent": true,
"source": "/docs/best-practices/contact-form"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/docs-feedback",
"permanent": true,
"source": "/docs/best-practices/docs-feedback"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/feature-chaser",
"permanent": true,
"source": "/docs/best-practices/feature-chaser"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/feedback-box",
"permanent": true,
"source": "/docs/best-practices/feedback-box"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/improve-email-content",
"permanent": true,
"source": "/docs/best-practices/improve-email-content"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/interview-prompt",
"permanent": true,
"source": "/docs/best-practices/interview-prompt"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/cancel-subscription",
"permanent": true,
"source": "/docs/best-practices/cancel-subscription"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/pmf-survey",
"permanent": true,
"source": "/docs/best-practices/pmf-survey"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/quiz-time",
"permanent": true,
"source": "/docs/best-practices/quiz-time"
},
{
"destination": "/docs/xm-and-surveys/xm/best-practices/improve-trial-cr",
"permanent": true,
"source": "/docs/best-practices/improve-trial-cr"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/quickstart",
"permanent": true,
"source": "/docs/link-surveys/quickstart"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/add-image-or-video-question",
"permanent": true,
"source": "/docs/link-surveys/global/add-image-or-video-question"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/conditional-logic",
"permanent": true,
"source": "/docs/link-surveys/global/conditional-logic"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/overwrite-styling",
"permanent": true,
"source": "/docs/link-surveys/global/overwrite-styling"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/data-prefilling",
"permanent": true,
"source": "/docs/link-surveys/global/data-prefilling"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/embed-surveys",
"permanent": true,
"source": "/docs/link-surveys/embed-surveys"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/hidden-fields",
"permanent": true,
"source": "/docs/link-surveys/global/hidden-fields"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/limit-submissions",
"permanent": true,
"source": "/docs/link-surveys/global/limit-submissions"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/market-research-panel",
"permanent": true,
"source": "/docs/link-surveys/market-research-panel"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/multi-language-surveys",
"permanent": true,
"source": "/docs/link-surveys/global/multi-language-surveys"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/partial-submissions",
"permanent": true,
"source": "/docs/link-surveys/global/partial-submissions"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/pin-protected-surveys",
"permanent": true,
"source": "/docs/link-surveys/pin-protected-surveys"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/recall",
"permanent": true,
"source": "/docs/link-surveys/global/recall"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/single-use-links",
"permanent": true,
"source": "/docs/link-surveys/single-use-links"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/source-tracking",
"permanent": true,
"source": "/docs/link-surveys/source-tracking"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/schedule-start-end-dates",
"permanent": true,
"source": "/docs/link-surveys/global/schedule-start-end-dates"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/start-at-question",
"permanent": true,
"source": "/docs/link-surveys/start-at-question"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/metadata",
"permanent": true,
"source": "/docs/link-surveys/global/metadata"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/variables",
"permanent": true,
"source": "/docs/link-surveys/global/variables"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/verify-email-before-survey",
"permanent": true,
"source": "/docs/link-surveys/verify-email-before-survey"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/add-image-or-video-question",
"permanent": true,
"source": "/docs/app-surveys/global/add-image-or-video-question"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/consent",
"permanent": true,
"source": "/docs/core-features/global/question-type/consent"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/statement-cta",
"permanent": true,
"source": "/docs/core-features/global/question-type/statement-cta"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/airtable",
"permanent": true,
"source": "/docs/developer-docs/integrations/airtable"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/zapier",
"permanent": true,
"source": "/docs/developer-docs/integrations/zapier"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/wordpress",
"permanent": true,
"source": "/docs/developer-docs/integrations/wordpress"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/slack",
"permanent": true,
"source": "/docs/developer-docs/integrations/slack"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/n8n",
"permanent": true,
"source": "/docs/developer-docs/integrations/n8n"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/notion",
"permanent": true,
"source": "/docs/developer-docs/integrations/notion"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/google-sheets",
"permanent": true,
"source": "/docs/developer-docs/integrations/google-sheets"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/quickstart",
"permanent": true,
"source": "/docs/app-surveys/quickstart"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/address",
"permanent": true,
"source": "/docs/core-features/global/question-type/address"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/framework-guides",
"permanent": true,
"source": "/docs/app-surveys/framework-guides"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/activepieces",
"permanent": true,
"source": "/docs/developer-docs/integrations/activepieces"
},
{
"destination": "/docs/xm-and-surveys/core-features/user-management",
"permanent": true,
"source": "/docs/core-features/global/access-roles"
},
{
"destination": "/docs/xm-and-surveys/core-features/styling-theme",
"permanent": true,
"source": "/docs/core-features/global/styling-theme"
},
{
"destination": "/docs/xm-and-surveys/core-features/email-customization",
"permanent": true,
"source": "/docs/core-features/global/email-customization"
},
{
"destination": "/docs/self-hosting/setup/one-click",
"permanent": true,
"source": "/docs/self-hosting/one-click"
},
{
"destination": "/docs/self-hosting/configuration/custom-ssl",
"permanent": true,
"source": "/docs/self-hosting/custom-ssl"
},
{
"destination": "/docs/self-hosting/setup/docker",
"permanent": true,
"source": "/docs/self-hosting/docker"
},
{
"destination": "/docs/self-hosting/setup/cluster-setup",
"permanent": true,
"source": "/docs/self-hosting/cluster-setup"
},
{
"destination": "/docs/self-hosting/advanced/migration",
"permanent": true,
"source": "/docs/self-hosting/migration-guide"
},
{
"destination": "/docs/self-hosting/configuration/integrations",
"permanent": true,
"source": "/docs/self-hosting/integrations"
},
{
"destination": "/docs/self-hosting/advanced/license",
"permanent": true,
"source": "/docs/self-hosting/license"
},
{
"destination": "/docs/self-hosting/advanced/rate-limiting",
"permanent": true,
"source": "/docs/self-hosting/rate-limiting"
},
{
"destination": "/docs/self-hosting/setup/cluster-setup",
"permanent": true,
"source": "/docs/self-hosting/kubernetes"
},
{
"destination": "/docs/development/overview",
"permanent": true,
"source": "/docs/developer-docs/overview"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/framework-guides",
"permanent": true,
"source": "/docs/developer-docs/js-sdk"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/framework-guides#react-native",
"permanent": true,
"source": "/docs/developer-docs/react-native-in-app-surveys"
},
{
"destination": "/docs/api-reference/rest-api",
"permanent": true,
"source": "/docs/developer-docs/rest-api"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/webhooks",
"permanent": true,
"source": "/docs/developer-docs/webhooks"
},
{
"destination": "/docs/development/contribution/contribution",
"permanent": true,
"source": "/docs/developer-docs/contributing/get-started"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/actions",
"permanent": true,
"source": "/docs/app-surveys/actions"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/advanced-targeting",
"permanent": true,
"source": "/docs/app-surveys/advanced-targeting"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/user-identification",
"permanent": true,
"source": "/docs/app-surveys/user-identification"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/recontact",
"permanent": true,
"source": "/docs/app-surveys/recontact"
},
{
"destination": "/docs/xm-and-surveys/surveys/website-app-surveys/show-survey-to-percent-of-users",
"permanent": true,
"source": "/docs/app-surveys/global/show-survey-to-percent-of-users"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/metadata",
"permanent": true,
"source": "/docs/app-surveys/global/metadata"
},
{
"destination": "/docs/api-reference",
"permanent": true,
"source": "/docs/api-docs"
},
{
"destination": "/docs/development/troubleshooting",
"permanent": true,
"source": "/docs/developer-docs/contributing/troubleshooting"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/file-upload",
"permanent": true,
"source": "/docs/core-features/global/question-type/file-upload"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/select-picture",
"permanent": true,
"source": "/docs/core-features/global/question-type/picture-selection"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/rating",
"permanent": true,
"source": "/docs/core-features/global/question-type/rating"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/date",
"permanent": true,
"source": "/docs/core-features/global/question-type/date"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/schedule-a-meeting",
"permanent": true,
"source": "/docs/core-features/global/question-type/schedule"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/free-text",
"permanent": true,
"source": "/docs/core-features/global/question-type/free-text"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/select-single",
"permanent": true,
"source": "/docs/core-features/global/question-type/single-select"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/select-multiple",
"permanent": true,
"source": "/docs/core-features/global/question-type/multiple-select"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/matrix",
"permanent": true,
"source": "/docs/core-features/global/question-type/matrix"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/make",
"permanent": true,
"source": "/docs/developer-docs/integrations/make"
},
{
"destination": "/docs/xm-and-surveys/core-features/integrations/overview",
"permanent": true,
"source": "/docs/developer-docs/integrations/overview"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/hidden-fields",
"permanent": true,
"source": "/docs/app-surveys/global/hidden-fields"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/limit-submissions",
"permanent": true,
"source": "/docs/app-surveys/global/limit-submissions"
},
{
"destination": "/docs/xm-and-surveys/core-features/question-type/net-promoter-score",
"permanent": true,
"source": "/docs/core-features/global/question-type/net-promoter-score"
},
{
"destination": "/docs/xm-and-surveys/surveys/link-surveys/data-prefilling",
"permanent": true,
"source": "/docs/link-surveys/data-prefilling"
},
{
"destination": "/docs/xm-and-surveys/surveys/general-features/multi-language-surveys",
"permanent": true,
"source": "/docs/app-surveys/global/multi-language-surveys"
}
],

Binary file not shown.

Before

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

After

Width:  |  Height:  |  Size: 160 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

After

Width:  |  Height:  |  Size: 65 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

After

Width:  |  Height:  |  Size: 79 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 131 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 101 KiB

After

Width:  |  Height:  |  Size: 72 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 119 KiB

After

Width:  |  Height:  |  Size: 43 KiB

View File

@@ -4,149 +4,6 @@ description: "Formbricks Self-hosted version migration"
icon: "arrow-right"
---
## v4.0
<Warning>
**Important: Migration Required**
Formbricks 4 introduces additional requirements for self-hosting setups and makes a dedicated Redis cache as well as S3-compatible file storage mandatory.
</Warning>
Formbricks 4.0 is a **major milestone** that sets up the technical foundation for future iterations and feature improvements. This release focuses on modernizing core infrastructure components to improve reliability, scalability, and enable advanced features going forward.
### What's New in Formbricks 4.0
**🚀 New Enterprise Features:**
- **Quotas Management**: Advanced quota controls for enterprise users
**🏗️ Technical Foundation Improvements:**
- **Enhanced File Storage**: Improved file handling with better performance and reliability
- **Improved Caching**: New caching functionality improving speed, extensibility and reliability
- **Database Optimization**: Removal of unused database tables and fields for better performance
- **Future-Ready Architecture**: Standardized infrastructure components for upcoming features
### What This Means for Your Self-Hosting Setup
These improvements in Formbricks 4.0 also make some infrastructure requirements mandatory going forward:
- **Redis** for caching
- **MinIO or S3-compatible storage** for file uploads
These services are already included in the updated one-click setup for self-hosters, but existing users need to upgrade their setup. More information on this below.
### Why We Made These Changes
We know this represents more moving parts in your infrastructure and might even introduce more complexity in hosting Formbricks, and we don't take this decision lightly. As Formbricks grows into a comprehensive Survey and Experience Management platform, we've reached a point where the simple, single-service approach was holding back our ability to deliver the reliable, feature-rich product our users demand and deserve.
By moving to dedicated, professional-grade services for these critical functions, we're building the foundation needed to deliver:
- **Enterprise-grade reliability** with proper redundancy and backup capabilities
- **Advanced features** that require sophisticated caching and file processing
- **Better performance** through optimized, dedicated services
- **Future scalability** to support larger deployments and more complex use cases without the need to maintain two different approaches
We believe this is the only path forward to build the comprehensive Survey and Experience Management software we're aiming for.
### Migration Steps for v4.0
Additional migration steps are needed if your self-hosted Formbricks setup uses local file storage (instead of S3-compatible file storage) or doesn't already use a Redis cache.
### One-Click Setup
For users using our official one-click setup, we provide an automated migration using a migration script:
```bash
# Download the latest script
curl -fsSL -o migrate-to-v4.sh \
https://raw.githubusercontent.com/formbricks/formbricks/stable/docker/migrate-to-v4.sh
# Make it executable
chmod +x migrate-to-v4.sh
# Launch the guided migration
./migrate-to-v4.sh
```
This script guides you through the steps for the infrastructure migration and does the following:
- Adds a Redis service to your setup and configures it
- Adds a MinIO service (open source S3-alternative) to your setup, configures it and migrates local files to it
- Pulls the latest Formbricks image and updates your instance
### Manual Setup
If you use a different setup to host your Formbricks instance, make sure to apply the necessary adjustments to run Formbricks 4.0.
#### Redis
Formbricks 4.0 requires a Redis instance to work properly. Please add a Redis instance to your Docker setup, your K8s infrastructure, or however you are hosting Formbricks at the moment. Formbricks works with the latest versions of Redis as well as Valkey.
You need to configure the `REDIS_URL` environment variable and point it to your Redis instance.
#### S3-compatible storage
To use file storage (e.g., file upload questions, image choice questions, custom survey backgrounds, etc.), you need to have S3-compatible file storage set up and connected to Formbricks.
Formbricks supports many S3-compatible storage providers, including:
- AWS S3
- Digital Ocean Spaces
- Hetzner Object Storage
- Custom MinIO server
Please make sure to set up a storage bucket with one of these solutions and then link it to Formbricks using the following environment variables:
```
S3_ACCESS_KEY: your-access-key
S3_SECRET_KEY: your-secret-key
S3_REGION: us-east-1
S3_BUCKET_NAME: formbricks-uploads
S3_ENDPOINT_URL: http://minio:9000 # not needed for AWS S3
```
#### Upgrade Process
**1. Backup your Database**
**Critical Step**: Create a complete database backup before proceeding. Formbricks 4.0 will automatically remove unused database tables and fields during startup.
```bash
docker exec formbricks-postgres-1 pg_dump -Fc -U postgres -d formbricks > formbricks_pre_v4.0_$(date +%Y%m%d_%H%M%S).dump
```
<Info>
If you run into "**No such container**", use `docker ps` to find your container name,
e.g. `formbricks_postgres_1`.
</Info>
**2. Upgrade to Formbricks 4.0**
Pull the latest Docker images and restart the setup (example for docker-compose):
```bash
# Pull the latest version
docker compose pull
# Stop the current instance
docker compose down
# Start with Formbricks 4.0
docker compose up -d
```
**3. Automatic Database Migration**
When you start Formbricks 4.0 for the first time, it will **automatically**:
- Detect and apply required database schema updates
- Remove unused database tables and fields
- Optimize the database structure for better performance
No manual intervention is required for the database migration.
**4. Verify Your Upgrade**
- Access your Formbricks instance at the same URL as before
- Test file uploads to ensure S3/MinIO integration works correctly
- Verify that existing surveys and data are intact
- Check that previously uploaded files are accessible
### v3.3
<Info>
@@ -328,7 +185,7 @@ This major release brings a better approach to **data migrations**.
### Steps to Migrate
This guide is for users **self-hosting** Formbricks with the **one-click setup**. If you're using a different setup, you might adjust the commands.
This guide is for users **self-hosting** Formbricks with the **one-click setup**. If you're using a different setup, you may need to adjust the commands.
- &#x20;Navigate to the Formbricks Directory

View File

@@ -1,316 +0,0 @@
---
title: "File Uploads Configuration"
description: "Configure file storage for survey images, file uploads, and project assets in your self-hosted Formbricks instance"
icon: "upload"
---
Formbricks requires S3-compatible storage for file uploads. You can use external cloud storage services or the bundled MinIO option for a self-hosted solution.
## Why Configure File Uploads?
Setting up file storage enables important features in Formbricks, including:
- Adding images to surveys (questions, backgrounds, logos)
- 'File Upload' and 'Picture Selection' question types
- Project logos and branding
- Custom organization logos in emails
- Survey background images from uploads
<Warning>
If file uploads are not configured, the above features will be disabled and users won't be able to upload
files or images.
</Warning>
## Storage Options
Formbricks supports S3-compatible storage with two main configurations:
### 1. External S3-Compatible Storage
Use cloud storage services for production deployments:
- **AWS S3** (Amazon Web Services)
- **DigitalOcean Spaces**
- **Backblaze B2**
- **Wasabi**
- **StorJ**
- Any S3-compatible storage service
### 2. Bundled MinIO Storage (Self-Hosted)
<Warning>
**Important**: MinIO requires a dedicated subdomain to function properly. You must configure a subdomain
like `files.yourdomain.com` that points to your server. MinIO will not work without this subdomain setup.
</Warning>
MinIO provides a self-hosted S3-compatible storage solution that runs alongside Formbricks. This option:
- Runs in a Docker container alongside Formbricks
- Provides full S3 API compatibility
- Requires minimal additional configuration
## Configuration Methods
### Option 1: One-Click Setup Script
When using the Formbricks installation script, you'll be prompted to configure file uploads:
```bash
📁 Do you want to configure file uploads?
If you skip this, the following features will be disabled:
- Adding images to surveys (e.g., in questions or as background)
- 'File Upload' and 'Picture Selection' question types
- Project logos
- Custom organization logo in emails
Configure file uploads now? [Y/n] y
```
#### External S3-Compatible Storage
Choose this option for AWS S3, DigitalOcean Spaces, or other cloud providers:
```bash
🗄️ Do you want to use an external S3-compatible storage (AWS S3/DO Spaces/etc.)? [y/N] y
🔧 Enter S3 configuration (leave Endpoint empty for AWS S3):
S3 Access Key: your_access_key
S3 Secret Key: your_secret_key
S3 Region (e.g., us-east-1): us-east-1
S3 Bucket Name: your-bucket-name
S3 Endpoint URL (leave empty if you are using AWS S3): https://your-endpoint.com
```
#### Bundled MinIO Storage
Choose this option for a self-hosted S3-compatible storage that runs alongside Formbricks:
<Note>
**Critical Requirement**: Before proceeding, ensure you have configured a subdomain (e.g.,
`files.yourdomain.com`) that points to your server's IP address. MinIO will not function without this
subdomain setup.
</Note>
```bash
🗄️ Do you want to use an external S3-compatible storage (AWS S3/DO Spaces/etc.)? [y/N] n
🔗 Enter the files subdomain for object storage (e.g., files.yourdomain.com): files.yourdomain.com
```
The script will automatically:
- Generate secure MinIO credentials
- Create the storage bucket
- Configure SSL certificates for the files subdomain
- Configure Traefik routing for the subdomain
### Option 2: Manual Environment Variables
Add the following environment variables to your `docker-compose.yml` or `.env` file:
#### For S3-Compatible Storage
```bash
# S3 Storage Configuration
S3_ACCESS_KEY=your_access_key
S3_SECRET_KEY=your_secret_key
S3_REGION=us-east-1
S3_BUCKET_NAME=your-bucket-name
# Optional: For third-party S3-compatible services (leave empty for AWS S3)
S3_ENDPOINT_URL=https://your-endpoint.com
# Enable path-style URLs for third-party services (1 for enabled, 0 for disabled)
S3_FORCE_PATH_STYLE=1
```
## Provider-Specific Examples
### AWS S3
```bash
S3_ACCESS_KEY=AKIA1234567890EXAMPLE
S3_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
S3_REGION=us-east-1
S3_BUCKET_NAME=my-formbricks-uploads
# S3_ENDPOINT_URL is not needed for AWS S3
# S3_FORCE_PATH_STYLE=0
```
### DigitalOcean Spaces
```bash
S3_ACCESS_KEY=your_spaces_key
S3_SECRET_KEY=your_spaces_secret
S3_REGION=nyc3
S3_BUCKET_NAME=my-formbricks-space
S3_ENDPOINT_URL=https://nyc3.digitaloceanspaces.com
S3_FORCE_PATH_STYLE=1
```
### MinIO (Self-Hosted)
```bash
S3_ACCESS_KEY=minio_access_key
S3_SECRET_KEY=minio_secret_key
S3_REGION=us-east-1
S3_BUCKET_NAME=formbricks-uploads
S3_ENDPOINT_URL=https://files.yourdomain.com
S3_FORCE_PATH_STYLE=1
```
### Backblaze B2
```bash
S3_ACCESS_KEY=your_b2_key_id
S3_SECRET_KEY=your_b2_application_key
S3_REGION=us-west-000
S3_BUCKET_NAME=my-formbricks-bucket
S3_ENDPOINT_URL=https://s3.us-west-000.backblazeb2.com
S3_FORCE_PATH_STYLE=1
```
## Bundled MinIO Setup
When using the bundled MinIO option through the setup script, you get:
### Automatic Configuration
- **Storage Service**: MinIO running in a Docker container
- **Credentials**: Auto-generated secure access keys
- **Bucket**: Automatically created `formbricks-uploads` bucket
- **SSL**: Automatic certificate generation for the files subdomain
### Access Information
After setup, you'll see:
```bash
🗄️ MinIO Storage Setup Complete:
• S3 API: https://files.yourdomain.com
• Access Key: formbricks-a1b2c3d4
• Bucket: formbricks-uploads (✅ automatically created)
```
### DNS Requirements
<Warning>
**Critical for MinIO**: The subdomain configuration is mandatory for MinIO to function. Without proper
subdomain DNS setup, MinIO will fail to work entirely.
</Warning>
For the bundled MinIO setup, ensure:
1. **Main domain**: `yourdomain.com` points to your server IP
2. **Files subdomain**: `files.yourdomain.com` points to your server IP (this is required for MinIO to work)
3. **Firewall**: Ports 80 and 443 are open in your server's firewall
4. **DNS propagation**: Allow time for DNS changes to propagate globally
## Docker Compose Configuration
For manual setup, update your `docker-compose.yml`:
```yaml
services:
formbricks:
image: ghcr.io/formbricks/formbricks:latest
environment:
# ... other environment variables ...
# S3 Storage Configuration
S3_ACCESS_KEY: your_access_key
S3_SECRET_KEY: your_secret_key
S3_REGION: us-east-1
S3_BUCKET_NAME: your-bucket-name
S3_ENDPOINT_URL: https://your-endpoint.com # Optional
S3_FORCE_PATH_STYLE: 1 # For third-party services
volumes:
- uploads:/home/nextjs/apps/web/uploads/ # Still needed for temporary files
```
## Security Considerations
### S3 Bucket Permissions
Configure your S3 bucket with a least-privileged policy:
1. **Scoped Public Read Access**: Only allow public read access to specific prefixes where needed
2. **Restricted Write Access**: Only your Formbricks instance should be able to upload files
3. **CORS Configuration**: Allow requests from your Formbricks domain
Example least-privileged S3 bucket policy:
```json
{
"Statement": [
{
"Action": "s3:GetObject",
"Effect": "Allow",
"Principal": "*",
"Resource": "arn:aws:s3:::your-bucket-name/uploads/public/*",
"Sid": "PublicReadForPublicUploads"
},
{
"Action": ["s3:PutObject", "s3:PutObjectAcl"],
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::123456789012:user/formbricks-service"
},
"Resource": "arn:aws:s3:::your-bucket-name/*",
"Sid": "AllowFormbricksWrite"
}
],
"Version": "2012-10-17"
}
```
### MinIO Security
When using bundled MinIO:
- Credentials are auto-generated and secure
- Access is restricted through Traefik proxy
- CORS is automatically configured
- Rate limiting is applied to prevent abuse
- A bucket policy with the least privileges is applied to the bucket
## Troubleshooting
### Common Issues
**Files not uploading:**
1. Check that S3 credentials are correct
2. Verify bucket exists and is accessible
3. Ensure bucket permissions allow uploads from your server
4. Check network connectivity to S3 endpoint
**Images not displaying in surveys:**
1. Verify bucket has public read access
2. Check CORS configuration allows requests from your domain
3. Ensure S3_ENDPOINT_URL is correctly set for third-party services
**MinIO not starting:**
1. **Verify subdomain DNS**: Ensure `files.yourdomain.com` points to your server IP (this is the most common issue)
2. **Check DNS propagation**: Use tools like `nslookup` or `dig` to verify DNS resolution
3. **Verify ports**: Ensure ports 80 and 443 are open in your firewall
4. **SSL certificate**: Check that SSL certificate generation completed successfully
5. **Container logs**: Check Docker container logs: `docker compose logs minio`
### Testing Your Configuration
To test if file uploads are working:
1. **Admin Panel**: Try uploading a project logo in the project settings
2. **Survey Editor**: Attempt to add a background image to a survey
3. **Question Types**: Create a 'File Upload' or 'Picture Selection' question
4. **Check Logs**: Monitor container logs for any storage-related errors
```bash
# Check Formbricks logs
docker compose logs formbricks
# Check MinIO logs (if using bundled MinIO)
docker compose logs minio
```
For additional help, join the conversation on [GitHub Discussions](https://github.com/formbricks/formbricks/discussions).

View File

@@ -120,9 +120,7 @@ graph TD
## Redis Configuration
<Note>
Redis is required for Formbricks to function. The application will not start without a Redis URL configured.
</Note>
<Note>Redis is required for Formbricks to function. The application will not start without a Redis URL configured.</Note>
Configure Redis by adding the following **required** environment variable to your instances:
@@ -135,11 +133,13 @@ REDIS_URL=redis://your-redis-host:6379
Configure S3 storage by adding the following environment variables to your instances:
```sh env
# Required for file uploads in serverless environments
# Required
S3_BUCKET_NAME=your-bucket-name
# Optional - if not provided, AWS SDK will use defaults (us-east-1) or auto-detect
S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key
S3_REGION=your-region
S3_BUCKET_NAME=your-bucket-name
# For S3-compatible storage (e.g., StorJ, MinIO)
# Leave empty for Amazon S3

View File

@@ -11,8 +11,7 @@ The image is pre-built and requires minimal setup—just download it and start t
Make sure Docker and Docker Compose are installed on your system. These are usually included in tools like Docker Desktop and Rancher Desktop.
<Note>
`docker compose` without the hyphen is now the primary method of using docker-compose, according to the
Docker documentation.
`docker compose` without the hyphen is now the primary method of using docker-compose, according to the Docker documentation.
</Note>
## Start
@@ -30,7 +29,7 @@ Make sure Docker and Docker Compose are installed on your system. These are usua
Get the docker-compose file from the Formbricks repository by running:
```bash
curl -o docker-compose.yml https://raw.githubusercontent.com/formbricks/formbricks/stable/docker/docker-compose.yml
curl -o docker-compose.yml https://raw.githubusercontent.com/formbricks/formbricks/main/docker/docker-compose.yml
```
1. **Generate NextAuth Secret**
@@ -65,21 +64,21 @@ Make sure Docker and Docker Compose are installed on your system. These are usua
sed -i '' "s/ENCRYPTION_KEY:.*/ENCRYPTION_KEY: $(openssl rand -hex 32)/" docker-compose.yml
```
1. **Generate Cron Secret**
1. **Generate Cron Secret**
You require a Cron secret to secure API access for running cron jobs. Run one of the commands below based on your operating system:
You require a Cron secret to secure API access for running cron jobs. Run one of the commands below based on your operating system:
For Linux:
For Linux:
```bash
sed -i "/CRON_SECRET:$/s/CRON_SECRET:.*/CRON_SECRET: $(openssl rand -hex 32)/" docker-compose.yml
```
```bash
sed -i "/CRON_SECRET:$/s/CRON_SECRET:.*/CRON_SECRET: $(openssl rand -hex 32)/" docker-compose.yml
```
For macOS:
For macOS:
```bash
sed -i '' "s/CRON_SECRET:.*/CRON_SECRET: $(openssl rand -hex 32)/" docker-compose.yml
```
```bash
sed -i '' "s/CRON_SECRET:.*/CRON_SECRET: $(openssl rand -hex 32)/" docker-compose.yml
```
1. **Start the Docker Setup**

View File

@@ -9,34 +9,32 @@ icon: "rocket"
If you're looking to quickly set up a production instance of Formbricks on an Ubuntu server, this guide is for you. Using a convenient shell script, you can install everything—including Docker, Postgres DB, and an SSL certificate—in just a few steps. The script takes care of all the dependencies and configuration for your server, making the process smooth and simple.
<Note>
This setup uses **Traefik** as a **reverse proxy**, essential for directing incoming traffic to the correct
container and enabling secure internet access to Formbricks. Traefik is chosen for its simplicity and
automatic SSL management via Lets Encrypt.
This setup uses **Traefik** as a **reverse proxy**, essential for directing incoming traffic to the correct container and enabling secure internet access to Formbricks. Traefik is chosen for its simplicity and automatic SSL management via Let's Encrypt.
</Note>
For other operating systems or a more customized installation, please refer to the advanced installation guide with [Docker](/self-hosting/setup/docker).
### Requirements
- An Ubuntu Virtual Machine with SSH access.
* An Ubuntu Virtual Machine with SSH access.
- A custom domain with an **A record** pointing to your server.
* A custom domain with an **A record** pointing to your server.
- Ports **80** and **443** are open in your VM's Security Group, allowing Traefik to create an SSL certificate.
* Ports **80** and **443** are open in your VM's Security Group, allowing Traefik to create an SSL certificate.
### Deployment
Run this command in your terminal:
```bash
curl -fsSL https://raw.githubusercontent.com/formbricks/formbricks/stable/docker/formbricks.sh -o formbricks.sh && chmod +x formbricks.sh && ./formbricks.sh install
curl -fsSL https://raw.githubusercontent.com/formbricks/formbricks/main/docker/formbricks.sh -o formbricks.sh && chmod +x formbricks.sh && ./formbricks.sh install
```
### Script Prompts
During installation, the script will prompt you to provide some details:
- **Overwriting Docker GPG Keys**:
* **Overwriting Docker GPG Keys**:
If Docker GPG keys already exist, the script will ask whether you want to overwrite them.
```
@@ -52,7 +50,7 @@ During installation, the script will prompt you to provide some details:
File '/etc/apt/keyrings/docker.gpg' exists. Overwrite? (y/N)
```
- **Domain Name**:
* **Domain Name**:
Enter the domain name where you'll host Formbricks. The domain will be used to generate an SSL certificate. Do not include the protocol (http/https).
```
@@ -76,7 +74,7 @@ File '/etc/apt/keyrings/docker.gpg' exists. Overwrite? (y/N) y
🔗 Please enter your domain name for the SSL certificate (🚨 do NOT enter the protocol (http/https/etc)):
```
- **HTTPS Certificate Setup**:
* **HTTPS Certificate Setup**:
The script will ask if you'd like to create an HTTPS certificate for your domain. Enter `Y` to proceed (highly recommended for secure access).
```
@@ -102,7 +100,7 @@ my.hosted.url.com
🔗 Do you want us to set up an HTTPS certificate for you? [Y/n]
```
- **DNS Setup Prompt**: Ensure that your domain's DNS is correctly configured and ports 80 and 443 are open. Confirm this by entering `Y`. This step is crucial for proper SSL certificate issuance and secure server access.
* **DNS Setup Prompt**: Ensure that your domain's DNS is correctly configured and ports 80 and 443 are open. Confirm this by entering `Y`. This step is crucial for proper SSL certificate issuance and secure server access.
```
🚀 Executing default step of installing Formbricks
@@ -129,7 +127,7 @@ Y
🔗 Please make sure that the domain points to the server's IP address and that ports 80 & 443 are open in your server's firewall. Is everything set up? [Y/n]
```
- **Email Address for SSL Certificate**:
* **Email Address for SSL Certificate**:
Provide an email address to register the SSL certificate. Notifications regarding the certificate will be sent to this address.
```
@@ -159,7 +157,7 @@ Y
💡 Please enter your email address for the SSL certificate:
```
- **Enforce HTTPS with HSTS**:
* **Enforce HTTPS with HSTS**:
Enabling HTTP Strict Transport Security (HSTS) ensures all communication with your server is encrypted. It's a recommended best practice. Enter `Y` to enforce HTTPS.
```
@@ -191,7 +189,7 @@ docs@formbricks.com
🔗 Do you want to enforce HTTPS (HSTS)? [Y/n]
```
- **Email Service Setup Prompt**: The script will ask if you want to set up the email service. Enter `Y` to proceed.(default is `N`). You can skip this step if you don't want to set up the email service. You will still be able to use Formbricks without setting up the email service.
* **Email Service Setup Prompt**: The script will ask if you want to set up the email service. Enter `Y` to proceed (default is `N`). You can skip this step if you don't want to set up the email service. You will still be able to use Formbricks without setting up the email service.
```
🚀 Executing default step of installing Formbricks
@@ -269,7 +267,7 @@ Y
🚙 Updating docker-compose.yml with your custom inputs...
🚗 NEXTAUTH_SECRET updated successfully!
🚗 ENCRYPTION_KEY updated successfully!
🚗 CRON_SECRET updated successfully!
🚗 CRON_SECRET updated successfully!
[+] Running 4/4
✔ Network formbricks_default Created 0.2s
@@ -312,6 +310,18 @@ To restart Formbricks, simply run the following command:
The script will automatically restart all the Formbricks related containers and brings the entire stack up with the previous configuration.
## Cleanup MinIO init (optional)
During the one-click setup, a temporary `minio-init` service configures MinIO (bucket, policy, service user). It is idempotent and safe to leave in place; it will do nothing on subsequent starts once configuration exists.
If you prefer to remove the `minio-init` service and its references after a successful setup, run:
```
./formbricks.sh cleanup-minio-init
```
This only removes the init job and its Compose references; it does not delete any data or affect your MinIO configuration.
## Uninstall
To uninstall Formbricks, simply run the following command, but keep in mind that this will delete all your data!
@@ -334,13 +344,13 @@ If you encounter any issues, you can check the logs of the containers with:
If you encounter any issues, consider the following steps:
- **Inbound Rules**: Make sure you have added inbound rules for Port 80 and 443 in your VM's Security Group.
* **Inbound Rules**: Make sure you have added inbound rules for Port 80 and 443 in your VM's Security Group.
- **A Record**: Verify that you have set up an A record for your domain, pointing to your VM's IP address.
* **A Record**: Verify that you have set up an A record for your domain, pointing to your VM's IP address.
- **Check Docker Instances**: Run `docker ps` to check the status of the Docker instances.
* **Check Docker Instances**: Run `docker ps` to check the status of the Docker instances.
- **Check Formbricks Logs**: Run `cd formbricks && docker compose logs` to check the logs of the Formbricks stack.
* **Check Formbricks Logs**: Run `cd formbricks && docker compose logs` to check the logs of the Formbricks stack.
If you have any questions or require help, feel free to reach out to us on [**GitHub Discussions**](https://github.com/formbricks/formbricks/discussions). 😃[
](https://formbricks.com/docs/developer-docs/rest-api)

View File

@@ -4,16 +4,14 @@ description: "Branding the emails that are sent to your respondents."
icon: "envelope"
---
<Note>
**Self-Hosting Requirements**: Uploading custom organization logos for emails requires file upload storage
to be configured. If you're self-hosting Formbricks, make sure to [configure file
uploads](/self-hosting/configuration/file-uploads) before using this feature.
</Note>
Email branding is a white-label feature that allows you to customize the email that is sent to your users. You can upload a logo of your company and use it in the email.
<Note>Email branding is part of the Formbricks [Enterprise Edition](/self-hosting/advanced/license).</Note>
<Info>Only the Owner and Managers of the organization can modify the logo.</Info>
<Note>
Email branding is part of the Formbricks [Enterprise Edition](/self-hosting/advanced/license).
</Note>
<Info>
Only the Owner and Managers of the organization can modify the logo.
</Info>
## How to upload a logo

View File

@@ -6,14 +6,12 @@ description: "A step-by-step guide to integrate Airtable with Formbricks Cloud."
The Airtable integration allows you to automatically send responses to an Airtable of your choice.
<Note>
If you are on a self-hosted instance, you will need to configure this integration separately. Please follow
the guides [here](/self-hosting/configuration/integrations) to configure integrations on your self-hosted
instance.
If you are on a self-hosted instance, you will need to configure this integration separately. Please follow the guides [here](/self-hosting/configuration/integrations) to configure integrations on your self-hosted instance.
</Note>
## Formbricks Cloud
1. Click on the `Configuration` tab in the left sidebar and then click on the `Integrations` tab and click on the `connect` button under the `Airtable` card.
1. Go to the Integrations tab in your [Formbricks Cloud dashboard](https://app.formbricks.com/) and click on the "Connect" button under Airtable integration.
![Formbricks Integration Tab](/images/xm-and-surveys/core-features/integrations/airtable/integrations-tab.webp)
@@ -30,8 +28,7 @@ The Airtable integration allows you to automatically send responses to an Airtab
![Formbricks is now connected with Google](/images/xm-and-surveys/core-features/integrations/airtable/airtable-connected.webp)
<Note>
Before the next step, make sure that you have a Formbricks Survey with at least one question and a Airtable
base with atleast one table in the Airtable account you integrated.
Before the next step, make sure that you have a Formbricks Survey with at least one question and an Airtable base with at least one table in the Airtable account you integrated.
</Note>
1. Now click on the "Link New Table" button to link an Airtable with Formbricks and a modal will open up.
@@ -64,4 +61,4 @@ To remove the integration with Airtable,
![Delete Airtable Integration with Formbricks](/images/xm-and-surveys/core-features/integrations/airtable/delete-integration.webp)
Still struggling or something not working as expected? [Join our Github Discussions](https://github.com/formbricks/formbricks/discussions) and we'd be glad to assist you!
Still struggling or something not working as expected? [Join our Github Discussions](https://github.com/formbricks/formbricks/discussions) and we'd be glad to assist you!

View File

@@ -1,17 +1,16 @@
---
title: "Google Sheets"
description: "The Google Sheets integration allows you to automatically send responses to a Google Sheet of your choice."
description:
"The Google Sheets integration allows you to automatically send responses to a Google Sheet of your choice."
---
<Note>
If you are on a self-hosted instance, you will need to configure this integration separately. Please follow
the guides [here](/self-hosting/configuration/integrations) to configure integrations on your self-hosted
instance.
If you are on a self-hosted instance, you will need to configure this integration separately. Please follow the guides [here](/self-hosting/configuration/integrations) to configure integrations on your self-hosted instance.
</Note>
## Connect Google Sheets
1. Click on the `Configuration` tab in the left sidebar and then click on the `Integrations` tab and click on the `connect` button under the `Google Sheets` card.
1. Go to the Integrations tab in your [Formbricks Cloud dashboard](https://app.formbricks.com/) and click on the "Connect" button under Google Sheets integration.
![Formbricks Integrations Tab](/images/xm-and-surveys/core-features/integrations/google-sheets/integrations-tab.webp)
@@ -26,8 +25,7 @@ description: "The Google Sheets integration allows you to automatically send res
![Formbricks is now connected with Google](/images/xm-and-surveys/core-features/integrations/google-sheets/google-connected.webp)
<Note>
Before the next step, make sure that you have a Formbricks Survey with at least one question and a Google
Sheet in the Google account you integrated.
Before the next step, make sure that you have a Formbricks Survey with at least one question and a Google Sheet in the Google account you integrated.
</Note>
1. Now click on the "Link New Sheet" button to link a Google Sheet with Formbricks and a modal will open up.
@@ -60,11 +58,11 @@ To remove the integration with Google Account,
## What info do you need?
- Your **Email ID** for authentication (We use this to identify you)
* Your **Email ID** for authentication (We use this to identify you)
- Your **Google Sheets Names and IDs** (We fetch this to list and show you the options of choosing a sheet to integrate with)
* Your **Google Sheets Names and IDs** (We fetch this to list and show you the options of choosing a sheet to integrate with)
- Write access to **selected Google Sheet** (The google sheet you choose to integrate it with, we write survey responses to it)
* Write access to **selected Google Sheet** (The google sheet you choose to integrate it with, we write survey responses to it)
For the above, we ask for:
@@ -74,4 +72,4 @@ For the above, we ask for:
<Note>We store as little personal information as possible.</Note>
Still struggling or something not working as expected? [Join our Github Discussions](https://github.com/formbricks/formbricks/discussions) and we'd be glad to assist you!
Still struggling or something not working as expected? [Join our Github Discussions](https://github.com/formbricks/formbricks/discussions) and we'd be glad to assist you!

View File

@@ -1,17 +1,16 @@
---
title: "Notion"
description: "The notion integration allows you to automatically send responses to a Notion database of your choice."
description:
"The notion integration allows you to automatically send responses to a Notion database of your choice."
---
<Note>
If you are on a self-hosted instance, you will need to configure this integration separately. Please follow
the guides [here](/self-hosting/configuration/integrations) to configure integrations on your self-hosted
instance.
If you are on a self-hosted instance, you will need to configure this integration separately. Please follow the guides [here](/self-hosting/configuration/integrations) to configure integrations on your self-hosted instance.
</Note>
## Formbricks Cloud
1. Click on the `Configuration` tab in the left sidebar and then click on the `Integrations` tab and click on the `connect` button under the `Notion` card.
1. Go to the Integrations tab in your [Formbricks Cloud dashboard](https://app.formbricks.com/) and click on the "Connect" button under Notion integration.
![Formbricks Integrations Tab](/images/xm-and-surveys/core-features/integrations/notion/integrations-tab.webp)
@@ -26,8 +25,8 @@ description: "The notion integration allows you to automatically send responses
![Formbricks is now connected with Notion](/images/xm-and-surveys/core-features/integrations/notion/notion-connected.webp)
<Note>
Before the next step, make sure that you have a Formbricks Survey with at least one question and a Notion
database in the Notion account you integrated.
Before the next step, make sure that you have a Formbricks Survey with at
least one question and a Notion database in the Notion account you integrated.
</Note>
1. Now click on the "Link New Database" button to link a Notion database with Formbricks and a modal will open up.
@@ -58,17 +57,17 @@ Enabling the Notion Integration in a self-hosted environment requires a setup us
5. Now provide it the details such as requested. Under **Redirect URIs** field:
- If you are running formbricks locally, you can enter `http://localhost:3000/api/v1/integrations/notion/callback`.
* If you are running formbricks locally, you can enter `http://localhost:3000/api/v1/integrations/notion/callback`.
- Or, you can enter `https://<your-public-facing-url>/api/v1/integrations/notion/callback`
* Or, you can enter `https://<your-public-facing-url>/api/v1/integrations/notion/callback`
6. Once you've filled all the necessary details, click on **Submit**.
7. A screen will appear which will have **Client ID** and **Client secret**. Copy them and set them as the environment variables in your Formbricks instance as:
- `NOTION_OAUTH_CLIENT_ID` - OAuth Client ID
* `NOTION_OAUTH_CLIENT_ID` - OAuth Client ID
- `NOTION_OAUTH_CLIENT_SECRET` - OAuth Client Secret
* `NOTION_OAUTH_CLIENT_SECRET` - OAuth Client Secret
Voila! You have successfully enabled the Notion integration in your self-hosted Formbricks instance. Now you can follow the steps mentioned in the [Formbricks Cloud](#formbricks-cloud) section to link a Notion database with Formbricks.
@@ -86,4 +85,4 @@ To remove the integration with Slack Workspace,
![Delete Notion Integration with Formbricks](/images/xm-and-surveys/core-features/integrations/notion/delete-connection.webp)
Still struggling or something not working as expected? [Join our Github Discussions](https://github.com/formbricks/formbricks/discussions) and we'd be glad to assist you!
Still struggling or something not working as expected? [Join our Github Discussions](https://github.com/formbricks/formbricks/discussions) and we'd be glad to assist you!

View File

@@ -10,7 +10,7 @@ description:
## Formbricks Cloud
1. Click on the `Configuration` tab in the left sidebar and then click on the `Integrations` tab and click on the `connect` button under the `Slack` card.
1. Go to the Integrations tab in your [Formbricks Cloud dashboard](https://app.formbricks.com/) and click on the "Connect" button under Slack integration.
![Formbricks Integrations Tab](/images/xm-and-surveys/core-features/integrations/slack/integrations-tab.webp)

View File

@@ -22,9 +22,9 @@ You can create webhooks either through the **Formbricks App UI** or programmatic
## **Creating Webhooks via UI**
- **Log in to Formbricks**
and click on the `Configuration` tab in the left sidebar and then click on the `Integrations` tab.
Navigate to the **Integrations** Tab after logging in.
![Step one](/images/xm-and-surveys/core-features/integrations/webhooks/integrations-tab.webp)
![Step one](https://res.cloudinary.com/dwdb9tvii/image/upload/v1738093544/mugcz9gn3wxg2cucq6wj.webp)
- Click on **Manage Webhooks** & then **Add Webhook** button:
@@ -58,130 +58,133 @@ Example of Response Created webhook payload:
```json
[
{
"data": {
"contact": null,
"contactAttributes": null,
"createdAt": "2025-07-24T07:47:29.507Z",
"data": {
"q1": "clicked"
},
"displayId": "displayId",
"endingId": null,
"finished": false,
"id": "responseId",
"language": "en",
"meta": {
"country": "DE",
"url": "https://app.formbricks.com/s/surveyId",
"userAgent": {
"browser": "Chrome",
"device": "desktop",
"os": "macOS"
{
"webhookId": "webhookId",
"event": "responseCreated",
"data": {
"id": "responseId",
"createdAt": "2025-07-24T07:47:29.507Z",
"updatedAt": "2025-07-24T07:47:29.507Z",
"surveyId": "surveyId",
"displayId": "displayId",
"contact": null,
"contactAttributes": null,
"finished": false,
"endingId": null,
"data": {
"q1": "clicked"
},
"variables": {},
"ttc": {
"q1": 2154.700000047684
},
"tags": [],
"meta": {
"url": "https://app.formbricks.com/s/surveyId",
"userAgent": {
"browser": "Chrome",
"os": "macOS",
"device": "desktop"
},
"country": "DE"
},
"singleUseId": null,
"language": "en"
}
},
"singleUseId": null,
"surveyId": "surveyId",
"tags": [],
"ttc": {
"q1": 2154.700000047684
},
"updatedAt": "2025-07-24T07:47:29.507Z",
"variables": {}
},
"event": "responseCreated",
"webhookId": "webhookId"
}
}
]
```
### Response Updated
Example of Response Updated webhook payload:
```json
[
{
"data": {
"contact": null,
"contactAttributes": null,
"createdAt": "2025-07-24T07:47:29.507Z",
"data": {
"q1": "clicked",
"q2": "Just browsing"
},
"displayId": "displayId",
"endingId": null,
"finished": false,
"id": "responseId",
"language": "en",
"meta": {
"country": "DE",
"url": "https://app.formbricks.com/s/surveyId",
"userAgent": {
"browser": "Chrome",
"device": "desktop",
"os": "macOS"
{
"webhookId": "webhookId",
"event": "responseUpdated",
"data": {
"id": "responseId",
"createdAt": "2025-07-24T07:47:29.507Z",
"updatedAt": "2025-07-24T07:47:33.696Z",
"surveyId": "surveyId",
"displayId": "displayId",
"contact": null,
"contactAttributes": null,
"finished": false,
"endingId": null,
"data": {
"q1": "clicked",
"q2": "Just browsing"
},
"variables": {},
"ttc": {
"q1": 2154.700000047684,
"q2": 3855.799999952316
},
"tags": [],
"meta": {
"url": "https://app.formbricks.com/s/surveyId",
"userAgent": {
"browser": "Chrome",
"os": "macOS",
"device": "desktop"
},
"country": "DE"
},
"singleUseId": null,
"language": "en"
}
},
"singleUseId": null,
"surveyId": "surveyId",
"tags": [],
"ttc": {
"q1": 2154.700000047684,
"q2": 3855.799999952316
},
"updatedAt": "2025-07-24T07:47:33.696Z",
"variables": {}
},
"event": "responseUpdated",
"webhookId": "webhookId"
}
}
]
```
### Response Finished
Example of Response Finished webhook payload:
```json
[
{
"data": {
"contact": null,
"contactAttributes": null,
"createdAt": "2025-07-24T07:47:29.507Z",
"data": {
"q1": "clicked",
"q2": "accepted"
},
"displayId": "displayId",
"endingId": "endingId",
"finished": true,
"id": "responseId",
"language": "en",
"meta": {
"country": "DE",
"url": "https://app.formbricks.com/s/surveyId",
"userAgent": {
"browser": "Chrome",
"device": "desktop",
"os": "macOS"
{
"webhookId": "webhookId",
"event": "responseFinished",
"data": {
"id": "responseId",
"createdAt": "2025-07-24T07:47:29.507Z",
"updatedAt": "2025-07-24T07:47:56.116Z",
"surveyId": "surveyId",
"displayId": "displayId",
"contact": null,
"contactAttributes": null,
"finished": true,
"endingId": "endingId",
"data": {
"q1": "clicked",
"q2": "accepted"
},
"variables": {},
"ttc": {
"_total": 4947.899999035763,
"q1": 2154.700000047684,
"q2": 2793.199999988079
},
"tags": [],
"meta": {
"url": "https://app.formbricks.com/s/surveyId",
"userAgent": {
"browser": "Chrome",
"os": "macOS",
"device": "desktop"
},
"country": "DE"
},
"singleUseId": null,
"language": "en"
}
},
"singleUseId": null,
"surveyId": "surveyId",
"tags": [],
"ttc": {
"_total": 4947.899999035763,
"q1": 2154.700000047684,
"q2": 2793.199999988079
},
"updatedAt": "2025-07-24T07:47:56.116Z",
"variables": {}
},
"event": "responseFinished",
"webhookId": "webhookId"
}
}
]
```

View File

@@ -1,6 +1,7 @@
---
title: "Wordpress"
description: "Target specific visitors with a survey on your WordPress page using Formbricks for free. Show survey on specific page or on button click."
description:
"Target specific visitors with a survey on your WordPress page using Formbricks for free. Show survey on specific page or on button click."
---
To run a targeted survey on your WordPress website, Formbricks is the way to go!
When you see this screen, you're there:
## Step 3: Find and copy the environmentId
Go to `Configuration` > `Website & App Connection` where youll find your environmentId:
Go to Settings > Setup Checklist where you'll find your environmentId:
![Run targeted surveys for free on WordPress pages](/images/xm-and-surveys/core-features/integrations/wordpress/3-wordpress-setup-survey-on-website-targeted-free-open-source.webp)
@@ -79,4 +80,4 @@ You did it! Reload the WordPress page and your survey should appear!
## Doesn't work?
If you have any questions or need help, feel free to reach out to us on [Github Discussions](https://github.com/formbricks/formbricks/discussions)
If you have any questions or need help, feel free to reach out to us on [Github Discussions](https://github.com/formbricks/formbricks/discussions)

View File

@@ -1,15 +1,10 @@
---
title: "Styling Theme"
description: "Keep the survey styling consistent over all surveys with a Styling Theme. Customize the colors, fonts, and other styling options to match your brand's aesthetic."
description:
"Keep the survey styling consistent over all surveys with a Styling Theme. Customize the colors, fonts, and other styling options to match your brand's aesthetic."
icon: "palette"
---
<Note>
**Self-Hosting Requirements**: Uploading custom background images and brand logos requires file upload
storage to be configured. If you're self-hosting Formbricks, make sure to [configure file
uploads](/self-hosting/configuration/file-uploads) before using these features.
</Note>
Keep the survey styling consistent over all surveys with a Styling Theme. Customize the colors, fonts, and other styling options to match your brand's aesthetic.
## Configuration
@@ -25,6 +20,7 @@ In the left side bar, you find the `Configuration` page. On this page you find t
![Form styling options UI](/images/xm-and-surveys/core-features/styling-theme/form-settings.webp)
- **Brand Color**: Sets the primary color tone of the survey.
- **Text Color**: This is a single color scheme that will be used across to display all the text on your survey. Ensures all text is readable against the background.
- **Input Color:** Alters the border color of input fields.
@@ -67,14 +63,17 @@ Customize your survey with your brand's logo.
![Choose a link survey template](/images/xm-and-surveys/core-features/styling-theme/step-five.webp)
3. Add a background color: If youve uploaded a transparent image and want to add background to it, enable this toggle and select the color of your choice.
![Choose a link survey template](/images/xm-and-surveys/core-features/styling-theme/step-six.webp)
4. Remember to save your changes!
![Choose a link survey template](/images/xm-and-surveys/core-features/styling-theme/step-seven.webp)
<Note>The logo settings apply across all Link Surveys pages.</Note>
## Overwrite Styling Theme

View File

@@ -9,10 +9,10 @@ Add new members to your Formbricks organization to collaborate on surveys and ma
## Prerequisites
To invite members, you need:
- **Owner** or **Manager** role in the organization
- Valid email addresses for the people you want to invite
## Individual invitations
Use this method when inviting a few people or when you need to carefully control each invitation.
@@ -22,21 +22,18 @@ Use this method when inviting a few people or when you need to carefully control
<Steps>
<Step title="Navigate to Organization Settings > Access Control">
Go to the organization settings page and click on the "Access Control" tab.
![Access Control Tab](/images/xm-and-surveys/core-features/access-roles/access-control.webp)
</Step>
<Step title="Start the invitation process">
Click on the `Add member` button:
![Add member Button Position](/images/xm-and-surveys/core-features/access-roles/add-member.webp)
</Step>
<Step title="Fill in member details">
In the modal, add the Name, Email and Role of the organization member you want to invite:
![Individual Invite Modal Tab](/images/xm-and-surveys/core-features/access-roles/individual-invite.webp)
</Step>
<Step title="Send the invitation">
@@ -65,14 +62,12 @@ Use bulk invitations when you need to invite many people at once, such as when o
Click on the `Add member` button:
![Add member Button Position](/images/xm-and-surveys/core-features/access-roles/add-member.webp)
</Step>
<Step title="Switch to bulk invite">
In the modal, switch to `Bulk Invite`. You can download an example .CSV file to fill in the Name, Email and Role of the organization members you want to invite:
![Individual Invite Modal Tab](/images/xm-and-surveys/core-features/access-roles/bulk-invite.webp)
</Step>
<Step title="Prepare your CSV file">
@@ -104,7 +99,6 @@ Use bulk invitations when you need to invite many people at once, such as when o
### Invitation status
Monitor the status of your invitations:
- **Pending**: Invitation sent but not yet accepted
- **Accepted**: User has joined the organization
- **Expired**: Invitation has expired and needs to be resent
- **Expired**: Invitation has expired and needs to be resent

View File

@@ -4,12 +4,6 @@ description: "Enhance your questions by adding images or videos. This makes inst
icon: "image"
---
<Note>
**Self-Hosting Requirements**: Adding images to questions requires file upload storage to be configured. If
you're self-hosting Formbricks, make sure to [configure file
uploads](/self-hosting/configuration/file-uploads) before using this feature.
</Note>
## How to Add Images
Click the icon on the right side of the question to add an image or video:
@@ -31,6 +25,6 @@ Toggle to add a video via link:
We support YouTube, Vimeo, and Loom URLs.
<Note>
**YouTube Privacy Mode**: This option reduces tracking by converting YouTube URLs to no-cookie URLs. It only
works with YouTube.
**YouTube Privacy Mode**: This option reduces tracking by converting YouTube
URLs to no-cookie URLs. It only works with YouTube.
</Note>

View File

@@ -8,6 +8,10 @@ icon: "chart-pie"
Quota Management allows you to set limits on the number of responses collected for specific segments or criteria in your survey. This feature helps ensure you collect a balanced and representative dataset while preventing oversaturation of certain response types.
<Note type="warning">
Quota Management is currently in beta and only available to select customers.
</Note>
<Note>
Quota Management is part of the [Enterprise Edition](/self-hosting/advanced/license).
</Note>

View File

@@ -0,0 +1,34 @@
---
title: "Schedule Start & End Dates"
description: "Optimize your survey management with custom Start & End Conditions in Formbricks. This feature allows you to control exactly when your survey is available for responses and when it should close, making it ideal for time-sensitive or number-of-response-limited surveys."
icon: "calendar-days"
---
Configure your surveys to open and close based on specific criteria. Here's how to set up these conditions:
## **Schedule a Survey Release**
- **How to**: Open the Survey Editor, switch to the Settings tab. Scroll down to Response Options, Toggle the “Release Survey on Date”.
![Choose a link survey template](/images/xm-and-surveys/surveys/general-features/schedule-start-end-dates/step-one.webp)
- **Details**: Choose the date and time when the survey should become available to respondents. All times follow UTC timezone.
- **Use Case**: This is useful for launching surveys in alignment with events, product releases, or specific marketing campaigns.
## **Automatically Closing a Survey**
- **How to**: Open the Survey Editor, switch to the Settings tab. Scroll down to Response Options, Toggle the “Close survey on date”.
![Choose a link survey template](/images/xm-and-surveys/surveys/general-features/schedule-start-end-dates/step-three.webp)
- **Details**: Define a specific date and time for the survey to close. This also follows UTC timezone.
- **Use Case**: Essential for surveys linked to time-bound events or studies where data collection needs to end
at a specific point.
### **Summary**
Setting up Start & End Dates in Formbricks allows you to control the availability and duration of your surveys with precision. Whether you are conducting academic research, market analysis, or gathering event feedback, these settings help ensure that your data collection aligns perfectly with your objectives.
---

View File

@@ -4,12 +4,6 @@ description: "Customize link titles, descriptions, and preview images to make yo
icon: "gear"
---
<Note>
**Self-Hosting Requirements**: Adding a preview image requires file upload storage to be configured. If
you're self-hosting Formbricks, make sure to [configure file
uploads](/self-hosting/configuration/file-uploads) before using this feature.
</Note>
## What are Link Settings?
Link Settings allow you to configure the metadata (Open Graph tags) for your survey links, controlling how they appear when shared:
@@ -20,6 +14,7 @@ Link Settings allow you to configure the metadata (Open Graph tags) for your sur
![Link Settings](/images/xm-and-surveys/surveys/link-surveys/link-settings/link-settings.webp)
## Configuring Link Metadata
<Steps>
@@ -27,22 +22,21 @@ Link Settings allow you to configure the metadata (Open Graph tags) for your sur
Navigate to your survey's Summary page and click the **Share survey** button in the top toolbar.
</Step>
<Step title="Open Link Settings tab">
In the Share Modal, click on the **Link Settings** tab to access the customization options.
</Step>
<Step title="Open Link Settings tab">
In the Share Modal, click on the **Link Settings** tab to access the customization options.
</Step>
<Step title="Customize your link title">
Enter a title for your survey link. This will appear as the main headline when your link is shared.
</Step>
<Step title="Customize your link title">
Enter a title for your survey link. This will appear as the main headline when your link is shared.
</Step>
<Step title="Add a link description">
Write a brief description for your survey. This will appear as the description of your Survey Link.
</Step>
<Step title="Add a link description">
Write a brief description for your survey. This will appear as the description of your Survey Link.
</Step>
<Step title="Upload a preview image">
Add a custom image that will display when your link is shared. This makes your survey more visually
appealing and can increase engagement.
</Step>
<Step title="Upload a preview image">
Add a custom image that will display when your link is shared. This makes your survey more visually appealing and can increase engagement.
</Step>
<Step title="Save your settings">
Click **Save** to apply your link settings. These changes will take effect immediately for all future link shares.

View File

@@ -4,12 +4,6 @@ description: "The File Upload question type allows respondents to upload files r
icon: "upload"
---
<Note>
**Self-Hosting Requirements**: This question type requires file upload storage to be configured. If you're
self-hosting Formbricks, make sure to [configure file uploads](/self-hosting/configuration/file-uploads)
before using this feature.
</Note>
<iframe
title="Survey Embed"
src="https://app.formbricks.com/s/oo4e6vva48w0trn01ht8krwo"

View File

@@ -1,15 +1,10 @@
---
title: "Picture Selection"
description: "Picture selection questions allow respondents to select one or more images from a list"
description:
"Picture selection questions allow respondents to select one or more images from a list"
icon: "image"
---
<Note>
**Self-Hosting Requirements**: This question type requires file upload storage to be configured for image
uploads. If you're self-hosting Formbricks, make sure to [configure file
uploads](/self-hosting/configuration/file-uploads) before using this feature.
</Note>
Picture selection questions allow respondents to select one or more images from a list. Displays a title and a list of images for the respondent to choose from.
<iframe
@@ -29,7 +24,6 @@ Picture selection questions allow respondents to select one or more images from
## Elements
![Overview of Picture Selection question type](/images/xm-and-surveys/core-features/question-type/select-picture.webp)
### Title
Add a clear title to inform the respondent what information you are asking for.

View File

@@ -60,7 +60,7 @@ Formbricks offers an intuitive No-Code interface that allows you to configure ac
</Note>
<Steps>
<Step title="Visit the Configuration tab and click on the `Website & App Connection`">
<Step title="Visit the Actions tab via the main navigation">
![Action overview on Formbricks Open Source Survey Solution](/images/xm-and-surveys/surveys/website-app-surveys/actions/actions-view.webp "Action overview on Formbricks Open Source Survey Solution")
</Step>

View File

@@ -4,10 +4,9 @@
### Redis-Only Architecture
- **Mandatory Redis**: All deployments MUST use Redis via `REDIS_URL` environment variable
- **Singleton Client**: Use `getCacheService()` - returns singleton instance per process using `globalThis`
- **Singleton Client**: Use `getCacheService()` - returns singleton instance per process
- **Result Types**: Core operations return `Result<T, CacheError>` for explicit error handling
- **Never-Failing Wrappers**: `withCache()` always returns function result, handling cache errors internally
- **Cross-Platform**: Uses `globalThis` for Edge Runtime, Lambda, and HMR compatibility
### Type Safety & Validation
- **Branded Cache Keys**: Use `CacheKey` type to prevent raw string usage
@@ -18,41 +17,35 @@
```text
src/
├── index.ts # Main exports
├── client.ts # globalThis singleton with getCacheService()
├── service.ts # CacheService class with Result types + withCache
├── cache-keys.ts # Cache key generators with branded types
├── cache-integration.test.ts # E2E tests exercising Redis operations
├── index.ts # Main exports (getCacheService, createCacheKey, types)
├── client.ts # Singleton cache service client with Redis connection
├── service.ts # Core CacheService class with Result types + withCache helpers
├── cache-keys.ts # Cache key generators with branded types
├── utils/
│ ├── validation.ts # Zod validation utilities
│ └── key.ts # makeCacheKey utility (not exported)
└── *.test.ts # Unit tests
│ ├── validation.ts # Zod validation utilities
│ └── key.ts # makeCacheKey utility (not exported)
└── *.test.ts # Unit tests
types/
├── keys.ts # Branded CacheKey type & CustomCacheNamespace
├── client.ts # RedisClient type definition
├── service.ts # Zod schemas and validateInputs function
├── error.ts # Result type system and error definitions
└── *.test.ts # Type tests
├── keys.ts # Branded CacheKey type & CustomCacheNamespace
├── client.ts # RedisClient type definition
├── service.ts # Zod schemas and validateInputs function
├── error.ts # Result type system and error definitions
└── *.test.ts # Type tests
```
## Required Patterns
### globalThis Singleton Pattern
### Singleton Client Pattern
```typescript
// ✅ GOOD - Use globalThis singleton client
// ✅ GOOD - Use singleton client
import { getCacheService } from "@formbricks/cache";
const result = await getCacheService();
if (!result.ok) {
// Handle initialization error - Redis connection failed
logger.error({ error: result.error }, "Cache service unavailable");
// Handle initialization error
throw new Error(`Cache failed: ${result.error.code}`);
}
const cacheService = result.data;
// ✅ GOOD - Production validation (index.ts)
import { validateRedisConfig } from "@formbricks/cache";
validateRedisConfig(); // Throws if REDIS_URL missing in production
// ❌ BAD - CacheService class not exported for direct instantiation
import { CacheService } from "@formbricks/cache"; // Won't work!
```
@@ -78,10 +71,6 @@ const environmentData = await cacheService.withCache(
createCacheKey.environment.state(environmentId),
60000
); // Returns T directly, handles cache errors internally
// ✅ GOOD - Structured logging with context first
logger.error({ error, key, operation: "cache_get" }, "Cache operation failed");
logger.warn({ error }, "Cache unavailable; executing function directly");
```
### Core Validation & Error Types
@@ -102,7 +91,7 @@ export const ZCacheKey = z.string().min(1).refine(k => k.trim().length > 0);
// TTL validation: min 1000ms for Redis seconds conversion
export const ZTtlMs = z.number().int().min(1000).finite();
// Generic validation function (returns array of validated values)
// Generic validation function
export function validateInputs(...pairs: [unknown, ZodType][]): Result<unknown[], CacheError>;
```
@@ -148,21 +137,10 @@ await cacheService.exists(key): Promise<Result<boolean, CacheError>>
// withCache never fails - returns T directly, handles cache errors internally
await cacheService.withCache<T>(fn, key, ttlMs): Promise<T>
// Redis availability check with ping test (standardized across codebase)
await cacheService.isRedisAvailable(): Promise<boolean>
// Direct Redis access for advanced operations (rate limiting, etc.)
cacheService.getRedisClient(): RedisClient | null
```
### Redis Availability Method
Standardized Redis connectivity check across the codebase.
**Method Implementation:**
- `isRedisAvailable()`: Checks client state (`isReady && isOpen`) + Redis ping test
- Returns `Promise<boolean>` - true if Redis is available and responsive
- Used for health monitoring, status checks, and external validation
### Service Implementation - Cognitive Complexity Reduction
The `withCache` method is split into helper methods to reduce cognitive complexity:
@@ -245,42 +223,13 @@ return await fn(); // Always return function result
## Testing Patterns
### Unit Tests (*.test.ts)
### Key Test Areas
- **Result error cases**: Validation, Redis, corruption errors
- **Null vs undefined**: Caching behavior differences
- **withCache fallbacks**: Cache failures gracefully handled
- **Edge cases**: Empty arrays, invalid TTLs, malformed keys
- **Mock dependencies**: Redis client, logger with all levels
### Integration Tests (cache-integration.test.ts)
- **End-to-End Redis Operations**: Tests against live Redis instance
- **Auto-Skip Logic**: Automatically skips when Redis unavailable (`REDIS_URL` not set)
- **Comprehensive Coverage**: All cache operations through real code paths
- **CI Integration**: Runs in E2E workflow with Redis/Valkey service
- **Logger Integration**: Uses `@formbricks/logger` with structured logging
```typescript
// ✅ Integration test pattern
describe("Cache Integration Tests", () => {
beforeAll(async () => {
isRedisAvailable = await checkRedisAvailability();
if (!isRedisAvailable) {
logger.info("🟡 Tests skipped - Redis not available");
return;
}
logger.info("🟢 Tests will run - Redis available");
});
test("withCache miss/hit pattern", async () => {
if (!isRedisAvailable) {
logger.info("Skipping test: Redis not available");
return;
}
// Test cache miss -> hit behavior with real Redis
});
});
```
## Web App Integration Pattern
### Cache Facade (apps/web/lib/cache/index.ts)
@@ -306,41 +255,37 @@ const redis = await cache.getRedisClient();
```
### Proxy Implementation
- **Lazy Initialization**: Calls `getCacheService()` for each operation via Proxy
- **Graceful Degradation**: `withCache` falls back to direct execution on cache failure
- **No Singleton Management**: Calls `getCacheService()` for each operation
- **Proxy Pattern**: Transparent method forwarding to underlying cache service
- **Graceful Degradation**: withCache falls back to direct execution on cache failure
- **Server-Only**: Uses "server-only" import to prevent client-side usage
- **Production Validation**: Validates `REDIS_URL` at module initialization
## Architecture Updates
## Import/Export Standards
### globalThis Singleton (client.ts)
```typescript
// Cross-platform singleton using globalThis (not global)
const globalForCache = globalThis as unknown as {
formbricksCache: CacheService | undefined;
formbricksCacheInitializing: Promise<Result<CacheService, CacheError>> | undefined;
};
// ✅ GOOD - Package root exports (index.ts)
export { getCacheService } from "./client";
export type { CacheService } from "./service";
export { createCacheKey } from "./cache-keys";
export type { CacheKey } from "../types/keys";
export type { Result, CacheError } from "../types/error";
export { CacheErrorClass, ErrorCode } from "../types/error";
// Prevents multiple Redis connections in HMR/serverless/Edge Runtime
export async function getCacheService(): Promise<Result<CacheService, CacheError>>;
// ❌ BAD - Don't export these (encapsulation)
// export { createRedisClientFromEnv } from "./client"; // Internal only
// export type { RedisClient } from "../types/client"; // Internal only
// export { CacheService } from "./service"; // Only type exported
```
### Fast-Fail Connection Strategy
- **No Reconnection in Factory**: Redis client uses fast-fail connection
- **Background Reconnection**: Handled by Redis client's built-in retry logic
- **Early Checks**: `isReady` check at method start to avoid 1-second timeouts
- **Graceful Degradation**: `withCache` executes function when cache unavailable
## Key Rules Summary
1. **globalThis Singleton**: Use `getCacheService()` - cross-platform singleton
2. **Result Types**: Core ops return `Result<T, CacheError>` - no throwing
1. **Singleton Client**: Use `getCacheService()` - returns singleton per process
2. **Result Types**: Core ops return `Result<T, CacheError>` - no throwing
3. **Never-Failing withCache**: Returns `T` directly, handles cache errors internally
4. **Standardized Redis Check**: Use `isRedisAvailable()` method with ping test
5. **Structured Logging**: Context object first, then message string
6. **Fast-Fail Strategy**: Early Redis availability checks, no blocking timeouts
7. **Integration Testing**: E2E tests with auto-skip logic for development
8. **Production Validation**: Mandatory `REDIS_URL` with startup validation
9. **Cross-Platform**: Uses `globalThis` for Edge Runtime/Lambda compatibility
10. **CI Integration**: Cache tests run in E2E workflow with Redis service
11. **Cognitive Complexity**: Split complex methods into focused helper methods
4. **Validation**: Use `validateInputs()` function for all input validation
5. **Error Interface**: Single `CacheError` interface with just `code` field
6. **Logging**: Rich logging at source, clean Results for consumers
7. **TTL Minimum**: 1000ms minimum for Redis conversion (ms → seconds)
8. **Type Safety**: Branded `CacheKey` type prevents raw string usage
9. **Encapsulation**: RedisClient and createRedisClientFromEnv are internal only
10. **Cognitive Complexity**: Split complex methods into focused helper methods

View File

@@ -1,550 +0,0 @@
/* eslint-disable @typescript-eslint/restrict-template-expressions, @typescript-eslint/no-non-null-assertion, @typescript-eslint/require-await -- Test file needs template expressions for test output */
import { afterAll, beforeAll, describe, expect, test } from "vitest";
import { logger } from "@formbricks/logger";
import { createCacheKey } from "./cache-keys";
import { getCacheService } from "./client";
import type { CacheService } from "./service";
// Check if Redis is available
let isRedisAvailable = false;
let cacheService: CacheService | null = null;
// Awaitable pause helper — lets tests wait inline instead of nesting
// setTimeout callbacks (keeps nesting depth down in async test bodies).
const delay = (ms: number): Promise<void> => {
  return new Promise<void>((resolve) => {
    setTimeout(() => {
      resolve();
    }, ms);
  });
};
// Probe Redis connectivity through the real CacheService API.
// On success, stores the service instance in the module-level
// `cacheService` variable so the tests can reuse it; returns whether
// Redis is reachable. Never throws — failures are logged and reported
// as `false` so the suite can auto-skip.
async function checkRedisAvailability(): Promise<boolean> {
  try {
    const serviceResult = await getCacheService();
    if (!serviceResult.ok) {
      logger.info("Cache service unavailable - Redis not available");
      return false;
    }

    const reachable = await serviceResult.data.isRedisAvailable();
    if (!reachable) {
      logger.info("Redis availability check failed - Redis not available");
      return false;
    }

    logger.info("Redis availability check successful - Redis is available");
    cacheService = serviceResult.data;
    return true;
  } catch (error) {
    logger.error({ error }, "Error checking Redis availability");
    return false;
  }
}
/**
* Cache Integration Tests - End-to-End Redis Operations
*
* This test suite verifies that cache operations work correctly through the actual
* CacheService API against a live Redis instance. These tests exercise real code paths
* that the application uses in production.
*
* Prerequisites:
* - Redis server must be running and accessible
* - REDIS_URL environment variable must be set to a valid Redis connection string
* - Tests will be automatically skipped if REDIS_URL is empty or Redis client is not available
*
* Running the tests:
* Local development: cd packages/cache && npx vitest run src/cache-integration.test.ts
* CI Environment: Tests run automatically in E2E workflow with Redis/Valkey service
*
* Test Scenarios:
*
* 1. Basic Cache Operations
* - Purpose: Verify basic get/set/del operations work correctly
* - Method: Set a value, get it, delete it, verify deletion
* - Expected: All operations succeed with correct return values
* - Failure Indicates: Basic Redis connectivity or operation issues
*
* 2. withCache Miss/Hit Pattern
* - Purpose: Verify cache-aside pattern implementation
* - Method: Call withCache twice with expensive function
* - Expected: First call executes function (miss), second call returns cached value (hit)
* - Failure Indicates: Cache miss/hit logic not working correctly
*
* 3. Cache Invalidation
* - Purpose: Verify that del() clears cache and forces recomputation
* - Method: Cache a value, invalidate it, call withCache again
* - Expected: Function executes again after invalidation
* - Failure Indicates: Cache invalidation not working
*
* 4. TTL Expiry Behavior
* - Purpose: Verify automatic cache expiration
* - Method: Set value with short TTL, wait for expiration, verify gone
* - Expected: Value expires automatically and subsequent calls recompute
* - Failure Indicates: TTL not working correctly
*
* 5. Concurrent Cache Operations
* - Purpose: Test thread safety of cache operations
* - Method: Multiple concurrent get/set operations on same key
* - Expected: No corruption, consistent behavior
* - Failure Indicates: Race conditions in cache operations
*
* 6. Different Data Types
* - Purpose: Verify serialization works for various data types
* - Method: Store objects, arrays, primitives, complex nested data
* - Expected: Data round-trips correctly without corruption
* - Failure Indicates: Serialization/deserialization issues
*
* 7. Error Handling
* - Purpose: Verify graceful error handling when Redis is unavailable
* - Method: Test operations when Redis connection is lost
* - Expected: Graceful degradation, proper error types returned
* - Failure Indicates: Poor error handling
*
* Success Indicators:
* ✅ All cache operations complete successfully
* ✅ Cache hits/misses behave as expected
* ✅ TTL expiration works correctly
* ✅ Data integrity maintained across operations
* ✅ Proper error handling when Redis unavailable
*
* Failure Indicators:
* ❌ Cache operations fail unexpectedly
* ❌ Cache hits don't work (always executing expensive operations)
* ❌ TTL not expiring keys
* ❌ Data corruption or serialization issues
* ❌ Poor error handling
*/
describe("Cache Integration Tests - End-to-End Redis Operations", () => {
beforeAll(async () => {
// Check Redis availability first
isRedisAvailable = await checkRedisAvailability();
if (!isRedisAvailable) {
logger.info("🟡 Cache Integration Tests: Redis not available - tests will be skipped");
logger.info(" To run these tests locally, ensure Redis is running and REDIS_URL is set");
return;
}
logger.info("🟢 Cache Integration Tests: Redis available - tests will run");
// Clear any existing test keys
if (cacheService) {
const redis = cacheService.getRedisClient();
if (redis) {
const testKeys = await redis.keys("fb:cache:test:*");
if (testKeys.length > 0) {
await redis.del(testKeys);
}
}
}
});
afterAll(async () => {
// Clean up test keys
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping cleanup: Redis not available");
return;
}
const redis = cacheService.getRedisClient();
if (redis) {
const testKeys = await redis.keys("fb:cache:test:*");
if (testKeys.length > 0) {
await redis.del(testKeys);
}
}
});
test("Basic cache operations: set, get, exists, del", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
const key = createCacheKey.environment.state("basic-ops-test");
const testValue = { message: "Hello Cache!", timestamp: Date.now(), count: 42 };
// Test set operation
const setResult = await cacheService.set(key, testValue, 60000); // 60 seconds TTL
expect(setResult.ok).toBe(true);
logger.info("✅ Set operation successful");
// Test exists operation
const existsResult = await cacheService.exists(key);
expect(existsResult.ok).toBe(true);
if (existsResult.ok) {
expect(existsResult.data).toBe(true);
}
logger.info("✅ Exists operation confirmed key exists");
// Test get operation
const getResult = await cacheService.get<typeof testValue>(key);
expect(getResult.ok).toBe(true);
if (getResult.ok) {
expect(getResult.data).toEqual(testValue);
}
logger.info("✅ Get operation returned correct value");
// Test del operation
const delResult = await cacheService.del([key]);
expect(delResult.ok).toBe(true);
logger.info("✅ Del operation successful");
// Verify key no longer exists
const existsAfterDelResult = await cacheService.exists(key);
expect(existsAfterDelResult.ok).toBe(true);
if (existsAfterDelResult.ok) {
expect(existsAfterDelResult.data).toBe(false);
}
logger.info("✅ Key confirmed deleted");
// Verify get returns null after deletion
const getAfterDelResult = await cacheService.get(key);
expect(getAfterDelResult.ok).toBe(true);
if (getAfterDelResult.ok) {
expect(getAfterDelResult.data).toBe(null);
}
logger.info("✅ Get after deletion returns null");
}, 10000);
test("withCache miss/hit pattern: first call miss, second call hit", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
const key = createCacheKey.environment.state("miss-hit-test");
let executionCount = 0;
// Expensive function that we want to cache
const expensiveFunction = async (): Promise<{ result: string; timestamp: number; execution: number }> => {
executionCount++;
// Simulate expensive operation
await delay(10);
return {
result: "expensive computation result",
timestamp: Date.now(),
execution: executionCount,
};
};
// Clear any existing cache for this key
await cacheService.del([key]);
logger.info("First call (cache miss expected)...");
const firstCall = await cacheService.withCache(expensiveFunction, key, 60000);
expect(firstCall.execution).toBe(1);
expect(executionCount).toBe(1);
logger.info(`✅ First call executed function: execution=${firstCall.execution}`);
logger.info("Second call (cache hit expected)...");
const secondCall = await cacheService.withCache(expensiveFunction, key, 60000);
expect(secondCall.execution).toBe(1); // Should be the cached value from first call
expect(executionCount).toBe(1); // Function should not have been called again
expect(secondCall.result).toBe(firstCall.result);
logger.info(`✅ Second call returned cached value: execution=${secondCall.execution}`);
// Verify the values are identical (cache hit)
expect(secondCall).toEqual(firstCall);
logger.info("✅ Cache hit confirmed - identical values returned");
}, 15000);
test("Cache invalidation: del() clears cache and forces recomputation", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
const key = createCacheKey.environment.state("invalidation-test");
let executionCount = 0;
const expensiveFunction = async (): Promise<{ value: string; execution: number }> => {
executionCount++;
return {
value: `computation-${executionCount}`,
execution: executionCount,
};
};
// Clear any existing cache
await cacheService.del([key]);
logger.info("First call - populate cache...");
const firstResult = await cacheService.withCache(expensiveFunction, key, 60000);
expect(firstResult.execution).toBe(1);
expect(executionCount).toBe(1);
logger.info(`✅ Cache populated: ${firstResult.value}`);
logger.info("Second call - should hit cache...");
const secondResult = await cacheService.withCache(expensiveFunction, key, 60000);
expect(secondResult.execution).toBe(1); // Same as first call (cached)
expect(executionCount).toBe(1); // Function not executed again
expect(secondResult).toEqual(firstResult);
logger.info(`✅ Cache hit confirmed: ${secondResult.value}`);
logger.info("Invalidating cache...");
const delResult = await cacheService.del([key]);
expect(delResult.ok).toBe(true);
logger.info("✅ Cache invalidated");
logger.info("Third call after invalidation - should miss cache and recompute...");
const thirdResult = await cacheService.withCache(expensiveFunction, key, 60000);
expect(thirdResult.execution).toBe(2); // New execution
expect(executionCount).toBe(2); // Function executed again
expect(thirdResult.value).toBe("computation-2");
expect(thirdResult).not.toEqual(firstResult);
logger.info(`✅ Cache miss after invalidation confirmed: ${thirdResult.value}`);
logger.info("Fourth call - should hit cache again...");
const fourthResult = await cacheService.withCache(expensiveFunction, key, 60000);
expect(fourthResult.execution).toBe(2); // Same as third call (cached)
expect(executionCount).toBe(2); // Function not executed again
expect(fourthResult).toEqual(thirdResult);
logger.info(`✅ Cache repopulated and hit: ${fourthResult.value}`);
}, 15000);
test("TTL expiry behavior: cache expires automatically", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
const key = createCacheKey.environment.state("ttl-expiry-test");
let executionCount = 0;
const expensiveFunction = async (): Promise<{ value: string; execution: number }> => {
executionCount++;
return {
value: `ttl-computation-${executionCount}`,
execution: executionCount,
};
};
// Clear any existing cache
await cacheService.del([key]);
logger.info("First call with short TTL (2 seconds)...");
const firstResult = await cacheService.withCache(expensiveFunction, key, 2000); // 2 second TTL
expect(firstResult.execution).toBe(1);
expect(executionCount).toBe(1);
logger.info(`✅ Cache populated with TTL: ${firstResult.value}`);
logger.info("Second call within TTL - should hit cache...");
const secondResult = await cacheService.withCache(expensiveFunction, key, 2000);
expect(secondResult.execution).toBe(1); // Same as first call (cached)
expect(executionCount).toBe(1); // Function not executed again
expect(secondResult).toEqual(firstResult);
logger.info(`✅ Cache hit within TTL: ${secondResult.value}`);
logger.info("Waiting for TTL expiry (3 seconds)...");
await delay(3000);
logger.info("Third call after TTL expiry - should miss cache and recompute...");
const thirdResult = await cacheService.withCache(expensiveFunction, key, 2000);
expect(thirdResult.execution).toBe(2); // New execution
expect(executionCount).toBe(2); // Function executed again
expect(thirdResult.value).toBe("ttl-computation-2");
expect(thirdResult).not.toEqual(firstResult);
logger.info(`✅ Cache miss after TTL expiry confirmed: ${thirdResult.value}`);
// Verify the key was automatically removed by Redis TTL
const redis = cacheService.getRedisClient();
if (redis) {
// The old key should be gone, but there might be a new one from the third call
const currentKeys = await redis.keys(`fb:cache:${key}*`);
logger.info(`Current cache keys: ${currentKeys.length > 0 ? currentKeys.join(", ") : "none"}`);
// We expect either 0 keys (if TTL expired) or 1 key (new one from third call)
expect(currentKeys.length).toBeLessThanOrEqual(1);
}
logger.info("✅ TTL expiry working correctly");
}, 20000);
test("Concurrent cache operations: thread safety", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
const baseKey = "concurrent-test";
let globalExecutionCount = 0;
const expensiveFunction = async (
id: number
): Promise<{ id: number; execution: number; timestamp: number }> => {
globalExecutionCount++;
// Simulate expensive operation with variable delay
await delay(Math.random() * 50 + 10);
return {
id,
execution: globalExecutionCount,
timestamp: Date.now(),
};
};
// Clear any existing cache keys
const redis = cacheService.getRedisClient();
if (redis) {
const existingKeys = await redis.keys(`fb:cache:${baseKey}*`);
if (existingKeys.length > 0) {
await redis.del(existingKeys);
}
}
logger.info("Starting concurrent cache operations...");
// Create multiple concurrent operations on different keys
const concurrentOperations = Array.from({ length: 10 }, async (_, i) => {
const key = createCacheKey.environment.state(`${baseKey}-${i}`);
// Each "thread" makes the same call twice - first should miss, second should hit
const firstCall = await cacheService!.withCache(() => expensiveFunction(i), key, 30000);
const secondCall = await cacheService!.withCache(() => expensiveFunction(i), key, 30000);
return { i, firstCall, secondCall };
});
const results = await Promise.all(concurrentOperations);
logger.info(`Completed ${results.length} concurrent operations`);
// Verify each operation behaved correctly
results.forEach(({ i, firstCall, secondCall }) => {
// First call should have executed the function
expect(firstCall.id).toBe(i);
// Second call should return the cached value (identical to first)
expect(secondCall).toEqual(firstCall);
logger.info(`Operation ${i}: first=${firstCall.execution}, second=${secondCall.execution} (cached)`);
});
// Verify we executed exactly 10 functions (one per unique key)
expect(globalExecutionCount).toBe(10);
logger.info(
`✅ Concurrent operations completed successfully - ${globalExecutionCount} function executions`
);
}, 30000);
test("Different data types: serialization correctness", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
// Fixtures covering every JSON-serializable shape the cache must round-trip.
const testCases = [
{ name: "string", value: "Hello, World!" },
{ name: "number", value: 42.5 },
{ name: "boolean", value: true },
{ name: "null", value: null },
{ name: "array", value: [1, "two", { three: 3 }, null, true] },
{
name: "object",
value: {
id: 123,
name: "Test Object",
nested: {
array: [1, 2, 3],
date: new Date().toISOString(),
bool: false,
},
},
},
{
name: "complex",
value: {
users: [
{ id: 1, name: "Alice", roles: ["admin", "user"] },
{ id: 2, name: "Bob", roles: ["user"] },
],
metadata: {
version: "1.0.0",
created: new Date().toISOString(),
features: {
cache: true,
rateLimit: true,
audit: false,
},
},
},
},
];
logger.info(`Testing serialization for ${testCases.length} data types...`);
for (const { name, value } of testCases) {
const key = createCacheKey.environment.state(`serialization-${name}`);
logger.info(`Testing ${name} type...`);
// Write the fixture, then read it back and compare structurally.
const writeResult = await cacheService.set(key, value, 30000);
expect(writeResult.ok).toBe(true);
const readResult = await cacheService.get(key);
expect(readResult.ok).toBe(true);
if (readResult.ok) {
expect(readResult.data).toEqual(value);
}
// withCache must serve the cached copy without invoking the producer.
let producerInvoked = false;
const fromCache = await cacheService.withCache(
async () => {
producerInvoked = true;
return value;
},
key,
30000
);
expect(producerInvoked).toBe(false);
expect(fromCache).toEqual(value);
logger.info(`${name} serialization successful`);
}
logger.info("✅ All data types serialized correctly");
}, 20000);
test("Error handling: graceful degradation when operations fail", async () => {
if (!isRedisAvailable || !cacheService) {
logger.info("Skipping test: Redis not available");
return;
}
const validKey = createCacheKey.environment.state("error-test");
// A negative TTL is rejected by input validation.
const invalidTtl = -1000;
logger.info("Testing error handling with invalid inputs...");
// A direct set with a bad TTL must fail with a typed error rather than throw.
const writeResult = await cacheService.set(validKey, "test", invalidTtl);
expect(writeResult.ok).toBe(false);
if (!writeResult.ok) {
expect(writeResult.error.code).toBeDefined();
logger.info(`✅ Set with invalid TTL handled gracefully: ${writeResult.error.code}`);
}
// Even when caching fails, withCache must still run the producer and
// hand back its result (graceful degradation, never a hard failure).
let producerInvoked = false;
const degradedResult = await cacheService.withCache(
async () => {
producerInvoked = true;
return "test result";
},
validKey,
invalidTtl
);
expect(producerInvoked).toBe(true);
expect(degradedResult).toBe("test result");
logger.info("✅ withCache gracefully degraded to function execution when cache failed");
logger.info("✅ Error handling tests completed successfully");
}, 15000);
});

View File

@@ -1,5 +1,5 @@
import { describe, expect, test } from "vitest";
import type { CacheKey } from "@/types/keys";
import { describe, expect, test } from "vitest";
import { createCacheKey } from "./cache-keys";
describe("@formbricks/cache cacheKeys", () => {

View File

@@ -1,7 +1,7 @@
import { createClient } from "redis";
import { beforeEach, describe, expect, test, vi } from "vitest";
import type { RedisClient } from "@/types/client";
import { ErrorCode } from "@/types/error";
import { createClient } from "redis";
import { beforeEach, describe, expect, test, vi } from "vitest";
import { createRedisClientFromEnv, getCacheService, resetCacheFactory } from "./client";
// Mock the redis module

View File

@@ -1,7 +1,7 @@
import { createClient } from "redis";
import { logger } from "@formbricks/logger";
import type { RedisClient } from "@/types/client";
import { type CacheError, ErrorCode, type Result, err, ok } from "@/types/error";
import { createClient } from "redis";
import { logger } from "@formbricks/logger";
import { CacheService } from "./service";
/**

View File

@@ -20,7 +20,6 @@ interface MockRedisClient {
setEx: ReturnType<typeof vi.fn>;
del: ReturnType<typeof vi.fn>;
exists: ReturnType<typeof vi.fn>;
ping: ReturnType<typeof vi.fn>;
isReady: boolean;
isOpen: boolean;
}
@@ -35,7 +34,6 @@ describe("CacheService", () => {
setEx: vi.fn(),
del: vi.fn(),
exists: vi.fn(),
ping: vi.fn().mockResolvedValue("PONG"),
isReady: true,
isOpen: true,
};
@@ -471,89 +469,6 @@ describe("CacheService", () => {
});
});
describe("isRedisAvailable", () => {
test("should return true when Redis is ready, open, and ping succeeds", async () => {
mockRedis.ping.mockResolvedValue("PONG");
const result = await cacheService.isRedisAvailable();
expect(result).toBe(true);
expect(mockRedis.ping).toHaveBeenCalledOnce();
});
test("should return false when Redis is not ready", async () => {
mockRedis.isReady = false;
mockRedis.ping.mockResolvedValue("PONG");
const result = await cacheService.isRedisAvailable();
expect(result).toBe(false);
expect(mockRedis.ping).not.toHaveBeenCalled();
});
test("should return false when Redis is not open", async () => {
mockRedis.isOpen = false;
mockRedis.ping.mockResolvedValue("PONG");
const result = await cacheService.isRedisAvailable();
expect(result).toBe(false);
expect(mockRedis.ping).not.toHaveBeenCalled();
});
test("should return false when Redis ping fails", async () => {
mockRedis.ping.mockRejectedValue(new Error("Connection lost"));
const result = await cacheService.isRedisAvailable();
expect(result).toBe(false);
expect(mockRedis.ping).toHaveBeenCalledOnce();
expect(logger.debug).toHaveBeenCalledWith(
{ error: expect.any(Error) }, // eslint-disable-line @typescript-eslint/no-unsafe-assignment -- Testing error handling with any Error type
"Redis ping failed during availability check"
);
});
test("should return false when ping times out", async () => {
// Mock ping to hang indefinitely
const hangingPromise = new Promise(() => {
// This promise never resolves to simulate timeout
});
mockRedis.ping.mockImplementation(() => hangingPromise);
const result = await cacheService.isRedisAvailable();
expect(result).toBe(false);
expect(mockRedis.ping).toHaveBeenCalledOnce();
});
test("should handle different ping responses correctly", async () => {
// Test with standard PONG response
mockRedis.ping.mockResolvedValue("PONG");
let result = await cacheService.isRedisAvailable();
expect(result).toBe(true);
// Test with custom ping message
mockRedis.ping.mockResolvedValue("custom-message");
result = await cacheService.isRedisAvailable();
expect(result).toBe(true);
// Test with empty response (still success if no error thrown)
mockRedis.ping.mockResolvedValue("");
result = await cacheService.isRedisAvailable();
expect(result).toBe(true);
});
test("should be async and return Promise<boolean>", async () => {
mockRedis.ping.mockResolvedValue("PONG");
const result = cacheService.isRedisAvailable();
expect(result).toBeInstanceOf(Promise);
expect(await result).toBe(true);
});
});
describe("withCache", () => {
test("should return cached value when available", async () => {
const key = "test:key" as CacheKey;

View File

@@ -1,9 +1,9 @@
import { logger } from "@formbricks/logger";
import type { RedisClient } from "@/types/client";
import { type CacheError, CacheErrorClass, ErrorCode, type Result, err, ok } from "@/types/error";
import type { CacheKey } from "@/types/keys";
import { ZCacheKey } from "@/types/keys";
import { ZTtlMs } from "@/types/service";
import { logger } from "@formbricks/logger";
import { validateInputs } from "./utils/validation";
/**
@@ -32,7 +32,7 @@ export class CacheService {
* @returns The Redis client instance or null if not ready
*/
getRedisClient(): RedisClient | null {
if (!this.isRedisClientReady()) {
if (!this.isRedisAvailable()) {
return null;
}
return this.redis;
@@ -45,7 +45,7 @@ export class CacheService {
*/
async get<T>(key: CacheKey): Promise<Result<T | null, CacheError>> {
// Check Redis availability first
if (!this.isRedisClientReady()) {
if (!this.isRedisAvailable()) {
return err({
code: ErrorCode.RedisConnectionError,
});
@@ -90,7 +90,7 @@ export class CacheService {
*/
async exists(key: CacheKey): Promise<Result<boolean, CacheError>> {
// Check Redis availability first
if (!this.isRedisClientReady()) {
if (!this.isRedisAvailable()) {
return err({
code: ErrorCode.RedisConnectionError,
});
@@ -121,7 +121,7 @@ export class CacheService {
*/
async set(key: CacheKey, value: unknown, ttlMs: number): Promise<Result<void, CacheError>> {
// Check Redis availability first
if (!this.isRedisClientReady()) {
if (!this.isRedisAvailable()) {
return err({
code: ErrorCode.RedisConnectionError,
});
@@ -155,7 +155,7 @@ export class CacheService {
*/
async del(keys: CacheKey[]): Promise<Result<void, CacheError>> {
// Check Redis availability first
if (!this.isRedisClientReady()) {
if (!this.isRedisAvailable()) {
return err({
code: ErrorCode.RedisConnectionError,
});
@@ -192,7 +192,7 @@ export class CacheService {
* @returns Cached value if present, otherwise fresh result from fn()
*/
async withCache<T>(fn: () => Promise<T>, key: CacheKey, ttlMs: number): Promise<T> {
if (!this.isRedisClientReady()) {
if (!this.isRedisAvailable()) {
return await fn();
}
@@ -257,29 +257,7 @@ export class CacheService {
}
}
/**
* Check if Redis is available and healthy by testing connectivity with ping
* @returns Promise<boolean> indicating if Redis is available and responsive
*/
async isRedisAvailable(): Promise<boolean> {
if (!this.isRedisClientReady()) {
return false;
}
try {
await this.withTimeout(this.redis.ping());
return true;
} catch (error) {
logger.debug({ error }, "Redis ping failed during availability check");
return false;
}
}
/**
* Fast synchronous check of Redis client state for internal use
* @returns Boolean indicating if Redis client is ready and connected
*/
private isRedisClientReady(): boolean {
private isRedisAvailable(): boolean {
return this.redis.isReady && this.redis.isOpen;
}
}

View File

@@ -1,5 +1,5 @@
import { describe, expect, test } from "vitest";
import type { CacheKey } from "@/types/keys";
import { describe, expect, test } from "vitest";
import { makeCacheKey } from "./key";
describe("@formbricks/cache utils/key", () => {

View File

@@ -1,5 +1,5 @@
import { logger } from "@formbricks/logger";
import type { CacheKey } from "@/types/keys";
import { logger } from "@formbricks/logger";
/**
* Helper function to create cache keys with runtime validation

View File

@@ -1,6 +1,6 @@
import { ErrorCode } from "@/types/error";
import { describe, expect, test, vi } from "vitest";
import { z } from "zod";
import { ErrorCode } from "@/types/error";
import { validateInputs } from "./validation";
// Mock logger

View File

@@ -1,7 +1,7 @@
import type { z } from "zod";
import { logger } from "@formbricks/logger";
import type { CacheError, Result } from "@/types/error";
import { ErrorCode, err, ok } from "@/types/error";
import type { z } from "zod";
import { logger } from "@formbricks/logger";
/**
* Generic validation function using Zod schemas with Result types

View File

@@ -14,7 +14,6 @@ module.exports = {
"<THIRD_PARTY_MODULES>",
"^@formbricks/(.*)$",
"^~/(.*)$",
"^@/(.*)$",
"^[./]",
],
importOrderSeparation: false,

View File

@@ -82,8 +82,8 @@ describe("client.ts", () => {
}
});
test("should return error when access key is missing", async () => {
// Mock constants with missing access key
test("should create S3 client without credentials (IAM role authentication)", async () => {
// Mock constants with missing access key (IAM role scenario)
vi.doMock("./constants", () => ({
...mockConstants,
S3_ACCESS_KEY: undefined,
@@ -93,14 +93,20 @@ describe("client.ts", () => {
const result = createS3ClientFromEnv();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.code).toBe("s3_credentials_error");
expect(mockS3Client).toHaveBeenCalledWith({
region: mockConstants.S3_REGION,
endpoint: mockConstants.S3_ENDPOINT_URL,
forcePathStyle: mockConstants.S3_FORCE_PATH_STYLE,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
test("should return error when secret key is missing", async () => {
// Mock constants with missing secret key
test("should create S3 client without secret key (IAM role authentication)", async () => {
// Mock constants with missing secret key (IAM role scenario)
vi.doMock("./constants", () => ({
...mockConstants,
S3_SECRET_KEY: undefined,
@@ -110,14 +116,20 @@ describe("client.ts", () => {
const result = createS3ClientFromEnv();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.code).toBe("s3_credentials_error");
expect(mockS3Client).toHaveBeenCalledWith({
region: mockConstants.S3_REGION,
endpoint: mockConstants.S3_ENDPOINT_URL,
forcePathStyle: mockConstants.S3_FORCE_PATH_STYLE,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
test("should return error when both credentials are missing", async () => {
// Mock constants with no credentials
test("should create S3 client without any credentials (IAM role authentication)", async () => {
// Mock constants with no credentials (full IAM role scenario)
vi.doMock("./constants", () => ({
...mockConstants,
S3_ACCESS_KEY: undefined,
@@ -128,14 +140,20 @@ describe("client.ts", () => {
const result = createS3ClientFromEnv();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.code).toBe("s3_credentials_error");
expect(mockS3Client).toHaveBeenCalledWith({
region: mockConstants.S3_REGION,
endpoint: mockConstants.S3_ENDPOINT_URL,
forcePathStyle: mockConstants.S3_FORCE_PATH_STYLE,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
test("should return error when credentials are empty strings", async () => {
// Mock constants with empty string credentials
test("should create S3 client with empty string credentials (IAM role authentication)", async () => {
// Mock constants with empty string credentials (treated as undefined)
vi.doMock("./constants", () => ({
...mockConstants,
S3_ACCESS_KEY: "",
@@ -146,14 +164,20 @@ describe("client.ts", () => {
const result = createS3ClientFromEnv();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.code).toBe("s3_credentials_error");
expect(mockS3Client).toHaveBeenCalledWith({
region: mockConstants.S3_REGION,
endpoint: mockConstants.S3_ENDPOINT_URL,
forcePathStyle: mockConstants.S3_FORCE_PATH_STYLE,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
test("should return error when mixed empty and undefined credentials", async () => {
// Mock constants with mixed empty and undefined
test("should create S3 client with mixed empty and undefined credentials (IAM role authentication)", async () => {
// Mock constants with mixed empty and undefined (both treated as missing)
vi.doMock("./constants", () => ({
...mockConstants,
S3_ACCESS_KEY: "",
@@ -164,9 +188,15 @@ describe("client.ts", () => {
const result = createS3ClientFromEnv();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.code).toBe("s3_credentials_error");
expect(mockS3Client).toHaveBeenCalledWith({
region: mockConstants.S3_REGION,
endpoint: mockConstants.S3_ENDPOINT_URL,
forcePathStyle: mockConstants.S3_FORCE_PATH_STYLE,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
@@ -197,6 +227,75 @@ describe("client.ts", () => {
}
});
test("should create S3 client when region is missing (uses AWS SDK defaults)", async () => {
// Mock constants with missing region - should still work
vi.doMock("./constants", () => ({
...mockConstants,
S3_REGION: undefined,
}));
const { createS3ClientFromEnv } = await import("./client");
const result = createS3ClientFromEnv();
expect(mockS3Client).toHaveBeenCalledWith({
credentials: {
accessKeyId: mockConstants.S3_ACCESS_KEY,
secretAccessKey: mockConstants.S3_SECRET_KEY,
},
endpoint: mockConstants.S3_ENDPOINT_URL,
forcePathStyle: mockConstants.S3_FORCE_PATH_STYLE,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
test("should create S3 client with only bucket name (minimal config for IAM roles)", async () => {
// Mock constants with only bucket name - minimal required config
vi.doMock("./constants", () => ({
S3_ACCESS_KEY: undefined,
S3_SECRET_KEY: undefined,
S3_REGION: undefined,
S3_BUCKET_NAME: "test-bucket",
S3_ENDPOINT_URL: undefined,
S3_FORCE_PATH_STYLE: false,
}));
const { createS3ClientFromEnv } = await import("./client");
const result = createS3ClientFromEnv();
expect(mockS3Client).toHaveBeenCalledWith({
endpoint: undefined,
forcePathStyle: false,
});
expect(result.ok).toBe(true);
if (result.ok) {
expect(result.data).toBeDefined();
}
});
test("should return error when bucket name is missing", async () => {
// Mock constants with missing bucket name
vi.doMock("./constants", () => ({
...mockConstants,
S3_BUCKET_NAME: undefined,
}));
const { createS3ClientFromEnv } = await import("./client");
const result = createS3ClientFromEnv();
expect(result.ok).toBe(false);
if (!result.ok) {
expect(result.error.code).toBe("s3_credentials_error");
}
});
test("should return unknown error when S3Client constructor throws", async () => {
// Provide valid credentials so we reach the constructor path
vi.doMock("./constants", () => ({
@@ -254,11 +353,10 @@ describe("client.ts", () => {
});
test("should return undefined when creating from env fails and no client provided", async () => {
// Mock constants with missing credentials
// Mock constants with missing required field (bucket name only)
vi.doMock("./constants", () => ({
...mockConstants,
S3_ACCESS_KEY: undefined,
S3_SECRET_KEY: undefined,
S3_BUCKET_NAME: undefined,
}));
const { createS3Client } = await import("./client");
@@ -290,8 +388,7 @@ describe("client.ts", () => {
test("returns undefined when env is invalid and does not construct client", async () => {
vi.doMock("./constants", () => ({
...mockConstants,
S3_ACCESS_KEY: undefined,
S3_SECRET_KEY: undefined,
S3_BUCKET_NAME: undefined,
}));
const { getCachedS3Client } = await import("./client");

View File

@@ -1,4 +1,4 @@
import { S3Client } from "@aws-sdk/client-s3";
import { S3Client, type S3ClientConfig } from "@aws-sdk/client-s3";
import { logger } from "@formbricks/logger";
import { type Result, type StorageError, StorageErrorCode, err, ok } from "../types/error";
import {
@@ -19,19 +19,35 @@ let cachedS3Client: S3Client | undefined;
*/
export const createS3ClientFromEnv = (): Result<S3Client, StorageError> => {
try {
if (!S3_ACCESS_KEY || !S3_SECRET_KEY || !S3_BUCKET_NAME || !S3_REGION) {
logger.error("S3 Client: S3 credentials are not set");
// Only S3_BUCKET_NAME is required - S3_REGION is optional and will default to AWS SDK defaults
if (!S3_BUCKET_NAME) {
logger.error("S3 Client: S3_BUCKET_NAME is required");
return err({
code: StorageErrorCode.S3CredentialsError,
});
}
const s3ClientInstance = new S3Client({
credentials: { accessKeyId: S3_ACCESS_KEY, secretAccessKey: S3_SECRET_KEY },
region: S3_REGION,
// Build S3 client configuration
const s3Config: S3ClientConfig = {
endpoint: S3_ENDPOINT_URL,
forcePathStyle: S3_FORCE_PATH_STYLE,
});
};
// Only set region if it's provided, otherwise let AWS SDK use its defaults
if (S3_REGION) {
s3Config.region = S3_REGION;
}
// Only add credentials if both access key and secret key are provided
// This allows the AWS SDK to use IAM roles, instance profiles, or other credential providers
if (S3_ACCESS_KEY && S3_SECRET_KEY) {
s3Config.credentials = {
accessKeyId: S3_ACCESS_KEY,
secretAccessKey: S3_SECRET_KEY,
};
}
const s3ClientInstance = new S3Client(s3Config);
return ok(s3ClientInstance);
} catch (error) {