mirror of https://github.com/formbricks/formbricks.git (synced 2026-02-20 10:09:20 -06:00)

Compare commits

fix/notifi...4.7.1-rc.1

8 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 5b776b04b1 |  |
|  | e3f3516fa7 |  |
|  | 91486f3dc9 |  |
|  | 8b9179bfcc |  |
|  | 1b337aeac3 |  |
|  | cd64848cc5 |  |
|  | 42411694a7 |  |
|  | 721d972901 |  |
@@ -30,7 +30,7 @@ export const NotificationSwitch = ({
const isChecked =
notificationType === "unsubscribedOrganizationIds"
? !notificationSettings.unsubscribedOrganizationIds?.includes(surveyOrProjectOrOrganizationId)
: notificationSettings[notificationType]?.[surveyOrProjectOrOrganizationId] === true;
: notificationSettings[notificationType][surveyOrProjectOrOrganizationId] === true;

const handleSwitchChange = async () => {
setIsLoading(true);
@@ -49,11 +49,8 @@ export const NotificationSwitch = ({
];
}
} else {
updatedNotificationSettings[notificationType] = {
...updatedNotificationSettings[notificationType],
[surveyOrProjectOrOrganizationId]:
!updatedNotificationSettings[notificationType]?.[surveyOrProjectOrOrganizationId],
};
updatedNotificationSettings[notificationType][surveyOrProjectOrOrganizationId] =
!updatedNotificationSettings[notificationType][surveyOrProjectOrOrganizationId];
}

const updatedNotificationSettingsActionResponse = await updateNotificationSettingsAction({
@@ -81,7 +78,7 @@ export const NotificationSwitch = ({
) {
switch (notificationType) {
case "alert":
if (notificationSettings[notificationType]?.[surveyOrProjectOrOrganizationId] === true) {
if (notificationSettings[notificationType][surveyOrProjectOrOrganizationId] === true) {
handleSwitchChange();
toast.success(
t(
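The paired lines in the hunks above toggle optional chaining on the nested settings lookup. A minimal sketch (the `Settings` shape is hypothetical, simplified from the component's real types) of why the `?.` guard matters when a notification-type map has not been initialized yet:

```typescript
// Hypothetical, simplified shape of the notification settings used above.
type Settings = Record<string, Record<string, boolean> | undefined>;

const settings: Settings = {}; // the "alert" map does not exist yet

// Direct indexing throws: settings["alert"] is undefined.
// const checked = settings["alert"]["survey-1"]; // TypeError at runtime

// Optional chaining short-circuits to undefined, so the strict comparison
// evaluates to false instead of throwing.
const checked = settings["alert"]?.["survey-1"] === true;
console.log(checked); // false
```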
@@ -21,7 +21,6 @@ import { getElementsFromBlocks } from "@/lib/survey/utils";
import { getFormattedDateTimeString } from "@/lib/utils/datetime";
import { parseRecallInfo } from "@/lib/utils/recall";
import { truncateText } from "@/lib/utils/strings";
import { resolveStorageUrlAuto } from "@/modules/storage/utils";

const convertMetaObjectToString = (metadata: TResponseMeta): string => {
let result: string[] = [];
@@ -257,16 +256,10 @@ const processElementResponse = (
const selectedChoiceIds = responseValue as string[];
return element.choices
.filter((choice) => selectedChoiceIds.includes(choice.id))
.map((choice) => resolveStorageUrlAuto(choice.imageUrl))
.map((choice) => choice.imageUrl)
.join("\n");
}

if (element.type === TSurveyElementTypeEnum.FileUpload && Array.isArray(responseValue)) {
return responseValue
.map((url) => (typeof url === "string" ? resolveStorageUrlAuto(url) : url))
.join("; ");
}

return processResponseData(responseValue);
};

@@ -375,7 +368,7 @@ const buildNotionPayloadProperties = (

responses[resp] = (pictureElement as any)?.choices
.filter((choice) => selectedChoiceIds.includes(choice.id))
.map((choice) => resolveStorageUrlAuto(choice.imageUrl));
.map((choice) => choice.imageUrl);
}
});
@@ -18,7 +18,6 @@ import { convertDatesInObject } from "@/lib/time";
import { queueAuditEvent } from "@/modules/ee/audit-logs/lib/handler";
import { TAuditStatus, UNKNOWN_DATA } from "@/modules/ee/audit-logs/types/audit-log";
import { sendResponseFinishedEmail } from "@/modules/email";
import { resolveStorageUrlsInObject } from "@/modules/storage/utils";
import { sendFollowUpsForResponse } from "@/modules/survey/follow-ups/lib/follow-ups";
import { FollowUpSendError } from "@/modules/survey/follow-ups/types/follow-up";
import { handleIntegrations } from "./lib/handleIntegrations";
@@ -96,15 +95,12 @@ export const POST = async (request: Request) => {
]);
};

const resolvedResponseData = resolveStorageUrlsInObject(response.data);

const webhookPromises = webhooks.map((webhook) => {
const body = JSON.stringify({
webhookId: webhook.id,
event,
data: {
...response,
data: resolvedResponseData,
survey: {
title: survey.name,
type: survey.type,
@@ -10,7 +10,6 @@ import {
TJsEnvironmentStateSurvey,
} from "@formbricks/types/js";
import { validateInputs } from "@/lib/utils/validate";
import { resolveStorageUrlsInObject } from "@/modules/storage/utils";
import { transformPrismaSurvey } from "@/modules/survey/lib/utils";

/**
@@ -178,14 +177,14 @@ export const getEnvironmentStateData = async (environmentId: string): Promise<En
overlay: environmentData.project.overlay,
placement: environmentData.project.placement,
inAppSurveyBranding: environmentData.project.inAppSurveyBranding,
styling: resolveStorageUrlsInObject(environmentData.project.styling),
styling: environmentData.project.styling,
},
},
organization: {
id: environmentData.project.organization.id,
billing: environmentData.project.organization.billing,
},
surveys: resolveStorageUrlsInObject(transformedSurveys),
surveys: transformedSurveys,
actionClasses: environmentData.actionClasses as TJsEnvironmentStateActionClass[],
};
} catch (error) {
@@ -44,10 +44,13 @@ const validateResponse = (
...responseUpdateInput.data,
};

const isFinished = responseUpdateInput.finished ?? false;

const validationErrors = validateResponseData(
survey.blocks,
mergedData,
responseUpdateInput.language ?? response.language ?? "en",
isFinished,
survey.questions
);

@@ -41,6 +41,7 @@ const validateResponse = (responseInputData: TResponseInput, survey: TSurvey) =>
survey.blocks,
responseInputData.data,
responseInputData.language ?? "en",
responseInputData.finished,
survey.questions
);
@@ -10,7 +10,7 @@ import { deleteResponse, getResponse } from "@/lib/response/service";
import { getSurvey } from "@/lib/survey/service";
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
import { resolveStorageUrlsInObject, validateFileUploads } from "@/modules/storage/utils";
import { validateFileUploads } from "@/modules/storage/utils";
import { updateResponseWithQuotaEvaluation } from "./lib/response";

async function fetchAndAuthorizeResponse(
@@ -57,10 +57,7 @@ export const GET = withV1ApiWrapper({
}

return {
response: responses.successResponse({
...result.response,
data: resolveStorageUrlsInObject(result.response.data),
}),
response: responses.successResponse(result.response),
};
} catch (error) {
return {
@@ -149,6 +146,7 @@ export const PUT = withV1ApiWrapper({
result.survey.blocks,
responseUpdate.data,
responseUpdate.language ?? "en",
responseUpdate.finished,
result.survey.questions
);

@@ -192,7 +190,7 @@ export const PUT = withV1ApiWrapper({
}

return {
response: responses.successResponse({ ...updated, data: resolveStorageUrlsInObject(updated.data) }),
response: responses.successResponse(updated),
};
} catch (error) {
return {
@@ -9,7 +9,7 @@ import { sendToPipeline } from "@/app/lib/pipelines";
import { getSurvey } from "@/lib/survey/service";
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
import { resolveStorageUrlsInObject, validateFileUploads } from "@/modules/storage/utils";
import { validateFileUploads } from "@/modules/storage/utils";
import {
createResponseWithQuotaEvaluation,
getResponses,
@@ -54,9 +54,7 @@ export const GET = withV1ApiWrapper({
allResponses.push(...environmentResponses);
}
return {
response: responses.successResponse(
allResponses.map((r) => ({ ...r, data: resolveStorageUrlsInObject(r.data) }))
),
response: responses.successResponse(allResponses),
};
} catch (error) {
if (error instanceof DatabaseError) {
@@ -157,6 +155,7 @@ export const POST = withV1ApiWrapper({
surveyResult.survey.blocks,
responseInput.data,
responseInput.language ?? "en",
responseInput.finished,
surveyResult.survey.questions
);
@@ -16,7 +16,6 @@ import { TApiAuditLog, TApiKeyAuthentication, withV1ApiWrapper } from "@/app/lib
import { getOrganizationByEnvironmentId } from "@/lib/organization/service";
import { getSurvey, updateSurvey } from "@/lib/survey/service";
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
import { resolveStorageUrlsInObject } from "@/modules/storage/utils";

const fetchAndAuthorizeSurvey = async (
surveyId: string,
@@ -59,18 +58,16 @@ export const GET = withV1ApiWrapper({

if (shouldTransformToQuestions) {
return {
response: responses.successResponse(
resolveStorageUrlsInObject({
...result.survey,
questions: transformBlocksToQuestions(result.survey.blocks, result.survey.endings),
blocks: [],
})
),
response: responses.successResponse({
...result.survey,
questions: transformBlocksToQuestions(result.survey.blocks, result.survey.endings),
blocks: [],
}),
};
}

return {
response: responses.successResponse(resolveStorageUrlsInObject(result.survey)),
response: responses.successResponse(result.survey),
};
} catch (error) {
return {
@@ -205,12 +202,12 @@ export const PUT = withV1ApiWrapper({
};

return {
response: responses.successResponse(resolveStorageUrlsInObject(surveyWithQuestions)),
response: responses.successResponse(surveyWithQuestions),
};
}

return {
response: responses.successResponse(resolveStorageUrlsInObject(updatedSurvey)),
response: responses.successResponse(updatedSurvey),
};
} catch (error) {
return {
@@ -14,7 +14,6 @@ import { TApiAuditLog, TApiKeyAuthentication, withV1ApiWrapper } from "@/app/lib
import { getOrganizationByEnvironmentId } from "@/lib/organization/service";
import { createSurvey } from "@/lib/survey/service";
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
import { resolveStorageUrlsInObject } from "@/modules/storage/utils";
import { getSurveys } from "./lib/surveys";

export const GET = withV1ApiWrapper({
@@ -56,7 +55,7 @@ export const GET = withV1ApiWrapper({
});

return {
response: responses.successResponse(resolveStorageUrlsInObject(surveysWithQuestions)),
response: responses.successResponse(surveysWithQuestions),
};
} catch (error) {
if (error instanceof DatabaseError) {

@@ -112,6 +112,7 @@ export const POST = async (request: Request, context: Context): Promise<Response
survey.blocks,
responseInputData.data,
responseInputData.language ?? "en",
responseInputData.finished,
survey.questions
);
@@ -22,7 +22,6 @@ import { getElementsFromBlocks } from "@/lib/survey/utils";
import { getIsQuotasEnabled } from "@/modules/ee/license-check/lib/utils";
import { reduceQuotaLimits } from "@/modules/ee/quotas/lib/quotas";
import { deleteFile } from "@/modules/storage/service";
import { resolveStorageUrlsInObject } from "@/modules/storage/utils";
import { getOrganizationIdFromEnvironmentId } from "@/modules/survey/lib/organization";
import { getOrganizationBilling } from "@/modules/survey/lib/survey";
import { ITEMS_PER_PAGE } from "../constants";
@@ -409,10 +408,9 @@ export const getResponseDownloadFile = async (
if (survey.isVerifyEmailEnabled) {
headers.push("Verified Email");
}
const resolvedResponses = responses.map((r) => ({ ...r, data: resolveStorageUrlsInObject(r.data) }));
const jsonData = getResponsesJson(
survey,
resolvedResponses,
responses,
elements,
userAttributes,
hiddenFields,
@@ -95,7 +95,7 @@ describe("validateResponseData", () => {
mockGetElementsFromBlocks.mockReturnValue(mockElements);
mockValidateBlockResponses.mockReturnValue({});

validateResponseData([], mockResponseData, "en", mockQuestions);
validateResponseData([], mockResponseData, "en", true, mockQuestions);

expect(mockTransformQuestionsToBlocks).toHaveBeenCalledWith(mockQuestions, []);
expect(mockGetElementsFromBlocks).toHaveBeenCalledWith(transformedBlocks);
@@ -105,15 +105,15 @@ describe("validateResponseData", () => {
mockGetElementsFromBlocks.mockReturnValue(mockElements);
mockValidateBlockResponses.mockReturnValue({});

validateResponseData(mockBlocks, mockResponseData, "en", mockQuestions);
validateResponseData(mockBlocks, mockResponseData, "en", true, mockQuestions);

expect(mockTransformQuestionsToBlocks).not.toHaveBeenCalled();
});

test("should return null when both blocks and questions are empty", () => {
expect(validateResponseData([], mockResponseData, "en", [])).toBeNull();
expect(validateResponseData(null, mockResponseData, "en", [])).toBeNull();
expect(validateResponseData(undefined, mockResponseData, "en", null)).toBeNull();
expect(validateResponseData([], mockResponseData, "en", true, [])).toBeNull();
expect(validateResponseData(null, mockResponseData, "en", true, [])).toBeNull();
expect(validateResponseData(undefined, mockResponseData, "en", true, null)).toBeNull();
});

test("should use default language code", () => {
@@ -125,58 +125,25 @@ describe("validateResponseData", () => {
expect(mockValidateBlockResponses).toHaveBeenCalledWith(mockElements, mockResponseData, "en");
});

test("should validate only fields present in responseData", () => {
test("should validate only present fields when finished is false", () => {
const partialResponseData: TResponseData = { element1: "test" };
const elementsToValidate = [mockElements[0]];
const partialElements = [mockElements[0]];
mockGetElementsFromBlocks.mockReturnValue(mockElements);
mockValidateBlockResponses.mockReturnValue({});

validateResponseData(mockBlocks, partialResponseData, "en");
validateResponseData(mockBlocks, partialResponseData, "en", false);

expect(mockValidateBlockResponses).toHaveBeenCalledWith(elementsToValidate, partialResponseData, "en");
expect(mockValidateBlockResponses).toHaveBeenCalledWith(partialElements, partialResponseData, "en");
});

test("should never validate elements not in responseData", () => {
const blocksWithTwoElements: TSurveyBlock[] = [
...mockBlocks,
{
id: "block2",
name: "Block 2",
elements: [
{
id: "element2",
type: TSurveyElementTypeEnum.OpenText,
headline: { default: "Q2" },
required: true,
inputType: "text",
charLimit: { enabled: false },
},
],
},
];
const allElements = [
...mockElements,
{
id: "element2",
type: TSurveyElementTypeEnum.OpenText,
headline: { default: "Q2" },
required: true,
inputType: "text",
charLimit: { enabled: false },
},
];
const responseDataWithOnlyElement1: TResponseData = { element1: "test" };
mockGetElementsFromBlocks.mockReturnValue(allElements);
test("should validate all fields when finished is true", () => {
const partialResponseData: TResponseData = { element1: "test" };
mockGetElementsFromBlocks.mockReturnValue(mockElements);
mockValidateBlockResponses.mockReturnValue({});

validateResponseData(blocksWithTwoElements, responseDataWithOnlyElement1, "en");
validateResponseData(mockBlocks, partialResponseData, "en", true);

// Only element1 should be validated, not element2 (even though it's required)
expect(mockValidateBlockResponses).toHaveBeenCalledWith(
[allElements[0]],
responseDataWithOnlyElement1,
"en"
);
expect(mockValidateBlockResponses).toHaveBeenCalledWith(mockElements, partialResponseData, "en");
});
});
@@ -9,13 +9,13 @@ import { getElementsFromBlocks } from "@/lib/survey/utils";
import { ApiErrorDetails } from "@/modules/api/v2/types/api-error";

/**
* Validates response data against survey validation rules.
* Only validates elements that have data in responseData - never validates
* all survey elements regardless of completion status.
* Validates response data against survey validation rules
* Handles partial responses (in-progress) by only validating present fields when finished is false
*
* @param blocks - Survey blocks containing elements with validation rules (preferred)
* @param responseData - Response data to validate (keyed by element ID)
* @param languageCode - Language code for error messages (defaults to "en")
* @param finished - Whether the response is finished (defaults to true for management APIs)
* @param questions - Survey questions (legacy format, used as fallback if blocks are empty)
* @returns Validation error map keyed by element ID, or null if validation passes
*/
@@ -23,6 +23,7 @@ export const validateResponseData = (
blocks: TSurveyBlock[] | undefined | null,
responseData: TResponseData,
languageCode: string = "en",
finished: boolean = true,
questions?: TSurveyQuestion[] | undefined | null
): TValidationErrorMap | null => {
// Use blocks if available, otherwise transform questions to blocks
@@ -41,8 +42,11 @@ export const validateResponseData = (
// Extract elements from blocks
const allElements = getElementsFromBlocks(blocksToUse);

// Always validate only elements that are present in responseData
const elementsToValidate = allElements.filter((element) => Object.keys(responseData).includes(element.id));
// If response is not finished, only validate elements that are present in the response data
// This prevents "required" errors for fields the user hasn't reached yet
const elementsToValidate = finished
? allElements
: allElements.filter((element) => Object.keys(responseData).includes(element.id));

// Validate selected elements
const errorMap = validateBlockResponses(elementsToValidate, responseData, languageCode);
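The two variants above differ in whether the `finished` flag gates full validation. A hedged usage sketch of the signature shown in the hunk (the `blocks` fixture is hypothetical):

```typescript
import { validateResponseData } from "@/modules/api/lib/validation"; // path as imported by the v1 routes above

// Hypothetical fixture: a survey's blocks array, typed from the function itself.
declare const blocks: Parameters<typeof validateResponseData>[0];

const data = { element1: "test" }; // element2 left unanswered

// In-progress response (finished = false): only elements present in `data`
// are validated, so a required-but-unreached element2 raises no error.
const partialErrors = validateResponseData(blocks, data, "en", false);

// Finished response (finished = true): every element is validated, so a
// missing required element2 now appears in the returned error map.
const finalErrors = validateResponseData(blocks, data, "en", true);
```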
@@ -15,7 +15,7 @@ import {
import { getSurveyQuestions } from "@/modules/api/v2/management/responses/[responseId]/lib/survey";
import { ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
import { resolveStorageUrlsInObject, validateFileUploads } from "@/modules/storage/utils";
import { validateFileUploads } from "@/modules/storage/utils";
import { ZResponseIdSchema, ZResponseUpdateSchema } from "./types/responses";

export const GET = async (request: Request, props: { params: Promise<{ responseId: string }> }) =>
@@ -51,10 +51,7 @@ export const GET = async (request: Request, props: { params: Promise<{ responseI
return handleApiError(request, response.error as ApiErrorResponseV2);
}

return responses.successResponse({
...response,
data: { ...response.data, data: resolveStorageUrlsInObject(response.data.data) },
});
return responses.successResponse(response);
},
});

@@ -201,6 +198,7 @@ export const PUT = (request: Request, props: { params: Promise<{ responseId: str
questionsResponse.data.blocks,
body.data,
body.language ?? "en",
body.finished,
questionsResponse.data.questions
);

@@ -246,10 +244,7 @@ export const PUT = (request: Request, props: { params: Promise<{ responseId: str
auditLog.newObject = response.data;
}

return responses.successResponse({
...response,
data: { ...response.data, data: resolveStorageUrlsInObject(response.data.data) },
});
return responses.successResponse(response);
},
action: "updated",
targetType: "response",

@@ -12,7 +12,7 @@ import { getSurveyQuestions } from "@/modules/api/v2/management/responses/[respo
import { ZGetResponsesFilter, ZResponseInput } from "@/modules/api/v2/management/responses/types/responses";
import { ApiErrorResponseV2 } from "@/modules/api/v2/types/api-error";
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
import { resolveStorageUrlsInObject, validateFileUploads } from "@/modules/storage/utils";
import { validateFileUploads } from "@/modules/storage/utils";
import { createResponseWithQuotaEvaluation, getResponses } from "./lib/response";

export const GET = async (request: NextRequest) =>
@@ -44,9 +44,7 @@ export const GET = async (request: NextRequest) =>

environmentResponses.push(...res.data.data);

return responses.successResponse({
data: environmentResponses.map((r) => ({ ...r, data: resolveStorageUrlsInObject(r.data) })),
});
return responses.successResponse({ data: environmentResponses });
},
});

@@ -136,6 +134,7 @@ export const POST = async (request: Request) =>
surveyQuestions.data.blocks,
body.data,
body.language ?? "en",
body.finished,
surveyQuestions.data.questions
);
@@ -30,4 +30,4 @@ export const rateLimitConfigs = {
upload: { interval: 60, allowedPerInterval: 5, namespace: "storage:upload" }, // 5 per minute
delete: { interval: 60, allowedPerInterval: 5, namespace: "storage:delete" }, // 5 per minute
},
} as const;
};
@@ -1229,104 +1229,6 @@ describe("segmentFilterToPrismaQuery", () => {
}
});

test("number filter falls back to raw SQL when un-migrated rows exist", async () => {
mockFindFirst.mockResolvedValue({ id: "unmigrated-row-1" });
mockQueryRawUnsafe.mockResolvedValue([{ contactId: "mock-contact-1" }]);

const filters: TBaseFilters = [
{
id: "filter_1",
connector: null,
resource: {
id: "attr_1",
root: {
type: "attribute" as const,
contactAttributeKey: "age",
},
value: 25,
qualifier: {
operator: "greaterThan",
},
},
},
];

const result = await segmentFilterToPrismaQuery(mockSegmentId, filters, mockEnvironmentId);

expect(result.ok).toBe(true);
if (result.ok) {
const filterClause = result.data.whereClause.AND?.[1] as any;
expect(filterClause.AND[0]).toEqual({
OR: [
{
attributes: {
some: {
attributeKey: { key: "age" },
valueNumber: { gt: 25 },
},
},
},
{ id: { in: ["mock-contact-1"] } },
],
});
}

expect(mockFindFirst).toHaveBeenCalledWith({
where: {
attributeKey: {
key: "age",
environmentId: mockEnvironmentId,
dataType: "number",
},
valueNumber: null,
},
select: { id: true },
});

expect(mockQueryRawUnsafe).toHaveBeenCalled();
const sqlCall = mockQueryRawUnsafe.mock.calls[0];
expect(sqlCall[0]).toContain('cak."environmentId" = $4');
expect(sqlCall[4]).toBe(mockEnvironmentId);
});

test("number filter uses clean Prisma query when backfill is complete", async () => {
const filters: TBaseFilters = [
{
id: "filter_1",
connector: null,
resource: {
id: "attr_1",
root: {
type: "attribute" as const,
contactAttributeKey: "score",
},
value: 100,
qualifier: {
operator: "lessEqual",
},
},
},
];

const result = await segmentFilterToPrismaQuery(mockSegmentId, filters, mockEnvironmentId);

expect(result.ok).toBe(true);
if (result.ok) {
const filterClause = result.data.whereClause.AND?.[1] as any;
expect(filterClause.AND[0]).toEqual({
attributes: {
some: {
attributeKey: { key: "score" },
valueNumber: { lte: 100 },
},
},
});
}

expect(mockFindFirst).toHaveBeenCalled();
expect(mockQueryRawUnsafe).not.toHaveBeenCalled();
});

// ==========================================
// DATE FILTER TESTS
// ==========================================
@@ -1363,7 +1265,7 @@ describe("segmentFilterToPrismaQuery", () => {
{
attributes: {
some: {
attributeKey: { key: "purchaseDate", dataType: "date" },
attributeKey: { key: "purchaseDate" },
OR: [
{ valueDate: { lt: new Date(targetDate) } },
{ valueDate: null, value: { lt: new Date(targetDate).toISOString() } },
@@ -1407,7 +1309,7 @@ describe("segmentFilterToPrismaQuery", () => {
{
attributes: {
some: {
attributeKey: { key: "signupDate", dataType: "date" },
attributeKey: { key: "signupDate" },
OR: [
{ valueDate: { gt: new Date(targetDate) } },
{ valueDate: null, value: { gt: new Date(targetDate).toISOString() } },
@@ -1452,7 +1354,7 @@ describe("segmentFilterToPrismaQuery", () => {
{
attributes: {
some: {
attributeKey: { key: "lastActivityDate", dataType: "date" },
attributeKey: { key: "lastActivityDate" },
OR: [
{ valueDate: { gte: new Date(startDate), lte: new Date(endDate) } },
{

@@ -107,7 +107,7 @@ const buildDateAttributeFilterWhereClause = (filter: TSegmentAttributeFilter): P
return {
attributes: {
some: {
attributeKey: { key: contactAttributeKey, dataType: "date" },
attributeKey: { key: contactAttributeKey },
OR: [{ valueDate: dateCondition }, { valueDate: null, value: stringDateCondition }],
},
},
@@ -165,7 +165,6 @@ const buildNumberAttributeFilterWhereClause = async (
attributeKey: {
key: contactAttributeKey,
environmentId,
dataType: "number",
},
valueNumber: null,
},
@@ -184,7 +183,6 @@ const buildNumberAttributeFilterWhereClause = async (
JOIN "ContactAttributeKey" cak ON ca."attributeKeyId" = cak.id
WHERE cak.key = $1
AND cak."environmentId" = $4
AND cak."dataType" = 'number'
AND ca."valueNumber" IS NULL
AND ca.value ~ $3
AND ca.value::double precision ${sqlOp} $2
@@ -37,7 +37,6 @@ vi.mock("@formbricks/database", () => ({
create: vi.fn(),
delete: vi.fn(),
update: vi.fn(),
upsert: vi.fn(),
findFirst: vi.fn(),
},
survey: {
@@ -207,73 +206,6 @@ describe("Segment Service Tests", () => {
vi.mocked(prisma.segment.create).mockRejectedValue(new Error("DB error"));
await expect(createSegment(mockSegmentCreateInput)).rejects.toThrow(Error);
});

test("should upsert a private segment without surveyId", async () => {
const privateInput: TSegmentCreateInput = {
...mockSegmentCreateInput,
isPrivate: true,
};
const privateSegmentPrisma = { ...mockSegmentPrisma, isPrivate: true };
vi.mocked(prisma.segment.upsert).mockResolvedValue(privateSegmentPrisma);
const segment = await createSegment(privateInput);
expect(segment).toEqual({ ...mockSegment, isPrivate: true });
expect(prisma.segment.upsert).toHaveBeenCalledWith({
where: {
environmentId_title: {
environmentId,
title: privateInput.title,
},
},
create: {
environmentId,
title: privateInput.title,
description: undefined,
isPrivate: true,
filters: [],
},
update: {
description: undefined,
filters: [],
},
select: selectSegment,
});
expect(prisma.segment.create).not.toHaveBeenCalled();
});

test("should upsert a private segment with surveyId", async () => {
const privateInputWithSurvey: TSegmentCreateInput = {
...mockSegmentCreateInput,
isPrivate: true,
surveyId,
};
const privateSegmentPrisma = { ...mockSegmentPrisma, isPrivate: true };
vi.mocked(prisma.segment.upsert).mockResolvedValue(privateSegmentPrisma);
const segment = await createSegment(privateInputWithSurvey);
expect(segment).toEqual({ ...mockSegment, isPrivate: true });
expect(prisma.segment.upsert).toHaveBeenCalledWith({
where: {
environmentId_title: {
environmentId,
title: privateInputWithSurvey.title,
},
},
create: {
environmentId,
title: privateInputWithSurvey.title,
description: undefined,
isPrivate: true,
filters: [],
surveys: { connect: { id: surveyId } },
},
update: {
description: undefined,
filters: [],
surveys: { connect: { id: surveyId } },
},
select: selectSegment,
});
expect(prisma.segment.create).not.toHaveBeenCalled();
});
});

describe("cloneSegment", () => {
@@ -136,48 +136,28 @@ export const createSegment = async (segmentCreateInput: TSegmentCreateInput): Pr

const { description, environmentId, filters, isPrivate, surveyId, title } = segmentCreateInput;

const surveyConnect = surveyId ? { surveys: { connect: { id: surveyId } } } : {};
let data: Prisma.SegmentCreateArgs["data"] = {
environmentId,
title,
description,
isPrivate,
filters,
};

if (surveyId) {
data = {
...data,
surveys: {
connect: {
id: surveyId,
},
},
};
}

try {
// Private segments use upsert because auto-save may have already created a
// default (empty-filter) segment via connectOrCreate before the user publishes.
// Without upsert the second create hits the (environmentId, title) unique constraint.
if (isPrivate) {
const segment = await prisma.segment.upsert({
where: {
environmentId_title: {
environmentId,
title,
},
},
create: {
environmentId,
title,
description,
isPrivate,
filters,
...surveyConnect,
},
update: {
description,
filters,
...surveyConnect,
},
select: selectSegment,
});

return transformPrismaSegment(segment);
}

const segment = await prisma.segment.create({
data: {
environmentId,
title,
description,
isPrivate,
filters,
...surveyConnect,
},
data,
select: selectSegment,
});
@@ -8,8 +8,6 @@ import {
isValidFileTypeForExtension,
isValidImageFile,
resolveStorageUrl,
resolveStorageUrlAuto,
resolveStorageUrlsInObject,
sanitizeFileName,
validateFileUploads,
validateSingleFile,
@@ -408,7 +406,7 @@ describe("storage utils", () => {
expect(resolveStorageUrl("")).toBe("");
});

test("should return absolute URL unchanged", () => {
test("should return absolute URL unchanged (backward compatibility)", () => {
const httpsUrl = "https://example.com/storage/env-123/public/image.jpg";
const httpUrl = "http://example.com/storage/env-123/public/image.jpg";

@@ -417,12 +415,14 @@ describe("storage utils", () => {
});

test("should resolve relative /storage/ path to absolute URL", async () => {
// Use actual implementation with mocked dependencies
const { resolveStorageUrl: actualResolveStorageUrl } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const relativePath = "/storage/env-123/public/image.jpg";
const result = actualResolveStorageUrl(relativePath);

// Should prepend the base URL (from mocked WEBAPP_URL or getPublicDomain)
expect(result).toContain("/storage/env-123/public/image.jpg");
expect(result.startsWith("http")).toBe(true);
});
@@ -432,209 +432,4 @@ describe("storage utils", () => {
expect(resolveStorageUrl("relative/path.jpg")).toBe("relative/path.jpg");
});
});

describe("resolveStorageUrlAuto", () => {
test("should return non-storage strings unchanged", () => {
expect(resolveStorageUrlAuto("hello world")).toBe("hello world");
expect(resolveStorageUrlAuto("/some/other/path")).toBe("/some/other/path");
expect(resolveStorageUrlAuto("https://example.com/image.jpg")).toBe("https://example.com/image.jpg");
});

test("should NOT transform free-text values that merely start with /storage/", () => {
expect(resolveStorageUrlAuto("/storage/help")).toBe("/storage/help");
expect(resolveStorageUrlAuto("/storage/")).toBe("/storage/");
expect(resolveStorageUrlAuto("/storage/some-text")).toBe("/storage/some-text");
expect(resolveStorageUrlAuto("/storage/foo/bar")).toBe("/storage/foo/bar");
});

test("should resolve public storage URL", async () => {
const { resolveStorageUrlAuto: actual } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const result = actual("/storage/env-123/public/image.jpg");
expect(result).toContain("/storage/env-123/public/image.jpg");
expect(result.startsWith("http")).toBe(true);
});

test("should detect private access type from URL path", () => {
const privateUrl = "/storage/env-123/private/file.pdf";
const publicUrl = "/storage/env-123/public/image.jpg";

expect(privateUrl.includes("/private/")).toBe(true);
expect(publicUrl.includes("/private/")).toBe(false);
});
});

describe("resolveStorageUrlsInObject", () => {
test("should return null and undefined as-is", () => {
expect(resolveStorageUrlsInObject(null)).toBeNull();
expect(resolveStorageUrlsInObject(undefined)).toBeUndefined();
});

test("should return primitive values unchanged", () => {
expect(resolveStorageUrlsInObject(42)).toBe(42);
expect(resolveStorageUrlsInObject(true)).toBe(true);
expect(resolveStorageUrlsInObject("hello")).toBe("hello");
});

test("should NOT transform free-text that merely starts with /storage/", () => {
expect(resolveStorageUrlsInObject("/storage/help")).toBe("/storage/help");
expect(resolveStorageUrlsInObject("/storage/")).toBe("/storage/");

const input = {
questionId1: "/storage/",
questionId2: "/storage/help",
questionId3: "/storage/some-text",
questionId4: "/storage/foo/bar",
realUrl: "/storage/env-123/public/image.jpg",
};
const result = resolveStorageUrlsInObject(input);
expect(result.questionId1).toBe("/storage/");
expect(result.questionId2).toBe("/storage/help");
expect(result.questionId3).toBe("/storage/some-text");
expect(result.questionId4).toBe("/storage/foo/bar");
// realUrl still gets resolved because it matches the actual format
expect(result.realUrl).not.toBe("/storage/env-123/public/image.jpg");
});

test("should preserve Date instances", () => {
const date = new Date("2026-01-01");
expect(resolveStorageUrlsInObject(date)).toBe(date);
});

test("should resolve storage URL strings", async () => {
const { resolveStorageUrlsInObject: actual } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const result = actual("/storage/env-123/public/image.jpg");
expect(typeof result).toBe("string");
expect(result).toContain("/storage/env-123/public/image.jpg");
expect((result as string).startsWith("http")).toBe(true);
});

test("should resolve URLs in arrays", async () => {
const { resolveStorageUrlsInObject: actual } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const input = ["/storage/env-123/public/a.jpg", "plain text"];
const result = actual(input);

expect(result[0]).toContain("/storage/env-123/public/a.jpg");
expect(result[0].startsWith("http")).toBe(true);
expect(result[1]).toBe("plain text");
});

test("should resolve URLs in nested objects", async () => {
const { resolveStorageUrlsInObject: actual } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const input = {
name: "Test Survey",
welcomeCard: {
fileUrl: "/storage/env-123/public/welcome.png",
headline: "Hello",
},
elements: [
{
imageUrl: "/storage/env-123/public/q1.jpg",
choices: [
{ id: "c1", imageUrl: "/storage/env-123/public/choice1.jpg" },
{ id: "c2", imageUrl: "https://external.com/image.jpg" },
],
},
],
count: 5,
createdAt: new Date("2026-01-01"),
};

const result = actual(input);

expect(result.welcomeCard.fileUrl.startsWith("http")).toBe(true);
expect(result.welcomeCard.headline).toBe("Hello");
expect(result.elements[0].imageUrl.startsWith("http")).toBe(true);
expect(result.elements[0].choices[0].imageUrl.startsWith("http")).toBe(true);
expect(result.elements[0].choices[1].imageUrl).toBe("https://external.com/image.jpg");
expect(result.count).toBe(5);
expect(result.createdAt).toEqual(new Date("2026-01-01"));
expect(result.name).toBe("Test Survey");
});

test("should resolve URLs in deeply nested objects", async () => {
const { resolveStorageUrlsInObject: actual } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const input = {
level1: {
level2: {
level3: {
level4: {
level5: {
imageUrl: "/storage/env-123/public/deep.png",
items: [
{
nested: {
url: "/storage/env-123/public/nested.jpg",
label: "keep me",
},
},
"plain string",
42,
null,
],
},
},
sibling: "/storage/env-123/public/sibling.png",
},
},
untouched: { a: { b: { c: "no change" } } },
},
};

const result = actual(input);

expect(result.level1.level2.level3.level4.level5.imageUrl).toContain(
"/storage/env-123/public/deep.png"
);
expect(result.level1.level2.level3.level4.level5.imageUrl.startsWith("http")).toBe(true);

// @ts-expect-error - items is an array of unknown types
expect(result.level1.level2.level3.level4.level5.items[0].nested.url).toContain(
"/storage/env-123/public/nested.jpg"
);
// @ts-expect-error - items is an array of unknown types
expect(result.level1.level2.level3.level4.level5.items[0].nested.url.startsWith("http")).toBe(true);
// @ts-expect-error - items is an array of unknown types
expect(result.level1.level2.level3.level4.level5.items[0].nested.label).toBe("keep me");

expect(result.level1.level2.level3.level4.level5.items[1]).toBe("plain string");
expect(result.level1.level2.level3.level4.level5.items[2]).toBe(42);
expect(result.level1.level2.level3.level4.level5.items[3]).toBeNull();

expect(result.level1.level2.level3.sibling).toContain("/storage/env-123/public/sibling.png");
expect(result.level1.level2.level3.sibling.startsWith("http")).toBe(true);

expect(result.level1.untouched.a.b.c).toBe("no change");
});

test("should handle response data with file upload URLs", async () => {
const { resolveStorageUrlsInObject: actual } =
await vi.importActual<typeof import("@/modules/storage/utils")>("@/modules/storage/utils");

const responseData = {
questionId1: "text answer",
questionId2: 42,
fileUploadId: ["/storage/env-123/public/doc.pdf", "/storage/env-123/public/img.png"],
};

const result = actual(responseData);

expect(result.questionId1).toBe("text answer");
expect(result.questionId2).toBe(42);
const fileUrls = result.fileUploadId;
expect(fileUrls[0]).toContain("/storage/env-123/public/doc.pdf");
expect(fileUrls[0].startsWith("http")).toBe(true);
expect(fileUrls[1]).toContain("/storage/env-123/public/img.png");
expect(fileUrls[1].startsWith("http")).toBe(true);
});
});
});
@@ -151,7 +151,7 @@ export const getErrorResponseFromStorageError = (

/**
* Resolves a storage URL to an absolute URL.
* - If already absolute, returns as-is
* - If already absolute, returns as-is (backward compatibility for old data)
* - If relative (/storage/...), prepends the appropriate base URL
* @param url The storage URL (relative or absolute)
* @param accessType The access type to determine which base URL to use (defaults to "public")
@@ -163,7 +163,7 @@ export const resolveStorageUrl = (
): string => {
if (!url) return "";

// Already absolute URL - return as-is
// Already absolute URL - return as-is (backward compatibility for old data)
if (url.startsWith("http://") || url.startsWith("https://")) {
return url;
}
@@ -176,41 +176,3 @@ export const resolveStorageUrl = (

return url;
};

// Matches the actual storage URL format: /storage/{id}/{public|private}/{filename...}
const STORAGE_URL_PATTERN = /^\/storage\/[^/]+\/(public|private)\/.+/;

const isStorageUrl = (value: string): boolean => STORAGE_URL_PATTERN.test(value);

export const resolveStorageUrlAuto = (url: string): string => {
if (!isStorageUrl(url)) return url;
const accessType = url.includes("/private/") ? "private" : "public";
return resolveStorageUrl(url, accessType);
};

/**
* Recursively walks an object/array and resolves all relative storage URLs
* Preserves the original structure; skips Date instances and non-object primitives.
*/
export const resolveStorageUrlsInObject = <T>(obj: T): T => {
if (obj === null || obj === undefined) return obj;

if (typeof obj === "string") {
return resolveStorageUrlAuto(obj) as T;
}

if (typeof obj !== "object") return obj;

if (obj instanceof Date) return obj;

if (Array.isArray(obj)) {
return obj.map((item) => resolveStorageUrlsInObject(item)) as T;
}

const result: Record<string, unknown> = {};
for (const [key, value] of Object.entries(obj as Record<string, unknown>)) {
result[key] = resolveStorageUrlsInObject(value);
}

return result as T;
};
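A short usage sketch of the helpers above, consistent with the tests earlier in this compare (the resolved host in the comment is illustrative):

```typescript
import { resolveStorageUrlAuto, resolveStorageUrlsInObject } from "@/modules/storage/utils";

// Free text that merely starts with /storage/ is left untouched.
resolveStorageUrlAuto("/storage/help"); // "/storage/help"

// A real storage path is resolved against the instance's base URL,
// e.g. "https://app.example.com/storage/env-123/public/image.jpg".
resolveStorageUrlAuto("/storage/env-123/public/image.jpg");

// Walks nested response data; non-URL values and Dates pass through unchanged.
const resolved = resolveStorageUrlsInObject({
  answer: "text answer",
  files: ["/storage/env-123/private/doc.pdf"],
  submittedAt: new Date(),
});
```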
@@ -40,10 +40,7 @@ export const SurveyInline = (props: Omit<SurveyContainerProps, "containerId">) =
isLoadingScript = true;
try {
const scriptUrl = props.appUrl ? `${props.appUrl}/js/surveys.umd.cjs` : "/js/surveys.umd.cjs";
const response = await fetch(
scriptUrl,
process.env.NODE_ENV === "development" ? { cache: "no-store" } : {}
);
const response = await fetch(scriptUrl);

if (!response.ok) {
throw new Error("Failed to load the surveys package");
@@ -4,182 +4,12 @@ description: "Formbricks Self-hosted version migration"
icon: "arrow-right"
---

## v4.7

Formbricks v4.7 introduces **typed contact attributes** with native `number` and `date` data types. This enables comparison-based segment filters (e.g. "signup date before 2025-01-01") that were previously not possible with string-only attribute values.

### What Happens Automatically

When Formbricks v4.7 starts for the first time, the data migration will:

1. Analyze all existing contact attribute keys and infer their data types (`text`, `number`, or `date`) based on the stored values
2. Update the `ContactAttributeKey` table with the detected `dataType` for each key
3. **If your instance has fewer than 1,000,000 contact attribute rows**: backfill the new `valueNumber` and `valueDate` columns inline. No manual action is needed.
4. **If your instance has 1,000,000 or more contact attribute rows**: the value backfill is skipped to avoid hitting the migration timeout. You will need to run a standalone backfill script after the upgrade.

<Info>
Most self-hosted instances have far fewer than 1,000,000 contact attribute rows (a typical setup with 100K
contacts and 5-10 attributes each lands around 500K-1M rows). If you are below the threshold, the migration
handles everything automatically and you can skip the manual backfill step below.
</Info>
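If you are unsure which case applies, you can count the rows before upgrading. A sketch for the default Docker setup; the `ContactAttribute` table name is an assumption based on the schema referenced elsewhere in this release:

```bash
# Count contact attribute rows to compare against the 1,000,000 threshold.
# Container, user, database, and table names are assumptions from the default setup.
docker exec formbricks-postgres-1 psql -U postgres -d formbricks \
  -c 'SELECT COUNT(*) FROM "ContactAttribute";'
```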
### Steps to Migrate

**1. Backup your Database**

<Tabs>
<Tab title="Docker">
Before running these steps, navigate to the `formbricks` directory where your `docker-compose.yml` file is located.

```bash
docker exec formbricks-postgres-1 pg_dump -Fc -U postgres -d formbricks > formbricks_pre_v4.7_$(date +%Y%m%d_%H%M%S).dump
```

<Info>
If you run into "**No such container**", use `docker ps` to find your container name, e.g.
`formbricks_postgres_1`.
</Info>
</Tab>
<Tab title="Kubernetes">
If you are using the **in-cluster PostgreSQL** deployed by the Helm chart:

```bash
kubectl exec -n formbricks formbricks-postgresql-0 -- pg_dump -Fc -U formbricks -d formbricks > formbricks_pre_v4.7_$(date +%Y%m%d_%H%M%S).dump
```

<Info>
If your PostgreSQL pod has a different name, run `kubectl get pods -n formbricks` to find it.
</Info>

If you are using a **managed PostgreSQL** service (e.g. AWS RDS, Cloud SQL), use your provider's backup/snapshot feature or run `pg_dump` directly against the external host.
</Tab>
</Tabs>

**2. Upgrade to Formbricks v4.7**

<Tabs>
<Tab title="Docker">
```bash
# Pull the latest version
docker compose pull

# Stop the current instance
docker compose down

# Start with Formbricks v4.7
docker compose up -d
```
</Tab>
<Tab title="Kubernetes">
```bash
helm upgrade formbricks oci://ghcr.io/formbricks/helm-charts/formbricks \
-n formbricks \
--set deployment.image.tag=v4.7.0
```

<Info>
The Helm chart includes a migration Job that automatically runs Prisma schema migrations as a
PreSync hook before the new pods start. No manual migration step is needed.
</Info>
</Tab>
</Tabs>

**3. Check the Migration Logs**

After Formbricks starts, check the logs to see whether the value backfill was completed or skipped:

<Tabs>
<Tab title="Docker">
```bash
docker compose logs formbricks | grep -i "backfill"
```
</Tab>
<Tab title="Kubernetes">
```bash
# Check the application pod logs
kubectl logs -n formbricks -l app.kubernetes.io/name=formbricks --tail=200 | grep -i "backfill"
```

If the Helm migration Job ran, you can also inspect its logs:

```bash
kubectl logs -n formbricks job/formbricks-migration
```
</Tab>
</Tabs>

If you see a message like `Skipping value backfill (X rows >= 1000000 threshold)`, proceed to step 4. Otherwise, the migration is complete and no further action is needed.

**4. Run the Backfill Script (large datasets only)**

If the migration skipped the value backfill, run the standalone backfill script inside the running Formbricks container:

<Tabs>
<Tab title="Docker">
```bash
docker exec formbricks node packages/database/dist/scripts/backfill-attribute-values.js
```

<Info>Replace `formbricks` with your actual container name if it differs. Use `docker ps` to find it.</Info>
</Tab>
<Tab title="Kubernetes">
```bash
kubectl exec -n formbricks deploy/formbricks -- node packages/database/dist/scripts/backfill-attribute-values.js
```

<Info>
If your Formbricks deployment has a different name, run `kubectl get deploy -n formbricks` to find it.
</Info>
</Tab>
</Tabs>

The script will output progress as it runs:

```
========================================
Attribute Value Backfill Script
========================================

Fetching number-type attribute keys...
Found 12 number-type keys. Backfilling valueNumber...
Number backfill progress: 10/12 keys (48230 rows updated)
Number backfill progress: 12/12 keys (52104 rows updated)

Fetching date-type attribute keys...
Found 5 date-type keys. Backfilling valueDate...
Date backfill progress: 5/5 keys (31200 rows updated)

========================================
Backfill Complete!
========================================
valueNumber rows updated: 52104
valueDate rows updated: 31200
Duration: 42.3s
========================================
```

Key characteristics of the backfill script:

- **Safe to run while Formbricks is live** -- it does not lock the entire table or wrap work in a long transaction
- **Idempotent** -- it only updates rows where the typed columns are still `NULL`, so you can safely run it multiple times (see the verification query below)
- **Resumable** -- each batch commits independently, so if the process is interrupted you can re-run it and it picks up where it left off
- **No timeout risk** -- unlike the migration, this script runs outside the migration transaction and has no time limit
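Because the script only touches rows where the typed columns are still `NULL`, a count of the remaining `NULL` rows shows whether the backfill has finished. A sketch under the same container and credential assumptions as the backup command above; the table and column names mirror the raw SQL used by this release's query builder:

```bash
# Number-typed attribute rows still awaiting a valueNumber backfill (0 = done).
docker exec formbricks-postgres-1 psql -U postgres -d formbricks -c \
  "SELECT COUNT(*) FROM \"ContactAttribute\" ca
     JOIN \"ContactAttributeKey\" cak ON ca.\"attributeKeyId\" = cak.id
    WHERE cak.\"dataType\" = 'number' AND ca.\"valueNumber\" IS NULL;"
```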
**5. Verify the Upgrade**
|
||||
|
||||
- Access your Formbricks instance at the same URL as before
|
||||
- If you use contact segments with number or date filters, verify they return the expected results
|
||||
- Check that existing surveys and response data are intact
|
||||
|
||||
---
|
||||
|
||||
## v4.0
|
||||
|
||||
<Warning>
|
||||
**Important: Migration Required**
|
||||
|
||||
Formbricks 4 introduces additional requirements for self-hosting setups and makes a dedicated Redis cache as well as S3-compatible file storage mandatory.
|
||||
|
||||
Formbricks 4 introduces additional requirements for self-hosting setups and makes a dedicated Redis cache as well as S3-compatible file storage mandatory.
|
||||
</Warning>
|
||||
|
||||
Formbricks 4.0 is a **major milestone** that sets up the technical foundation for future iterations and feature improvements. This release focuses on modernizing core infrastructure components to improve reliability, scalability, and enable advanced features going forward.
|
||||
@@ -187,11 +17,9 @@ Formbricks 4.0 is a **major milestone** that sets up the technical foundation fo
|
||||
### What's New in Formbricks 4.0
|
||||
|
||||
**🚀 New Enterprise Features:**
|
||||
|
||||
- **Quotas Management**: Advanced quota controls for enterprise users
|
||||
|
||||
**🏗️ Technical Foundation Improvements:**
|
||||
|
||||
- **Enhanced File Storage**: Improved file handling with better performance and reliability
|
||||
- **Improved Caching**: New caching functionality improving speed, extensibility and reliability
|
||||
- **Database Optimization**: Removal of unused database tables and fields for better performance
|
||||
@@ -211,8 +39,7 @@ These services are already included in the updated one-click setup for self-host
|
||||
We know this represents more moving parts in your infrastructure and might even introduce more complexity in hosting Formbricks, and we don't take this decision lightly. As Formbricks grows into a comprehensive Survey and Experience Management platform, we've reached a point where the simple, single-service approach was holding back our ability to deliver the reliable, feature-rich product our users demand and deserve.
|
||||
|
||||
By moving to dedicated, professional-grade services for these critical functions, we're building the foundation needed to deliver:
|
||||
|
||||
- **Enterprise-grade reliability** with proper redundancy and backup capabilities
|
||||
- **Enterprise-grade reliability** with proper redundancy and backup capabilities
|
||||
- **Advanced features** that require sophisticated caching and file processing
|
||||
- **Better performance** through optimized, dedicated services
|
||||
- **Future scalability** to support larger deployments and more complex use cases without the need to maintain two different approaches
|
||||
@@ -225,7 +52,7 @@ Additional migration steps are needed if you are using a self-hosted Formbricks
|
||||
|
||||
### One-Click Setup
|
||||
|
||||
For users using our official one-click setup, we provide an automated migration using a migration script:
|
||||
For users using our official one-click setup, we provide an automated migration using a migration script:
|
||||
|
||||
```bash
|
||||
# Download the latest script
|
||||
@@ -240,11 +67,11 @@ chmod +x migrate-to-v4.sh
|
||||
```
|
||||
|
||||
This script guides you through the steps for the infrastructure migration and does the following:
|
||||
|
||||
- Adds a Redis service to your setup and configures it
|
||||
- Adds a MinIO service (open source S3-alternative) to your setup, configures it and migrates local files to it
|
||||
- Pulls the latest Formbricks image and updates your instance
|
||||
|
||||
|
||||
### Manual Setup
|
||||
|
||||
If you use a different setup to host your Formbricks instance, you need to make sure to make the necessary adjustments to run Formbricks 4.0.
|
||||
@@ -260,7 +87,6 @@ You need to configure the `REDIS_URL` environment variable and point it to your
|
||||
To use file storage (e.g., file upload questions, image choice questions, custom survey backgrounds, etc.), you need to have S3-compatible file storage set up and connected to Formbricks.

Formbricks works with any S3-compatible storage provider, for example:

- AWS S3
- DigitalOcean Spaces
- Hetzner Object Storage
@@ -275,7 +101,6 @@ Please make sure to set up a storage bucket with one of these solutions and then
S3_BUCKET_NAME: formbricks-uploads
S3_ENDPOINT_URL: http://minio:9000 # not needed for AWS S3
```
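
For a fuller picture, here is a sketch of an S3-compatible configuration. The bucket and endpoint variables mirror the snippet above; the credential and region variable names are assumptions, so check the environment variable reference for your Formbricks version:

```bash
S3_ACCESS_KEY="your-access-key"     # assumed variable name; placeholder value
S3_SECRET_KEY="your-secret-key"     # assumed variable name; placeholder value
S3_REGION="eu-central-1"            # assumed variable name; placeholder region
S3_BUCKET_NAME="formbricks-uploads"
S3_ENDPOINT_URL="http://minio:9000" # not needed for AWS S3
```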

#### Upgrade Process

**1. Backup your Database**
@@ -287,8 +112,8 @@ docker exec formbricks-postgres-1 pg_dump -Fc -U postgres -d formbricks > formbr
```

<Info>
If you run into "**No such container**", use `docker ps` to find your container name, e.g. `formbricks_postgres_1`.
</Info>
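
It is worth verifying that the dump restores cleanly before upgrading. A hypothetical sketch for the custom-format (`-Fc`) dump; the container, database, and file names mirror the backup command and are placeholders for your own:

```bash
# Restore into a scratch database to verify the dump (names are placeholders)
docker exec formbricks-postgres-1 createdb -U postgres formbricks_verify
docker exec -i formbricks-postgres-1 pg_restore -U postgres -d formbricks_verify < formbricks_backup.dump
```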
**2. Upgrade to Formbricks 4.0**
@@ -309,7 +134,6 @@ docker compose up -d
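
For the compose-based setup, the assumed flow is to pull the new image and recreate the containers:

```bash
docker compose pull  # fetch the Formbricks 4.0 image
docker compose up -d # recreate the containers in the background
```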
**3. Automatic Database Migration**

When you start Formbricks 4.0 for the first time, it will **automatically**:

- Detect and apply required database schema updates
- Remove unused database tables and fields
- Optimize the database structure for better performance

@@ -1,94 +1,41 @@

---
title: "Rate Limiting"
description: "Current request rate limits in Formbricks"
icon: "timer"
---

Formbricks applies request rate limits to protect against abuse and keep API usage fair. Rate limiting is enabled by default; if a client exceeds the allowed number of requests within a time window, the API returns a `429 Too Many Requests` status code.

Rate limits are scoped by identifier, depending on the endpoint:

- IP hash (for unauthenticated/client-side routes and public actions)
- API key ID (for authenticated API calls)
- User ID (for authenticated session-based calls and server actions)
- Organization ID (for follow-up email dispatch)

## Management API Rate Limits

These are the current limits for Management APIs:

| **Route Group** | **Limit** | **Window** | **Identifier** |
| --- | --- | --- | --- |
| `/api/v1/management/*` (except `/api/v1/management/storage`), `/api/v1/webhooks/*`, `/api/v1/integrations/*`, `/api/v1/management/me` | 100 requests | 1 minute | API key ID or session user ID |
| `/api/v2/management/*` (and other v2 authenticated routes that use `authenticatedApiClient`) | 100 requests | 1 minute | API key ID |
| `POST /api/v1/management/storage` | 5 requests | 1 minute | API key ID or session user ID |
## All Enforced Limits

| **Config** | **Limit** | **Window** | **Identifier** | **Used For** |
| --- | --- | --- | --- | --- |
| `auth.login` | 10 requests | 15 minutes | IP hash | Email/password login flow (`/api/auth/callback/credentials`) |
| `auth.signup` | 30 requests | 60 minutes | IP hash | Signup server action |
| `auth.forgotPassword` | 5 requests | 60 minutes | IP hash | Forgot password server action |
| `auth.verifyEmail` | 10 requests | 60 minutes | IP hash | Email verification callback + resend verification action |
| `api.v1` | 100 requests | 1 minute | API key ID or session user ID | v1 management, webhooks, integrations, and `/api/v1/management/me` |
| `api.v2` | 100 requests | 1 minute | API key ID | v2 authenticated API wrapper (`authenticatedApiClient`) |
| `api.client` | 100 requests | 1 minute | IP hash | v1 client API routes (except `/api/v1/client/og` and storage upload override), plus v2 routes that re-use those v1 handlers |
| `storage.upload` | 5 requests | 1 minute | IP hash or authenticated ID | Client storage upload and management storage upload |
| `storage.delete` | 5 requests | 1 minute | API key ID or session user ID | `DELETE /storage/[environmentId]/[accessType]/[fileName]` |
| `actions.emailUpdate` | 3 requests | 60 minutes | User ID | Profile email update action |
| `actions.surveyFollowUp` | 50 requests | 60 minutes | Organization ID | Survey follow-up email processing |
| `actions.sendLinkSurveyEmail` | 10 requests | 60 minutes | IP hash | Link survey email send action |
| `actions.licenseRecheck` | 5 requests | 1 minute | User ID | Enterprise license recheck action |
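
To make the mechanics concrete, here is a minimal sketch of a fixed-window limiter of the kind described above, assuming `ioredis` as the Redis client. It is an illustration, not Formbricks' actual implementation, and it includes the fail-open behavior covered under Operational Notes:

```typescript
import Redis from "ioredis";

const redis = new Redis(process.env.REDIS_URL ?? "redis://localhost:6379");

// Allow `limit` requests per identifier within each fixed window of `windowSeconds`.
export const isAllowed = async (
  identifier: string, // e.g. an IP hash, API key ID, user ID, or organization ID
  limit: number,
  windowSeconds: number
): Promise<boolean> => {
  // The bucket key changes every window, giving a fixed-window counter.
  const window = Math.floor(Date.now() / 1000 / windowSeconds);
  const key = `rate:${identifier}:${window}`;
  try {
    const count = await redis.incr(key);
    if (count === 1) {
      await redis.expire(key, windowSeconds); // the first hit starts the window TTL
    }
    return count <= limit;
  } catch {
    return true; // fail open: if Redis is unreachable, requests pass through
  }
};
```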
## Current Endpoint Exceptions

The following routes are currently not rate-limited by the server-side limiter:

- `GET /api/v1/client/og` (explicitly excluded)
- `POST /api/v2/client/[environmentId]/responses`
- `POST /api/v2/client/[environmentId]/displays`
- `GET /api/v2/health`
## 429 Response Shape

v1-style endpoints return:

```json
{
  "code": "too_many_requests",
  "message": "Maximum number of requests reached. Please try again later.",
  "details": {}
}
```

v2-style endpoints return:

```json
{
  "error": {
    "code": 429,
    "message": "Too Many Requests"
  }
}
```
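
On the client side, a caller can treat a `429` as a signal to back off and retry. A minimal sketch; the `Retry-After` header is an assumption, and the helper falls back to exponential backoff when it is absent:

```typescript
export const fetchWithBackoff = async (
  url: string,
  init?: RequestInit,
  maxRetries = 3
): Promise<Response> => {
  for (let attempt = 0; ; attempt++) {
    const res = await fetch(url, init);
    if (res.status !== 429 || attempt >= maxRetries) return res;
    // Honor Retry-After (seconds) if present, otherwise back off exponentially.
    const waitSeconds = Number(res.headers.get("Retry-After")) || 2 ** attempt;
    await new Promise((resolve) => setTimeout(resolve, waitSeconds * 1000));
  }
};
```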
## Disabling Rate Limiting

For self-hosters, rate limiting can be disabled if necessary. However, we **strongly recommend keeping rate limiting enabled in production environments** to prevent abuse.

To disable rate limiting, set the following environment variable:

```bash
RATE_LIMITING_DISABLED=1
```

After changing this value, restart the server to apply the new setting.

## Operational Notes

- Redis/Valkey is required for robust rate limiting (`REDIS_URL`).
- If Redis is unavailable at runtime, rate-limiter checks currently fail open (requests are allowed through without enforcement).
- Authentication failure audit logging uses a separate throttle (`shouldLogAuthFailure()`) and is intentionally **fail-closed**: when Redis is unavailable or errors occur, audit log entries are skipped entirely rather than written without throttle control. This prevents spam while preserving the hash-integrity chain required for compliance. Requests themselves are still allowed (rate limiting fails open, as noted above), but no authentication-failure audit logs are recorded while Redis is down.
@@ -16,6 +16,8 @@ The Churn Survey is among the most effective ways to identify weaknesses in your

* Follow-up to prevent bad reviews

* Coming soon: Make survey mandatory

## Overview

To run the Churn Survey in your app, proceed as follows:
@@ -78,6 +80,13 @@ Whenever a user visits this page, matches the filter conditions above and the re

Here is our complete [Actions manual](/xm-and-surveys/surveys/website-app-surveys/actions/) covering [No-Code](/xm-and-surveys/surveys/website-app-surveys/actions#setting-up-no-code-actions) and [Code](/xm-and-surveys/surveys/website-app-surveys/actions#setting-up-code-actions) Actions.

<Note>
**Pre-churn flow coming soon:** We're currently building full-screen survey pop-ups. You'll be able to prevent users from closing the survey unless they respond to it. It's certainly debatable if you want that, but you could force them to click through the survey before letting them cancel 🤷
</Note>

### 5. Select Action in the “When to ask” card


@@ -46,7 +46,13 @@ _Want to change the button color? Adjust it in the project settings!_

Save, and move over to the **Audience** tab.

### 3. Pre-segment your audience (coming soon)

<Note>
**Filter by Attribute coming soon:** We're working on pre-segmenting users by attributes. This manual will be updated in the coming days.
</Note>

Pre-segmentation isn't needed for this survey since you likely want to target all users who cancel their trial. You can use a specific user action, like clicking **Cancel Trial**, to show the survey only to users trying your product.

@@ -56,13 +62,13 @@ How you trigger your survey depends on your product. There are two options:

- **Trigger by Page view:** If you have a page like `/trial-cancelled` for users who cancel their trial subscription, create a user action with the type "Page View." Select "Limit to specific pages" and apply URL filters with these settings:



Whenever a user visits this page, the survey will be displayed ✅

- **Trigger by Button Click:** If you instead have a “Cancel Trial” button in your app, you can set up a user action with the `Inner Text` filter:



Please have a look at our complete [Actions manual](/xm-and-surveys/surveys/website-app-surveys/actions) if you have questions.
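
If you prefer a Code action over the No-Code `Inner Text` match, a sketch along these lines fires an equivalent trigger. The button selector is hypothetical, and it assumes the `@formbricks/js` SDK has already been initialized in your app:

```typescript
import formbricks from "@formbricks/js";

// Hypothetical cancel button; replace the selector with your own.
document.querySelector("#cancel-trial-button")?.addEventListener("click", () => {
  // Fires the "Cancel Trial" action the survey can be set to listen for.
  void formbricks.track("Cancel Trial");
});
```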
@@ -54,7 +54,13 @@ In the button settings you have to make sure it is set to “External URL”. In

Save, and move over to the “Audience” tab.

### 3. Pre-segment your audience (coming soon)

<Note>
**Filter by attribute coming soon:** We're working on pre-segmenting users by attributes. We will update this manual in the next few days.
</Note>

Once you've clicked over to the “Audience” tab, you can change the settings. In the **Who To Send** card, select “Filter audience by attribute”. This allows you to only show the prompt to a specific segment of your user base.

@@ -137,11 +137,6 @@ const checkRequiredField = (
    return null;
  }

  // CTA elements never block progression (informational only)
  if (element.type === TSurveyElementTypeEnum.CTA) {
    return null;
  }

  if (element.type === TSurveyElementTypeEnum.Ranking) {
    return validateRequiredRanking(value, t);
  }