Compare commits

..

98 Commits

Author SHA1 Message Date
Dhruwang
5cbfc6956b fix 2025-05-06 14:00:20 +05:30
Dhruwang
62f19ba4d9 fix 2025-05-06 13:53:12 +05:30
Dhruwang
70aba27e82 add go in each stage 2025-05-06 13:50:06 +05:30
Dhruwang
e94cf10c36 removed gcc 2025-05-06 12:58:13 +05:30
Dhruwang
0f324c75ab removed superchronic 2025-05-06 12:26:20 +05:30
Dhruwang
4814f8821a fix 2025-05-06 12:21:01 +05:30
Dhruwang
b44df3b6e1 fix 2025-05-06 11:53:19 +05:30
Dhruwang
a626600786 fix 2025-05-06 11:34:16 +05:30
Dhruwang
6fc1f77845 commented add step 2025-05-06 11:19:50 +05:30
Dhruwang
defc5b29e1 added release version 2025-05-06 11:03:02 +05:30
Dhruwang
e6c741bd3b fix 2025-05-06 11:02:44 +05:30
Dhruwang
3207350bd5 fix 2025-05-05 18:17:29 +05:30
Dhruwang
bbe423319e fix 2025-05-05 16:58:37 +05:30
Dhruwang
40d8d86cd6 fix 2025-05-05 16:53:02 +05:30
Dhruwang
87934d9a68 fix 2025-05-05 16:48:54 +05:30
Dhruwang
0d19569936 fix 2025-05-05 16:44:32 +05:30
Dhruwang
d67dd965ab fix 2025-05-05 16:41:10 +05:30
Dhruwang
328e2db17f fixed zstd veraion 2025-05-05 16:36:24 +05:30
Dhruwang
46e5975653 fix build 2025-05-05 16:32:53 +05:30
Dhruwang
6145f11ddf fix build 2025-05-05 16:28:34 +05:30
Dhruwang
88cff4e52f adding missing package versions and removed edge repo 2025-05-05 16:25:50 +05:30
Dhruwang
801446bb86 Merge branch 'main' of https://github.com/formbricks/formbricks into docker-package-version-update 2025-05-05 16:14:26 +05:30
Harsh Bhat
a53c13d6ed docs: add enterprise features listed under a subpaage (#5594)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-05-05 08:00:47 +00:00
dependabot[bot]
1a0c6e72b2 chore(deps): bump actions/dependency-review-action from 4.5.0 to 4.6.0 (#5270)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 10:08:58 +02:00
dependabot[bot]
ba7c8b79b1 chore(deps): bump actions/checkout from 2.7.0 to 4.2.2 (#5273)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matthias Nannt <mail@matthiasnannt.com>
2025-05-05 10:08:26 +02:00
dependabot[bot]
d7b504eed0 chore(deps): bump step-security/harden-runner from 2.11.0 to 2.12.0 (#5559)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 10:00:21 +02:00
dependabot[bot]
a1df10eb09 chore(deps): bump sigstore/cosign-installer from 3.5.0 to 3.8.2 (#5560)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 09:55:05 +02:00
victorvhs017
92be409d4f chore: add tests to api V1 - part 2 (#5605) 2025-05-05 05:55:18 +00:00
victorvhs017
665c7c6bf1 chore: add tests to api V1 (#5593)
Co-authored-by: Piyush Gupta <piyushguptaa2z123@gmail.com>
2025-05-05 05:38:16 +00:00
victorvhs017
6c2ff7ee08 chore: add tests to survey editor components - part 3 (#5587)
Co-authored-by: use-tusk[bot] <144006087+use-tusk[bot]@users.noreply.github.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-05-05 04:43:37 +00:00
victorvhs017
295a1bf402 chore: add tests to survey editor components - part 2 (#5575)
Co-authored-by: use-tusk[bot] <144006087+use-tusk[bot]@users.noreply.github.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-05-02 14:01:02 +00:00
Piyush Gupta
3e6f558b08 fix: recaptcha feature bugs (#5599) 2025-05-02 07:11:51 +00:00
Dhruwang Jariwala
aad5a59e82 fix: removed dynamic translation key (#5527)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-05-02 06:05:23 +00:00
victorvhs017
36d02480b2 chore: add tests to survey editor components (#5557)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-05-02 05:05:24 +00:00
Piyush Gupta
99454ac57b feat: add recaptcha v3 support to surveys (#5500)
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-05-01 17:22:28 +00:00
Dhruwang Jariwala
e2915f878e chore: used ";" instead of "," for multi select response (#5596) 2025-05-01 06:23:01 +00:00
Dhruwang Jariwala
710a813e9b feat: added option 6 to rating (#5595) 2025-04-30 23:24:06 -07:00
Dhruwang Jariwala
8bdb818995 fix: server side checks for file upload (#5566)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-04-30 16:24:54 +00:00
Anshuman Pandey
20466c3800 fix: fixes js-core expiresAt check (#5591) 2025-04-30 14:44:22 +00:00
Dhruwang
bc5d048c39 fix 2025-04-30 19:29:10 +05:30
Dhruwang
f236047438 fix 2025-04-30 19:25:34 +05:30
Dhruwang
beb7ed0f3f fix redirect 2025-04-30 19:20:57 +05:30
Dhruwang
184bcd12c9 fix test 2025-04-30 19:17:15 +05:30
Dhruwang
a21911b777 sonarqube fixes 2025-04-30 19:15:24 +05:30
Dhruwang
c1df575b83 removed unrelated changes 2025-04-30 18:22:35 +05:30
Dhruwang
c6dba4454f fix 2025-04-30 18:16:39 +05:30
Dhruwang
81c7b54eae restored changes 2025-04-30 16:28:20 +05:30
Dhruwang
f0c2d75a4b fix 2025-04-30 16:07:21 +05:30
Dhruwang
44feb59cfc fix build 2025-04-30 16:03:51 +05:30
Dhruwang
3a4885c459 fix build 2025-04-30 16:00:54 +05:30
Dhruwang
6076ddd8c8 fix build 2025-04-30 15:58:12 +05:30
Dhruwang
f96530fef5 fix 2025-04-30 15:54:23 +05:30
Dhruwang
3c22bd3ccb fix build 2025-04-30 15:51:19 +05:30
Dhruwang
d05f5b26f8 added verification step 2025-04-30 15:41:47 +05:30
Dhruwang
3765e0da54 fix build 2025-04-30 14:43:47 +05:30
Dhruwang
9eea429b44 fix copy syntax 2025-04-30 14:40:42 +05:30
Dhruwang
a05a391080 fix location 2025-04-30 14:37:55 +05:30
Dhruwang
d10da85ac0 fix 2025-04-30 14:34:15 +05:30
Dhruwang
19ea25d483 fix build 2025-04-30 14:31:07 +05:30
Dhruwang
60e26a9ada fix build 2025-04-30 14:24:02 +05:30
Dhruwang
579351cdcd custom versions 2025-04-30 14:19:28 +05:30
Dhruwang
2dbc9559d5 fix build 2025-04-30 10:56:39 +05:30
Dhruwang
fdd84f84a5 fix build 2025-04-30 10:41:23 +05:30
Dhruwang
6bfc54b43c fix 2025-04-30 10:27:43 +05:30
Dhruwang
d18003507e updated openssl 2025-04-30 10:04:58 +05:30
Dhruwang
777485e63d fix alpine version 2025-04-30 09:43:06 +05:30
Dhruwang
0471a0f0c3 Merge branch 'main' of https://github.com/formbricks/formbricks into docker-package-version-update 2025-04-30 09:35:48 +05:30
Dhruwang
6290c6020d manual setup for libxml2 2025-04-30 09:35:43 +05:30
Matti Nannt
faf6c2d062 chore: migrate react-native to its own repo (#5583) 2025-04-29 22:25:15 +02:00
Piyush Jain
a760a3c341 chore(infra): update karpenter nodepool and add tailscale (#5582) 2025-04-29 18:29:33 +00:00
Matti Nannt
94e6d2f215 chore: remove unused dependencies (#5562) 2025-04-29 17:56:24 +02:00
victorvhs017
a6f1c0f63d chore: Added tests to modules/ee/contacts/segment (#5505)
Co-authored-by: Piyush Gupta <piyushguptaa2z123@gmail.com>
2025-04-29 12:55:35 +00:00
Dhruwang Jariwala
c653996cbb chore: introduced env variable to disable User management UI (#5526) 2025-04-29 11:58:58 +00:00
Dhruwang Jariwala
da44fef89d fix: tolgee config (#5567) 2025-04-29 10:56:37 +00:00
Piyush Jain
4dc2c5e3df chore(networking): add vpc CIDR blocks on database and cluster (#5569) 2025-04-29 08:51:11 +00:00
Piyush Gupta
1797c2ae20 fix: matrix question logic condition text (#5570) 2025-04-29 08:38:26 +00:00
Piyush Jain
3b5da01c0a chore(staging): add release for staging env in formbricks-stage ns (#5486)
Co-authored-by: Matti Nannt <mail@matthiasnannt.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2025-04-29 07:30:28 +00:00
Piyush Gupta
0f1bdce002 feat: advanced matrix question logic (#5408)
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-04-29 05:47:05 +00:00
Matti Nannt
7c8f3e826f chore: relocate locales to apps/web (#5564) 2025-04-28 23:21:48 +02:00
Matti Nannt
f21d63bb55 chore: remove unused changeset action (#5563) 2025-04-28 22:46:05 +02:00
Matti Nannt
f223bb3d3f chore: remove unused langfuse packages (#5561) 2025-04-28 22:00:34 +02:00
Matti Nannt
51001d07b6 chore: remove old AI classification feature (#5529)
Co-authored-by: Victor Santos <victor@formbricks.com>
2025-04-28 19:18:07 +00:00
Jakob Schott
a9eedd3c7a fix: Editing active surveys (#5015)
Co-authored-by: Piyush Gupta <piyushguptaa2z123@gmail.com>
2025-04-28 14:50:25 +00:00
dependabot[bot]
b0aa08fe4e chore(deps): bump docker/login-action from 3.3.0 to 3.4.0 (#5269)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-28 13:12:38 +02:00
Matti Nannt
8d45d24d55 fix: error should not be thrown if SMTP is unconfigured (#5524) 2025-04-28 09:48:14 +02:00
victorvhs017
8c1b9f81b9 chore: Added the tests to file upload summary (#5504)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-04-28 07:00:46 +00:00
Harsh Bhat
71fad1c22b docs: add swift sdk docs (#5423) 2025-04-27 23:35:21 -07:00
Harsh Bhat
292266c597 docs: add more inbound links (#5424) 2025-04-27 23:35:07 -07:00
Anshuman Pandey
54e589a6a0 fix: surveys package X button hover and modal bg fix (#5518) 2025-04-27 21:44:10 -07:00
Gulshan Kumar
fb3f425c27 fix: Enhances ux in input box in login-page (#5509) 2025-04-27 20:53:55 -07:00
Jakob Schott
1aaa30c6e9 fix: empty headlines; useage of Error as variable and excluded… (#5491)
Co-authored-by: Victor Santos <victor@formbricks.com>
2025-04-25 14:12:31 +00:00
Dhruwang Jariwala
8611410b21 chore: refactored templates file (#5492)
Co-authored-by: Victor Santos <victor@formbricks.com>
2025-04-25 13:34:31 +00:00
Dhruwang
304db65c66 fix 2025-04-25 11:00:57 +05:30
Dhruwang
1f979c91d3 fix 2025-04-25 10:55:57 +05:30
Dhruwang
3f532b859c added libxml2 version 2025-04-25 10:39:41 +05:30
Dhruwang
05043b1762 custom package versions 2025-04-25 10:22:56 +05:30
Dhruwang
6c724a0b1b updated base image 2025-04-24 17:18:31 +05:30
Dhruwang
f185ff85c5 updated alpine version 2025-04-24 17:09:28 +05:30
456 changed files with 32650 additions and 42294 deletions

View File

@@ -120,6 +120,10 @@ IMPRINT_ADDRESS=
# TURNSTILE_SITE_KEY=
# TURNSTILE_SECRET_KEY=
# Google reCAPTCHA v3 keys
RECAPTCHA_SITE_KEY=
RECAPTCHA_SECRET_KEY=
# Configure Github Login
GITHUB_ID=
GITHUB_SECRET=
@@ -206,12 +210,6 @@ UNKEY_ROOT_KEY=
# Disable custom cache handler if necessary (e.g. if deployed on Vercel)
# CUSTOM_CACHE_DISABLED=1
# Azure AI settings
# AI_AZURE_RESSOURCE_NAME=
# AI_AZURE_API_KEY=
# AI_AZURE_EMBEDDINGS_DEPLOYMENT_ID=
# AI_AZURE_LLM_DEPLOYMENT_ID=
# INTERCOM_APP_ID=
# INTERCOM_SECRET_KEY=
@@ -224,3 +222,6 @@ UNKEY_ROOT_KEY=
# The SENTRY_AUTH_TOKEN variable is picked up by the Sentry Build Plugin.
# It's used automatically by Sentry during the build for authentication when uploading source maps.
# SENTRY_AUTH_TOKEN=
# Disable the user management from UI
# DISABLE_USER_MANAGEMENT

View File

@@ -2,6 +2,7 @@
When generating test files inside the "/app/web" path, follow these rules:
- You are an experienced senior software engineer
- Use vitest
- Ensure 100% code coverage
- Add as few comments as possible
@@ -10,8 +11,10 @@ When generating test files inside the "/app/web" path, follow these rules:
- Follow the same test pattern used for other files in the package where the file is located
- All imports should be at the top of the file, not inside individual tests
- For mocking inside "test" blocks use "vi.mocked"
- Add the original file path to the "test.coverage.include"array in the "apps/web/vite.config.mts" file
- Add the original file path to the "test.coverage.include"array in the "apps/web/vite.config.mts" file. Do this only when the test file is created.
- Don't mock functions that are already mocked in the "apps/web/vitestSetup.ts" file
- When using "screen.getByText" check for the tolgee string if it is being used in the file.
- When mocking data check if the properties added are part of the type of the object being mocked. Don't add properties that are not part of the type.
If it's a test for a ".tsx" file, follow these extra instructions:
@@ -21,6 +24,7 @@ afterEach(() => {
cleanup();
});
- the "afterEach" function should only have "cleanup()" inside it and should be adde to the "vitest" imports
- The "afterEach" function should only have the "cleanup()" line inside it and should be adde to the "vitest" imports.
- For click events, import userEvent from "@testing-library/user-event"
- Mock other components that can make the text more complex and but at the same time mocking it wouldn't make the test flaky. It's ok to leave basic and simple components.
- You don't need to mock @tolgee/react

View File

@@ -19,7 +19,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -13,11 +13,11 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: ./.github/actions/dangerous-git-checkout
- name: Build & Cache Web Binaries

View File

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -17,11 +17,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: 'Checkout Repository'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: 'Dependency Review'
uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4.5.0
uses: actions/dependency-review-action@ce3cf9537a52e8119d91fd484ab5b8a807627bf8 # v4.6.0

View File

@@ -12,6 +12,13 @@ on:
required: false
type: string
default: 'ghcr.io/formbricks/formbricks'
ENVIRONMENT:
description: 'The environment to deploy to'
required: true
type: choice
options:
- stage
- prod
workflow_call:
inputs:
VERSION:
@@ -23,6 +30,10 @@ on:
required: false
type: string
default: 'ghcr.io/formbricks/formbricks'
ENVIRONMENT:
description: 'The environment to deploy to'
required: true
type: string
permissions:
id-token: write
@@ -33,7 +44,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v4.2.2
- name: Tailscale
uses: tailscale/github-action@v3
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:github
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
@@ -48,6 +66,8 @@ jobs:
AWS_REGION: eu-central-1
- uses: helmfile/helmfile-action@v2
name: Deploy Formbricks Cloud Prod
if: (github.event_name == 'workflow_call' || github.event_name == 'workflow_dispatch') && github.event.inputs.ENVIRONMENT == 'prod'
env:
VERSION: ${{ inputs.VERSION }}
REPOSITORY: ${{ inputs.REPOSITORY }}
@@ -58,7 +78,23 @@ jobs:
helm-plugins: >
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-args: apply
helmfile-args: apply -l environment=prod
helmfile-auto-init: "false"
helmfile-workdirectory: infra/formbricks-cloud-helm
- uses: helmfile/helmfile-action@v2
name: Deploy Formbricks Cloud Stage
if: github.event_name == 'workflow_dispatch' && github.event.inputs.ENVIRONMENT == 'stage'
env:
VERSION: ${{ inputs.VERSION }}
REPOSITORY: ${{ inputs.REPOSITORY }}
FORMBRICKS_INGRESS_CERT_ARN: ${{ secrets.STAGE_FORMBRICKS_INGRESS_CERT_ARN }}
FORMBRICKS_ROLE_ARN: ${{ secrets.STAGE_FORMBRICKS_ROLE_ARN }}
with:
helm-plugins: >
https://github.com/databus23/helm-diff,
https://github.com/jkroepke/helm-secrets
helmfile-args: apply -l environment=stage
helmfile-auto-init: "false"
helmfile-workdirectory: infra/formbricks-cloud-helm

View File

@@ -40,7 +40,7 @@ jobs:
steps:
- name: Checkout Repository
uses: actions/checkout@v3
uses: actions/checkout@v4.2.2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

View File

@@ -46,11 +46,11 @@ jobs:
--health-retries=5
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: ./.github/actions/dangerous-git-checkout
- name: Setup Node.js 20.x

View File

@@ -31,3 +31,4 @@ jobs:
- helm-chart-release
with:
VERSION: ${{ needs.docker-build.outputs.VERSION }}
ENVIRONMENT: "prod"

View File

@@ -16,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -13,7 +13,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -51,7 +51,7 @@ jobs:
statuses: write
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0
with:
egress-policy: audit
- name: fail if conditional jobs failed

View File

@@ -1,56 +0,0 @@
name: Release Changesets
on:
workflow_dispatch:
#push:
# branches:
# - main
permissions:
contents: write
pull-requests: write
packages: write
concurrency: ${{ github.workflow }}-${{ github.ref }}
env:
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
TURBO_TEAM: ${{ secrets.TURBO_TEAM }}
jobs:
release:
name: Release
runs-on: ubuntu-latest
timeout-minutes: 15
env:
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
TURBO_TEAM: ${{ secrets.TURBO_TEAM }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
with:
egress-policy: audit
- name: Checkout Repo
uses: actions/checkout@ee0669bd1cc54295c223e0bb666b733df41de1c5 # v2.7.0
- name: Setup Node.js 18.x
uses: actions/setup-node@7c12f8017d5436eb855f1ed4399f037a36fbd9e8 # v2.5.2
with:
node-version: 18.x
- name: Install pnpm
uses: pnpm/action-setup@c3b53f6a16e57305370b4ae5a540c2077a1d50dd # v2.2.4
- name: Install Dependencies
run: pnpm install --config.platform=linux --config.architecture=x64
- name: Create Release Pull Request or Publish to npm
id: changesets
uses: changesets/action@c8bada60c408975afd1a20b3db81d6eee6789308 # v1.4.9
with:
# This expects you to have a script called release which does a build for your packages and calls changeset publish
publish: pnpm release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}

View File

@@ -31,12 +31,12 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Depot CLI
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
@@ -45,13 +45,13 @@ jobs:
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}

View File

@@ -38,12 +38,12 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Get Release Tag
id: extract_release_tag
@@ -65,13 +65,13 @@ jobs:
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}

View File

@@ -19,7 +19,7 @@ jobs:
contents: read
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -35,12 +35,12 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: "Checkout code"
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false

View File

@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -26,13 +26,20 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Tailscale
uses: tailscale/github-action@v3
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:github
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2
with:

View File

@@ -14,11 +14,11 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: ./.github/actions/dangerous-git-checkout
- name: Setup Node.js 20.x

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -16,7 +16,7 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -18,7 +18,7 @@ jobs:
if: github.event.action == 'opened'
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit

View File

@@ -16,6 +16,6 @@ if [ -f branch.json ]; then
echo "Skipping tolgee-pull: NEXT_PUBLIC_TOLGEE_API_KEY is not set"
else
pnpm run tolgee-pull
git add packages/lib/messages
git add apps/web/locales
fi
fi

View File

@@ -4,33 +4,33 @@
"patterns": ["./apps/web/**/*.ts?(x)"],
"projectId": 10304,
"pull": {
"path": "./packages/lib/messages"
"path": "./apps/web/locales"
},
"push": {
"files": [
{
"language": "en-US",
"path": "./packages/lib/messages/en-US.json"
"path": "./apps/web/locales/en-US.json"
},
{
"language": "de-DE",
"path": "./packages/lib/messages/de-DE.json"
"path": "./apps/web/locales/de-DE.json"
},
{
"language": "fr-FR",
"path": "./packages/lib/messages/fr-FR.json"
"path": "./apps/web/locales/fr-FR.json"
},
{
"language": "pt-BR",
"path": "./packages/lib/messages/pt-BR.json"
"path": "./apps/web/locales/pt-BR.json"
},
{
"language": "zh-Hant-TW",
"path": "./packages/lib/messages/zh-Hant-TW.json"
"path": "./apps/web/locales/zh-Hant-TW.json"
},
{
"language": "pt-PT",
"path": "./packages/lib/messages/pt-PT.json"
"path": "./apps/web/locales/pt-PT.json"
}
],
"forceMode": "OVERRIDE"

View File

@@ -3,7 +3,7 @@ Copyright (c) 2024 Formbricks GmbH
Portions of this software are licensed as follows:
- All content that resides under the "apps/web/modules/ee" directory of this repository, if these directories exist, is licensed under the license defined in "apps/web/modules/ee/LICENSE".
- All content that resides under the "packages/js/", "packages/react-native/", "packages/android/", "packages/ios/" and "packages/api/" directories of this repository, if that directories exist, is licensed under the "MIT" license as defined in the "LICENSE" files of these packages.
- All content that resides under the "packages/js/", "packages/android/", "packages/ios/" and "packages/api/" directories of this repository, if that directories exist, is licensed under the "MIT" license as defined in the "LICENSE" files of these packages.
- All third party components incorporated into the Formbricks Software are licensed under the original license provided by the owner of the applicable component.
- Content outside of the above mentioned directories or restrictions above is available under the "AGPLv3" license as defined below.

View File

@@ -1,2 +0,0 @@
EXPO_PUBLIC_APP_URL=http://192.168.0.197:3000
EXPO_PUBLIC_FORMBRICKS_ENVIRONMENT_ID=cm5p0cs7r000819182b32j0a1

View File

@@ -1,7 +0,0 @@
module.exports = {
extends: ["@formbricks/eslint-config/react.js"],
parserOptions: {
project: "tsconfig.json",
tsconfigRootDir: __dirname,
},
};

View File

@@ -1,35 +0,0 @@
# Learn more https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files
# dependencies
node_modules/
# Expo
.expo/
dist/
web-build/
# Native
*.orig.*
*.jks
*.p8
*.p12
*.key
*.mobileprovision
# Metro
.metro-health-check*
# debug
npm-debug.*
yarn-debug.*
yarn-error.*
# macOS
.DS_Store
*.pem
# local env files
.env*.local
# typescript
*.tsbuildinfo

View File

@@ -1,35 +0,0 @@
{
"expo": {
"android": {
"adaptiveIcon": {
"backgroundColor": "#ffffff",
"foregroundImage": "./assets/adaptive-icon.png"
}
},
"assetBundlePatterns": ["**/*"],
"icon": "./assets/icon.png",
"ios": {
"infoPlist": {
"NSCameraUsageDescription": "Take pictures for certain activities.",
"NSMicrophoneUsageDescription": "Need microphone access for recording videos.",
"NSPhotoLibraryUsageDescription": "Select pictures for certain activities."
},
"supportsTablet": true
},
"jsEngine": "hermes",
"name": "react-native-demo",
"newArchEnabled": true,
"orientation": "portrait",
"slug": "react-native-demo",
"splash": {
"backgroundColor": "#ffffff",
"image": "./assets/splash.png",
"resizeMode": "contain"
},
"userInterfaceStyle": "light",
"version": "1.0.0",
"web": {
"favicon": "./assets/favicon.png"
}
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 46 KiB

View File

@@ -1,6 +0,0 @@
module.exports = function babel(api) {
api.cache(true);
return {
presets: ["babel-preset-expo"],
};
};

View File

@@ -1,7 +0,0 @@
import { registerRootComponent } from "expo";
import { LogBox } from "react-native";
import App from "./src/app";
registerRootComponent(App);
LogBox.ignoreAllLogs();

View File

@@ -1,21 +0,0 @@
// Learn more https://docs.expo.io/guides/customizing-metro
const path = require("node:path");
const { getDefaultConfig } = require("expo/metro-config");
// Find the workspace root, this can be replaced with `find-yarn-workspace-root`
const workspaceRoot = path.resolve(__dirname, "../..");
const projectRoot = __dirname;
const config = getDefaultConfig(projectRoot);
// 1. Watch all files within the monorepo
config.watchFolders = [workspaceRoot];
// 2. Let Metro know where to resolve packages, and in what order
config.resolver.nodeModulesPaths = [
path.resolve(projectRoot, "node_modules"),
path.resolve(workspaceRoot, "node_modules"),
];
// 3. Force Metro to resolve (sub)dependencies only from the `nodeModulesPaths`
config.resolver.disableHierarchicalLookup = true;
module.exports = config;

View File

@@ -1,30 +0,0 @@
{
"name": "@formbricks/demo-react-native",
"version": "1.0.0",
"main": "./index.js",
"scripts": {
"dev": "expo start",
"android": "expo start --android",
"ios": "expo start --ios",
"web": "expo start --web",
"eject": "expo eject",
"clean": "rimraf .turbo node_modules .expo"
},
"dependencies": {
"@formbricks/js": "workspace:*",
"@formbricks/react-native": "workspace:*",
"@react-native-async-storage/async-storage": "2.1.0",
"expo": "52.0.28",
"expo-status-bar": "2.0.1",
"react": "18.3.1",
"react-dom": "18.3.1",
"react-native": "0.76.6",
"react-native-webview": "13.12.5"
},
"devDependencies": {
"@babel/core": "7.26.0",
"@types/react": "18.3.18",
"typescript": "5.7.2"
},
"private": true
}

View File

@@ -1,117 +0,0 @@
import { StatusBar } from "expo-status-bar";
import React, { type JSX } from "react";
import { Button, LogBox, StyleSheet, Text, View } from "react-native";
import Formbricks, {
logout,
setAttribute,
setAttributes,
setLanguage,
setUserId,
track,
} from "@formbricks/react-native";
LogBox.ignoreAllLogs();
/**
 * Demo screen for the Formbricks React Native SDK.
 *
 * Renders one button per SDK action (track, setUserId, setAttributes,
 * setAttribute, logout, setLanguage) and mounts the <Formbricks /> client
 * configured from Expo environment variables. Each SDK call returns a
 * promise, so every handler attaches a .catch that logs the failure.
 *
 * Fix: button title typo "Set User Attributess (multiple)" ->
 * "Set User Attributes (multiple)".
 *
 * @returns The demo screen element.
 * @throws Error during render when EXPO_PUBLIC_FORMBRICKS_ENVIRONMENT_ID or
 *   EXPO_PUBLIC_APP_URL is not set.
 */
export default function App(): JSX.Element {
  // Fail fast with a clear message; these guards also make the `as string`
  // casts on the <Formbricks /> props below safe.
  if (!process.env.EXPO_PUBLIC_FORMBRICKS_ENVIRONMENT_ID) {
    throw new Error("EXPO_PUBLIC_FORMBRICKS_ENVIRONMENT_ID is required");
  }
  if (!process.env.EXPO_PUBLIC_APP_URL) {
    throw new Error("EXPO_PUBLIC_APP_URL is required");
  }
  return (
    <View style={styles.container}>
      <Text>Formbricks React Native SDK Demo</Text>
      <View
        style={{
          display: "flex",
          flexDirection: "column",
          gap: 10,
        }}>
        <Button
          title="Trigger Code Action"
          onPress={() => {
            track("code").catch((error: unknown) => {
              // eslint-disable-next-line no-console -- logging is allowed in demo apps
              console.error("Error tracking event:", error);
            });
          }}
        />
        <Button
          title="Set User Id"
          onPress={() => {
            setUserId("random-user-id").catch((error: unknown) => {
              // eslint-disable-next-line no-console -- logging is allowed in demo apps
              console.error("Error setting user id:", error);
            });
          }}
        />
        <Button
          title="Set User Attributes (multiple)"
          onPress={() => {
            setAttributes({
              testAttr: "attr-test",
              testAttr2: "attr-test-2",
              testAttr3: "attr-test-3",
              testAttr4: "attr-test-4",
            }).catch((error: unknown) => {
              // eslint-disable-next-line no-console -- logging is allowed in demo apps
              console.error("Error setting user attributes:", error);
            });
          }}
        />
        <Button
          title="Set User Attributes (single)"
          onPress={() => {
            setAttribute("testSingleAttr", "testSingleAttr").catch((error: unknown) => {
              // eslint-disable-next-line no-console -- logging is allowed in demo apps
              console.error("Error setting user attributes:", error);
            });
          }}
        />
        <Button
          title="Logout"
          onPress={() => {
            logout().catch((error: unknown) => {
              // eslint-disable-next-line no-console -- logging is allowed in demo apps
              console.error("Error logging out:", error);
            });
          }}
        />
        <Button
          title="Set Language (de)"
          onPress={() => {
            setLanguage("de").catch((error: unknown) => {
              // eslint-disable-next-line no-console -- logging is allowed in demo apps
              console.error("Error setting language:", error);
            });
          }}
        />
      </View>
      <StatusBar style="auto" />
      <Formbricks
        appUrl={process.env.EXPO_PUBLIC_APP_URL as string}
        environmentId={process.env.EXPO_PUBLIC_FORMBRICKS_ENVIRONMENT_ID as string}
      />
    </View>
  );
}
// Full-screen white container that centers the demo content both ways.
const styles = StyleSheet.create({
  container: {
    alignItems: "center",
    backgroundColor: "#fff",
    flex: 1,
    justifyContent: "center",
  },
});

View File

@@ -1,6 +0,0 @@
{
"compilerOptions": {
"strict": true
},
"extends": "expo/tsconfig.base"
}

View File

@@ -3,13 +3,13 @@ module.exports = {
ignorePatterns: ["**/package.json", "**/tsconfig.json"],
overrides: [
{
files: ["lib/messages/**/*.json"],
files: ["locales/*.json"],
plugins: ["i18n-json"],
rules: {
"i18n-json/identical-keys": [
"error",
{
filePath: require("path").join(__dirname, "messages", "en-US.json"),
filePath: require("path").join(__dirname, "locales", "en-US.json"),
checkExtraKeys: false,
checkMissingKeys: true,
},

View File

@@ -84,6 +84,12 @@ RUN apk add --no-cache curl \
&& addgroup -S nextjs \
&& adduser -S -u 1001 -G nextjs nextjs
# In the runner stage
RUN apk update && \
apk upgrade && \
# This explicitly removes old package versions
rm -rf /var/cache/apk/*
WORKDIR /home/nextjs
# Ensure no write permissions are assigned to the copied resources

View File

@@ -1,8 +1,13 @@
import { getDefaultEndingCard } from "@/app/lib/templates";
import {
buildCTAQuestion,
buildNPSQuestion,
buildOpenTextQuestion,
buildRatingQuestion,
getDefaultEndingCard,
} from "@/app/lib/survey-builder";
import { createId } from "@paralleldrive/cuid2";
import { TFnType } from "@tolgee/react";
import { logger } from "@formbricks/logger";
import { TSurveyQuestionTypeEnum } from "@formbricks/types/surveys/types";
import { TXMTemplate } from "@formbricks/types/templates";
export const getXMSurveyDefault = (t: TFnType): TXMTemplate => {
@@ -26,35 +31,26 @@ const npsSurvey = (t: TFnType): TXMTemplate => {
...getXMSurveyDefault(t),
name: t("templates.nps_survey_name"),
questions: [
{
id: createId(),
type: TSurveyQuestionTypeEnum.NPS,
headline: { default: t("templates.nps_survey_question_1_headline") },
buildNPSQuestion({
headline: t("templates.nps_survey_question_1_headline"),
required: true,
lowerLabel: { default: t("templates.nps_survey_question_1_lower_label") },
upperLabel: { default: t("templates.nps_survey_question_1_upper_label") },
lowerLabel: t("templates.nps_survey_question_1_lower_label"),
upperLabel: t("templates.nps_survey_question_1_upper_label"),
isColorCodingEnabled: true,
},
{
id: createId(),
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.nps_survey_question_2_headline") },
t,
}),
buildOpenTextQuestion({
headline: t("templates.nps_survey_question_2_headline"),
required: false,
inputType: "text",
charLimit: {
enabled: false,
},
},
{
id: createId(),
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.nps_survey_question_3_headline") },
t,
}),
buildOpenTextQuestion({
headline: t("templates.nps_survey_question_3_headline"),
required: false,
inputType: "text",
charLimit: {
enabled: false,
},
},
t,
}),
],
};
};
@@ -67,9 +63,8 @@ const starRatingSurvey = (t: TFnType): TXMTemplate => {
...defaultSurvey,
name: t("templates.star_rating_survey_name"),
questions: [
{
buildRatingQuestion({
id: reusableQuestionIds[0],
type: TSurveyQuestionTypeEnum.Rating,
logic: [
{
id: createId(),
@@ -102,16 +97,15 @@ const starRatingSurvey = (t: TFnType): TXMTemplate => {
],
range: 5,
scale: "number",
headline: { default: t("templates.star_rating_survey_question_1_headline") },
headline: t("templates.star_rating_survey_question_1_headline"),
required: true,
lowerLabel: { default: t("templates.star_rating_survey_question_1_lower_label") },
upperLabel: { default: t("templates.star_rating_survey_question_1_upper_label") },
isColorCodingEnabled: false,
},
{
lowerLabel: t("templates.star_rating_survey_question_1_lower_label"),
upperLabel: t("templates.star_rating_survey_question_1_upper_label"),
t,
}),
buildCTAQuestion({
id: reusableQuestionIds[1],
html: { default: t("templates.star_rating_survey_question_2_html") },
type: TSurveyQuestionTypeEnum.CTA,
html: t("templates.star_rating_survey_question_2_html"),
logic: [
{
id: createId(),
@@ -138,25 +132,23 @@ const starRatingSurvey = (t: TFnType): TXMTemplate => {
],
},
],
headline: { default: t("templates.star_rating_survey_question_2_headline") },
headline: t("templates.star_rating_survey_question_2_headline"),
required: true,
buttonUrl: "https://formbricks.com/github",
buttonLabel: { default: t("templates.star_rating_survey_question_2_button_label") },
buttonLabel: t("templates.star_rating_survey_question_2_button_label"),
buttonExternal: true,
},
{
t,
}),
buildOpenTextQuestion({
id: reusableQuestionIds[2],
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.star_rating_survey_question_3_headline") },
headline: t("templates.star_rating_survey_question_3_headline"),
required: true,
subheader: { default: t("templates.star_rating_survey_question_3_subheader") },
buttonLabel: { default: t("templates.star_rating_survey_question_3_button_label") },
placeholder: { default: t("templates.star_rating_survey_question_3_placeholder") },
subheader: t("templates.star_rating_survey_question_3_subheader"),
buttonLabel: t("templates.star_rating_survey_question_3_button_label"),
placeholder: t("templates.star_rating_survey_question_3_placeholder"),
inputType: "text",
charLimit: {
enabled: false,
},
},
t,
}),
],
};
};
@@ -169,9 +161,8 @@ const csatSurvey = (t: TFnType): TXMTemplate => {
...defaultSurvey,
name: t("templates.csat_survey_name"),
questions: [
{
buildRatingQuestion({
id: reusableQuestionIds[0],
type: TSurveyQuestionTypeEnum.Rating,
logic: [
{
id: createId(),
@@ -204,15 +195,14 @@ const csatSurvey = (t: TFnType): TXMTemplate => {
],
range: 5,
scale: "smiley",
headline: { default: t("templates.csat_survey_question_1_headline") },
headline: t("templates.csat_survey_question_1_headline"),
required: true,
lowerLabel: { default: t("templates.csat_survey_question_1_lower_label") },
upperLabel: { default: t("templates.csat_survey_question_1_upper_label") },
isColorCodingEnabled: false,
},
{
lowerLabel: t("templates.csat_survey_question_1_lower_label"),
upperLabel: t("templates.csat_survey_question_1_upper_label"),
t,
}),
buildOpenTextQuestion({
id: reusableQuestionIds[1],
type: TSurveyQuestionTypeEnum.OpenText,
logic: [
{
id: createId(),
@@ -239,25 +229,20 @@ const csatSurvey = (t: TFnType): TXMTemplate => {
],
},
],
headline: { default: t("templates.csat_survey_question_2_headline") },
headline: t("templates.csat_survey_question_2_headline"),
required: false,
placeholder: { default: t("templates.csat_survey_question_2_placeholder") },
placeholder: t("templates.csat_survey_question_2_placeholder"),
inputType: "text",
charLimit: {
enabled: false,
},
},
{
t,
}),
buildOpenTextQuestion({
id: reusableQuestionIds[2],
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.csat_survey_question_3_headline") },
headline: t("templates.csat_survey_question_3_headline"),
required: false,
placeholder: { default: t("templates.csat_survey_question_3_placeholder") },
placeholder: t("templates.csat_survey_question_3_placeholder"),
inputType: "text",
charLimit: {
enabled: false,
},
},
t,
}),
],
};
};
@@ -267,28 +252,22 @@ const cessSurvey = (t: TFnType): TXMTemplate => {
...getXMSurveyDefault(t),
name: t("templates.cess_survey_name"),
questions: [
{
id: createId(),
type: TSurveyQuestionTypeEnum.Rating,
buildRatingQuestion({
range: 5,
scale: "number",
headline: { default: t("templates.cess_survey_question_1_headline") },
headline: t("templates.cess_survey_question_1_headline"),
required: true,
lowerLabel: { default: t("templates.cess_survey_question_1_lower_label") },
upperLabel: { default: t("templates.cess_survey_question_1_upper_label") },
isColorCodingEnabled: false,
},
{
id: createId(),
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.cess_survey_question_2_headline") },
lowerLabel: t("templates.cess_survey_question_1_lower_label"),
upperLabel: t("templates.cess_survey_question_1_upper_label"),
t,
}),
buildOpenTextQuestion({
headline: t("templates.cess_survey_question_2_headline"),
required: true,
placeholder: { default: t("templates.cess_survey_question_2_placeholder") },
placeholder: t("templates.cess_survey_question_2_placeholder"),
inputType: "text",
charLimit: {
enabled: false,
},
},
t,
}),
],
};
};
@@ -301,9 +280,8 @@ const smileysRatingSurvey = (t: TFnType): TXMTemplate => {
...defaultSurvey,
name: t("templates.smileys_survey_name"),
questions: [
{
buildRatingQuestion({
id: reusableQuestionIds[0],
type: TSurveyQuestionTypeEnum.Rating,
logic: [
{
id: createId(),
@@ -336,16 +314,15 @@ const smileysRatingSurvey = (t: TFnType): TXMTemplate => {
],
range: 5,
scale: "smiley",
headline: { default: t("templates.smileys_survey_question_1_headline") },
headline: t("templates.smileys_survey_question_1_headline"),
required: true,
lowerLabel: { default: t("templates.smileys_survey_question_1_lower_label") },
upperLabel: { default: t("templates.smileys_survey_question_1_upper_label") },
isColorCodingEnabled: false,
},
{
lowerLabel: t("templates.smileys_survey_question_1_lower_label"),
upperLabel: t("templates.smileys_survey_question_1_upper_label"),
t,
}),
buildCTAQuestion({
id: reusableQuestionIds[1],
html: { default: t("templates.smileys_survey_question_2_html") },
type: TSurveyQuestionTypeEnum.CTA,
html: t("templates.smileys_survey_question_2_html"),
logic: [
{
id: createId(),
@@ -372,25 +349,23 @@ const smileysRatingSurvey = (t: TFnType): TXMTemplate => {
],
},
],
headline: { default: t("templates.smileys_survey_question_2_headline") },
headline: t("templates.smileys_survey_question_2_headline"),
required: true,
buttonUrl: "https://formbricks.com/github",
buttonLabel: { default: t("templates.smileys_survey_question_2_button_label") },
buttonLabel: t("templates.smileys_survey_question_2_button_label"),
buttonExternal: true,
},
{
t,
}),
buildOpenTextQuestion({
id: reusableQuestionIds[2],
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.smileys_survey_question_3_headline") },
headline: t("templates.smileys_survey_question_3_headline"),
required: true,
subheader: { default: t("templates.smileys_survey_question_3_subheader") },
buttonLabel: { default: t("templates.smileys_survey_question_3_button_label") },
placeholder: { default: t("templates.smileys_survey_question_3_placeholder") },
subheader: t("templates.smileys_survey_question_3_subheader"),
buttonLabel: t("templates.smileys_survey_question_3_button_label"),
placeholder: t("templates.smileys_survey_question_3_placeholder"),
inputType: "text",
charLimit: {
enabled: false,
},
},
t,
}),
],
};
};
@@ -400,37 +375,26 @@ const enpsSurvey = (t: TFnType): TXMTemplate => {
...getXMSurveyDefault(t),
name: t("templates.enps_survey_name"),
questions: [
{
id: createId(),
type: TSurveyQuestionTypeEnum.NPS,
headline: {
default: t("templates.enps_survey_question_1_headline"),
},
buildNPSQuestion({
headline: t("templates.enps_survey_question_1_headline"),
required: false,
lowerLabel: { default: t("templates.enps_survey_question_1_lower_label") },
upperLabel: { default: t("templates.enps_survey_question_1_upper_label") },
lowerLabel: t("templates.enps_survey_question_1_lower_label"),
upperLabel: t("templates.enps_survey_question_1_upper_label"),
isColorCodingEnabled: true,
},
{
id: createId(),
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.enps_survey_question_2_headline") },
t,
}),
buildOpenTextQuestion({
headline: t("templates.enps_survey_question_2_headline"),
required: false,
inputType: "text",
charLimit: {
enabled: false,
},
},
{
id: createId(),
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: t("templates.enps_survey_question_3_headline") },
t,
}),
buildOpenTextQuestion({
headline: t("templates.enps_survey_question_3_headline"),
required: false,
inputType: "text",
charLimit: {
enabled: false,
},
},
t,
}),
],
};
};

View File

@@ -58,11 +58,6 @@ const Page = async (props) => {
comingSoon: false,
onRequest: false,
},
{
title: t("environments.settings.enterprise.ai"),
comingSoon: false,
onRequest: true,
},
{
title: t("environments.settings.enterprise.audit_logs"),
comingSoon: false,

View File

@@ -1,96 +0,0 @@
"use client";
import { getFormattedErrorMessage } from "@/lib/utils/helper";
import { updateOrganizationAIEnabledAction } from "@/modules/ee/insights/actions";
import { Alert, AlertDescription } from "@/modules/ui/components/alert";
import { Label } from "@/modules/ui/components/label";
import { Switch } from "@/modules/ui/components/switch";
import { useTranslate } from "@tolgee/react";
import Link from "next/link";
import { useState } from "react";
import toast from "react-hot-toast";
import { TOrganization } from "@formbricks/types/organizations";
// Props for the organization-level "Formbricks AI" settings toggle.
interface AIToggleProps {
  // NOTE(review): declared but not destructured/used by the component below — confirm whether it can be dropped.
  environmentId: string;
  organization: TOrganization;
  // Only owners/managers may flip the switch; others see a warning instead.
  isOwnerOrManager: boolean;
}

/**
 * Settings switch that enables/disables Formbricks AI for an organization.
 *
 * Optimistically flips local state, persists via
 * `updateOrganizationAIEnabledAction`, shows a success/error toast, and then
 * always reloads the page ~500ms later (in the `finally` block) so server
 * components pick up the new organization state.
 */
export const AIToggle = ({ organization, isOwnerOrManager }: AIToggleProps) => {
  const { t } = useTranslate();
  // Local mirror of organization.isAIEnabled for immediate switch feedback.
  const [isAIEnabled, setIsAIEnabled] = useState(organization.isAIEnabled);
  const [isSubmitting, setIsSubmitting] = useState(false);

  // `data` is expected to be `{ enabled: boolean }` (see onClick below).
  const handleUpdateOrganization = async (data) => {
    try {
      // Optimistic UI update before the server round-trip.
      setIsAIEnabled(data.enabled);
      setIsSubmitting(true);
      const updatedOrganizationResponse = await updateOrganizationAIEnabledAction({
        organizationId: organization.id,
        data: {
          isAIEnabled: data.enabled,
        },
      });
      if (updatedOrganizationResponse?.data) {
        if (data.enabled) {
          toast.success(t("environments.settings.general.formbricks_ai_enable_success_message"));
        } else {
          toast.success(t("environments.settings.general.formbricks_ai_disable_success_message"));
        }
      } else {
        // Action returned no data — surface the server-side validation error.
        const errorMessage = getFormattedErrorMessage(updatedOrganizationResponse);
        toast.error(errorMessage);
      }
    } catch (err) {
      toast.error(`Error: ${err.message}`);
    } finally {
      setIsSubmitting(false);
      // Reload regardless of success so the UI reflects the persisted value.
      // The window guard keeps this safe under SSR.
      if (typeof window !== "undefined") {
        setTimeout(() => {
          window.location.reload();
        }, 500);
      }
    }
  };

  // NOTE(review): the switch toggles from the `organization` prop rather than
  // the local `isAIEnabled` state; the two stay in sync only because of the
  // forced page reload above — confirm that coupling is intended.
  return (
    <>
      <div className="flex flex-col gap-2">
        <div className="flex items-center gap-2">
          <Label htmlFor="formbricks-ai-toggle" className="cursor-pointer">
            {t("environments.settings.general.enable_formbricks_ai")}
          </Label>
          <Switch
            id="formbricks-ai-toggle"
            disabled={!isOwnerOrManager || isSubmitting}
            checked={isAIEnabled}
            onClick={(e) => {
              e.stopPropagation();
              handleUpdateOrganization({ enabled: !organization.isAIEnabled });
            }}
          />
        </div>
        <div className="mt-3 text-xs text-slate-600">
          {t("environments.settings.general.formbricks_ai_privacy_policy_text")}{" "}
          <Link
            className="underline"
            href={"https://formbricks.com/privacy-policy"}
            rel="noreferrer"
            target="_blank">
            {t("common.privacy_policy")}
          </Link>
          .
        </div>
      </div>
      {!isOwnerOrManager && (
        <Alert variant="warning" className="mt-4">
          <AlertDescription>
            {t("environments.settings.general.only_org_owner_can_perform_action")}
          </AlertDescription>
        </Alert>
      )}
    </>
  );
};

View File

@@ -1,9 +1,5 @@
import { getUser } from "@/lib/user/service";
import {
getIsMultiOrgEnabled,
getIsOrganizationAIReady,
getWhiteLabelPermission,
} from "@/modules/ee/license-check/lib/utils";
import { getIsMultiOrgEnabled, getWhiteLabelPermission } from "@/modules/ee/license-check/lib/utils";
import { getEnvironmentAuth } from "@/modules/environments/lib/utils";
import { TEnvironmentAuth } from "@/modules/environments/types/environment-auth";
import { getTranslate } from "@/tolgee/server";
@@ -33,12 +29,6 @@ vi.mock("@/lib/constants", () => ({
WEBAPP_URL: "mock-webapp-url",
SMTP_HOST: "mock-smtp-host",
SMTP_PORT: "mock-smtp-port",
AI_AZURE_LLM_RESSOURCE_NAME: "mock-ai-azure-llm-ressource-name",
AI_AZURE_LLM_API_KEY: "mock-ai",
AI_AZURE_LLM_DEPLOYMENT_ID: "mock-ai-azure-llm-deployment-id",
AI_AZURE_EMBEDDINGS_RESSOURCE_NAME: "mock-ai-azure-embeddings-ressource-name",
AI_AZURE_EMBEDDINGS_API_KEY: "mock-ai-azure-embeddings-api-key",
AI_AZURE_EMBEDDINGS_DEPLOYMENT_ID: "mock-ai-azure-embeddings-deployment-id",
}));
vi.mock("next-auth", () => ({
@@ -59,7 +49,6 @@ vi.mock("@/modules/environments/lib/utils", () => ({
vi.mock("@/modules/ee/license-check/lib/utils", () => ({
getIsMultiOrgEnabled: vi.fn(),
getIsOrganizationAIReady: vi.fn(),
getWhiteLabelPermission: vi.fn(),
}));
@@ -80,7 +69,6 @@ describe("Page", () => {
vi.mocked(getUser).mockResolvedValue(mockUser);
vi.mocked(getEnvironmentAuth).mockResolvedValue(mockEnvironmentAuth);
vi.mocked(getIsMultiOrgEnabled).mockResolvedValue(true);
vi.mocked(getIsOrganizationAIReady).mockResolvedValue(true);
vi.mocked(getWhiteLabelPermission).mockResolvedValue(true);
});

View File

@@ -1,12 +1,7 @@
import { OrganizationSettingsNavbar } from "@/app/(app)/environments/[environmentId]/settings/(organization)/components/OrganizationSettingsNavbar";
import { AIToggle } from "@/app/(app)/environments/[environmentId]/settings/(organization)/general/components/AIToggle";
import { FB_LOGO_URL, IS_FORMBRICKS_CLOUD } from "@/lib/constants";
import { getUser } from "@/lib/user/service";
import {
getIsMultiOrgEnabled,
getIsOrganizationAIReady,
getWhiteLabelPermission,
} from "@/modules/ee/license-check/lib/utils";
import { getIsMultiOrgEnabled, getWhiteLabelPermission } from "@/modules/ee/license-check/lib/utils";
import { EmailCustomizationSettings } from "@/modules/ee/whitelabel/email-customization/components/email-customization-settings";
import { getEnvironmentAuth } from "@/modules/environments/lib/utils";
import { PageContentWrapper } from "@/modules/ui/components/page-content-wrapper";
@@ -35,8 +30,6 @@ const Page = async (props: { params: Promise<{ environmentId: string }> }) => {
const isOwnerOrManager = isManager || isOwner;
const isOrganizationAIReady = await getIsOrganizationAIReady(organization.billing.plan);
return (
<PageContentWrapper>
<PageHeader pageTitle={t("environments.settings.general.organization_settings")}>
@@ -56,17 +49,6 @@ const Page = async (props: { params: Promise<{ environmentId: string }> }) => {
membershipRole={currentUserMembership?.role}
/>
</SettingsCard>
{isOrganizationAIReady && (
<SettingsCard
title={t("environments.settings.general.formbricks_ai")}
description={t("environments.settings.general.formbricks_ai_description")}>
<AIToggle
environmentId={params.environmentId}
organization={organization}
isOwnerOrManager={isOwnerOrManager}
/>
</SettingsCard>
)}
<EmailCustomizationSettings
organization={organization}
hasWhiteLabelPermission={hasWhiteLabelPermission}

View File

@@ -1,6 +1,5 @@
"use server";
import { generateInsightsForSurvey } from "@/app/api/(internal)/insights/lib/utils";
import { getResponseCountBySurveyId, getResponses } from "@/lib/response/service";
import { authenticatedActionClient } from "@/lib/utils/action-client";
import { checkAuthorizationUpdated } from "@/lib/utils/action-client-middleware";
@@ -108,31 +107,3 @@ export const getResponseCountAction = authenticatedActionClient
return getResponseCountBySurveyId(parsedInput.surveyId, parsedInput.filterCriteria);
});
const ZGenerateInsightsForSurveyAction = z.object({
surveyId: ZId,
});
export const generateInsightsForSurveyAction = authenticatedActionClient
.schema(ZGenerateInsightsForSurveyAction)
.action(async ({ ctx, parsedInput }) => {
await checkAuthorizationUpdated({
userId: ctx.user.id,
organizationId: await getOrganizationIdFromSurveyId(parsedInput.surveyId),
access: [
{
type: "organization",
schema: ZGenerateInsightsForSurveyAction,
data: parsedInput,
roles: ["owner", "manager"],
},
{
type: "projectTeam",
projectId: await getProjectIdFromSurveyId(parsedInput.surveyId),
minPermission: "readWrite",
},
],
});
generateInsightsForSurvey(parsedInput.surveyId);
});

View File

@@ -1,16 +1,13 @@
import { SurveyAnalysisNavigation } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/components/SurveyAnalysisNavigation";
import { ResponsePage } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/responses/components/ResponsePage";
import { EnableInsightsBanner } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/EnableInsightsBanner";
import { SurveyAnalysisCTA } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SurveyAnalysisCTA";
import { needsInsightsGeneration } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/lib/utils";
import { MAX_RESPONSES_FOR_INSIGHT_GENERATION, RESPONSES_PER_PAGE, WEBAPP_URL } from "@/lib/constants";
import { RESPONSES_PER_PAGE, WEBAPP_URL } from "@/lib/constants";
import { getSurveyDomain } from "@/lib/getSurveyUrl";
import { getResponseCountBySurveyId } from "@/lib/response/service";
import { getSurvey } from "@/lib/survey/service";
import { getTagsByEnvironmentId } from "@/lib/tag/service";
import { getUser } from "@/lib/user/service";
import { findMatchingLocale } from "@/lib/utils/locale";
import { getIsAIEnabled } from "@/modules/ee/license-check/lib/utils";
import { getEnvironmentAuth } from "@/modules/environments/lib/utils";
import { PageContentWrapper } from "@/modules/ui/components/page-content-wrapper";
import { PageHeader } from "@/modules/ui/components/page-header";
@@ -20,7 +17,7 @@ const Page = async (props) => {
const params = await props.params;
const t = await getTranslate();
const { session, environment, organization, isReadOnly } = await getEnvironmentAuth(params.environmentId);
const { session, environment, isReadOnly } = await getEnvironmentAuth(params.environmentId);
const survey = await getSurvey(params.surveyId);
@@ -38,11 +35,6 @@ const Page = async (props) => {
const totalResponseCount = await getResponseCountBySurveyId(params.surveyId);
const isAIEnabled = await getIsAIEnabled({
isAIEnabled: organization.isAIEnabled,
billing: organization.billing,
});
const shouldGenerateInsights = needsInsightsGeneration(survey);
const locale = await findMatchingLocale();
const surveyDomain = getSurveyDomain();
@@ -57,16 +49,9 @@ const Page = async (props) => {
isReadOnly={isReadOnly}
user={user}
surveyDomain={surveyDomain}
responseCount={totalResponseCount}
/>
}>
{isAIEnabled && shouldGenerateInsights && (
<EnableInsightsBanner
surveyId={survey.id}
surveyResponseCount={totalResponseCount}
maxResponseCount={MAX_RESPONSES_FOR_INSIGHT_GENERATION}
/>
)}
<SurveyAnalysisNavigation
environmentId={environment.id}
survey={survey}

View File

@@ -0,0 +1,154 @@
// Unit tests for the <AddressSummary /> question-summary table: user column,
// address values rendered via ArrayResponse, and relative response time.
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryAddress } from "@formbricks/types/surveys/types";
import { AddressSummary } from "./AddressSummary";

// Mock dependencies — each mock returns a fixed value or a lightweight
// stand-in so assertions can match stable text / data-testid attributes.
vi.mock("@/lib/time", () => ({
  timeSince: () => "2 hours ago",
}));
vi.mock("@/lib/utils/contact", () => ({
  getContactIdentifier: () => "contact@example.com",
}));
vi.mock("@/modules/ui/components/avatars", () => ({
  PersonAvatar: ({ personId }: { personId: string }) => <div data-testid="person-avatar">{personId}</div>,
}));
vi.mock("@/modules/ui/components/array-response", () => ({
  ArrayResponse: ({ value }: { value: string[] }) => (
    <div data-testid="array-response">{value.join(", ")}</div>
  ),
}));
vi.mock("./QuestionSummaryHeader", () => ({
  QuestionSummaryHeader: () => <div data-testid="question-summary-header" />,
}));

describe("AddressSummary", () => {
  afterEach(() => {
    cleanup();
  });

  // Shared fixtures; survey content is irrelevant to these rendering tests.
  const environmentId = "env-123";
  const survey = {} as TSurvey;
  const locale = "en-US";

  // Header row: user / response / time column labels (raw translation keys,
  // presumably because the translation hook is mocked globally — TODO confirm).
  test("renders table headers correctly", () => {
    const questionSummary = {
      question: { id: "q1", headline: "Address Question" },
      samples: [],
    } as unknown as TSurveyQuestionSummaryAddress;

    render(
      <AddressSummary
        questionSummary={questionSummary}
        environmentId={environmentId}
        survey={survey}
        locale={locale}
      />
    );

    expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
    expect(screen.getByText("common.user")).toBeInTheDocument();
    expect(screen.getByText("common.response")).toBeInTheDocument();
    expect(screen.getByText("common.time")).toBeInTheDocument();
  });

  // A sample with a contact: avatar shows contact id, identifier comes from
  // the mocked getContactIdentifier, and the row links to the contact page.
  test("renders contact information correctly", () => {
    const questionSummary = {
      question: { id: "q1", headline: "Address Question" },
      samples: [
        {
          id: "response1",
          value: ["123 Main St", "Apt 4", "New York", "NY", "10001"],
          updatedAt: new Date().toISOString(),
          contact: { id: "contact1" },
          contactAttributes: { email: "user@example.com" },
        },
      ],
    } as unknown as TSurveyQuestionSummaryAddress;

    render(
      <AddressSummary
        questionSummary={questionSummary}
        environmentId={environmentId}
        survey={survey}
        locale={locale}
      />
    );

    expect(screen.getByTestId("person-avatar")).toHaveTextContent("contact1");
    expect(screen.getByText("contact@example.com")).toBeInTheDocument();
    expect(screen.getByTestId("array-response")).toHaveTextContent("123 Main St, Apt 4, New York, NY, 10001");
    expect(screen.getByText("2 hours ago")).toBeInTheDocument();

    // Check link to contact
    const contactLink = screen.getByText("contact@example.com").closest("a");
    expect(contactLink).toHaveAttribute("href", `/environments/${environmentId}/contacts/contact1`);
  });

  // A sample without a contact falls back to an anonymous avatar and label.
  test("renders anonymous user when no contact is provided", () => {
    const questionSummary = {
      question: { id: "q1", headline: "Address Question" },
      samples: [
        {
          id: "response2",
          value: ["456 Oak St", "London", "UK"],
          updatedAt: new Date().toISOString(),
          contact: null,
          contactAttributes: {},
        },
      ],
    } as unknown as TSurveyQuestionSummaryAddress;

    render(
      <AddressSummary
        questionSummary={questionSummary}
        environmentId={environmentId}
        survey={survey}
        locale={locale}
      />
    );

    expect(screen.getByTestId("person-avatar")).toHaveTextContent("anonymous");
    expect(screen.getByText("common.anonymous")).toBeInTheDocument();
    expect(screen.getByTestId("array-response")).toHaveTextContent("456 Oak St, London, UK");
  });

  // One table row per sample: two samples -> two avatars / responses / times.
  test("renders multiple responses correctly", () => {
    const questionSummary = {
      question: { id: "q1", headline: "Address Question" },
      samples: [
        {
          id: "response1",
          value: ["123 Main St", "New York"],
          updatedAt: new Date().toISOString(),
          contact: { id: "contact1" },
          contactAttributes: {},
        },
        {
          id: "response2",
          value: ["456 Oak St", "London"],
          updatedAt: new Date().toISOString(),
          contact: { id: "contact2" },
          contactAttributes: {},
        },
      ],
    } as unknown as TSurveyQuestionSummaryAddress;

    render(
      <AddressSummary
        questionSummary={questionSummary}
        environmentId={environmentId}
        survey={survey}
        locale={locale}
      />
    );

    expect(screen.getAllByTestId("person-avatar")).toHaveLength(2);
    expect(screen.getAllByTestId("array-response")).toHaveLength(2);
    expect(screen.getAllByText("2 hours ago")).toHaveLength(2);
  });
});

View File

@@ -0,0 +1,89 @@
// Unit tests for the <CTASummary /> metrics panel: impressions, clicks,
// skips (only for optional questions), CTR percentage and progress bar.
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryCta } from "@formbricks/types/surveys/types";
import { CTASummary } from "./CTASummary";

// Mocks render progress/header as inspectable stand-ins and pin the
// decimal formatting helper to two places.
vi.mock("@/modules/ui/components/progress-bar", () => ({
  ProgressBar: ({ progress, barColor }: { progress: number; barColor: string }) => (
    <div data-testid="progress-bar">{`${progress}-${barColor}`}</div>
  ),
}));
vi.mock("./QuestionSummaryHeader", () => ({
  QuestionSummaryHeader: ({
    additionalInfo,
  }: {
    showResponses: boolean;
    additionalInfo: React.ReactNode;
  }) => <div data-testid="question-summary-header">{additionalInfo}</div>,
}));
vi.mock("lucide-react", () => ({
  InboxIcon: () => <div data-testid="inbox-icon" />,
}));
vi.mock("../lib/utils", () => ({
  convertFloatToNDecimal: (value: number) => value.toFixed(2),
}));

describe("CTASummary", () => {
  afterEach(() => {
    cleanup();
  });

  // Survey content is irrelevant to these rendering tests.
  const survey = {} as TSurvey;

  test("renders with all metrics and required question", () => {
    const questionSummary = {
      question: { id: "q1", headline: "CTA Question", required: true },
      impressionCount: 100,
      clickCount: 25,
      skipCount: 10,
      ctr: { count: 25, percentage: 25 },
    } as unknown as TSurveyQuestionSummaryCta;

    render(<CTASummary questionSummary={questionSummary} survey={survey} />);

    expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
    expect(screen.getByText("100 common.impressions")).toBeInTheDocument();
    // Use getAllByText instead of getByText for multiple matching elements
    expect(screen.getAllByText("25 common.clicks")).toHaveLength(2);
    expect(screen.queryByText("10 common.skips")).not.toBeInTheDocument(); // Should not show skips for required questions

    // Check CTR section
    expect(screen.getByText("CTR")).toBeInTheDocument();
    expect(screen.getByText("25.00%")).toBeInTheDocument();

    // Check progress bar (percentage 25 -> progress 0.25 with brand color)
    expect(screen.getByTestId("progress-bar")).toHaveTextContent("0.25-bg-brand-dark");
  });

  // Optional questions additionally surface the skip count.
  test("renders skip count for non-required questions", () => {
    const questionSummary = {
      question: { id: "q1", headline: "CTA Question", required: false },
      impressionCount: 100,
      clickCount: 20,
      skipCount: 30,
      ctr: { count: 20, percentage: 20 },
    } as unknown as TSurveyQuestionSummaryCta;

    render(<CTASummary questionSummary={questionSummary} survey={survey} />);

    expect(screen.getByText("30 common.skips")).toBeInTheDocument();
  });

  test("renders singular form for count = 1", () => {
    const questionSummary = {
      question: { id: "q1", headline: "CTA Question", required: true },
      impressionCount: 10,
      clickCount: 1,
      skipCount: 0,
      ctr: { count: 1, percentage: 10 },
    } as unknown as TSurveyQuestionSummaryCta;

    render(<CTASummary questionSummary={questionSummary} survey={survey} />);

    // The singular "click" label should appear exactly once.
    expect(screen.getAllByText("1 common.click")).toHaveLength(1);
  });
});

View File

@@ -0,0 +1,69 @@
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryCal } from "@formbricks/types/surveys/types";
import { CalSummary } from "./CalSummary";
vi.mock("@/modules/ui/components/progress-bar", () => ({
ProgressBar: ({ progress, barColor }: { progress: number; barColor: string }) => (
<div data-testid="progress-bar">{`${progress}-${barColor}`}</div>
),
}));
vi.mock("./QuestionSummaryHeader", () => ({
QuestionSummaryHeader: () => <div data-testid="question-summary-header" />,
}));
vi.mock("../lib/utils", () => ({
convertFloatToNDecimal: (value: number) => value.toFixed(2),
}));
describe("CalSummary", () => {
afterEach(() => {
cleanup();
});
const environmentId = "env-123";
const survey = {} as TSurvey;
test("renders the correct components and data", () => {
const questionSummary = {
question: { id: "q1", headline: "Calendar Question" },
booked: { count: 5, percentage: 75 },
skipped: { count: 1, percentage: 25 },
} as unknown as TSurveyQuestionSummaryCal;
render(<CalSummary questionSummary={questionSummary} environmentId={environmentId} survey={survey} />);
expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
// Check if booked section is displayed
expect(screen.getByText("common.booked")).toBeInTheDocument();
expect(screen.getByText("75.00%")).toBeInTheDocument();
expect(screen.getByText("5 common.responses")).toBeInTheDocument();
// Check if skipped section is displayed
expect(screen.getByText("common.dismissed")).toBeInTheDocument();
expect(screen.getByText("25.00%")).toBeInTheDocument();
expect(screen.getByText("1 common.response")).toBeInTheDocument();
// Check progress bars
const progressBars = screen.getAllByTestId("progress-bar");
expect(progressBars).toHaveLength(2);
expect(progressBars[0]).toHaveTextContent("0.75-bg-brand-dark");
expect(progressBars[1]).toHaveTextContent("0.25-bg-brand-dark");
});
test("renders singular and plural response counts correctly", () => {
const questionSummary = {
question: { id: "q1", headline: "Calendar Question" },
booked: { count: 1, percentage: 50 },
skipped: { count: 1, percentage: 50 },
} as unknown as TSurveyQuestionSummaryCal;
render(<CalSummary questionSummary={questionSummary} environmentId={environmentId} survey={survey} />);
// Use getAllByText directly since we know there are multiple matching elements
const responseElements = screen.getAllByText("1 common.response");
expect(responseElements).toHaveLength(2);
});
});

View File

@@ -0,0 +1,153 @@
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryContactInfo } from "@formbricks/types/surveys/types";
import { ContactInfoSummary } from "./ContactInfoSummary";
vi.mock("@/lib/time", () => ({
timeSince: () => "2 hours ago",
}));
vi.mock("@/lib/utils/contact", () => ({
getContactIdentifier: () => "contact@example.com",
}));
vi.mock("@/modules/ui/components/avatars", () => ({
PersonAvatar: ({ personId }: { personId: string }) => <div data-testid="person-avatar">{personId}</div>,
}));
vi.mock("@/modules/ui/components/array-response", () => ({
ArrayResponse: ({ value }: { value: string[] }) => (
<div data-testid="array-response">{value.join(", ")}</div>
),
}));
vi.mock("./QuestionSummaryHeader", () => ({
QuestionSummaryHeader: () => <div data-testid="question-summary-header" />,
}));
describe("ContactInfoSummary", () => {
afterEach(() => {
cleanup();
});
const environmentId = "env-123";
const survey = {} as TSurvey;
const locale = "en-US";
test("renders table headers correctly", () => {
const questionSummary = {
question: { id: "q1", headline: "Contact Info Question" },
samples: [],
} as unknown as TSurveyQuestionSummaryContactInfo;
render(
<ContactInfoSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
expect(screen.getByText("common.user")).toBeInTheDocument();
expect(screen.getByText("common.response")).toBeInTheDocument();
expect(screen.getByText("common.time")).toBeInTheDocument();
});
test("renders contact information correctly", () => {
const questionSummary = {
question: { id: "q1", headline: "Contact Info Question" },
samples: [
{
id: "response1",
value: ["John Doe", "john@example.com", "+1234567890"],
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: { email: "user@example.com" },
},
],
} as unknown as TSurveyQuestionSummaryContactInfo;
render(
<ContactInfoSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByTestId("person-avatar")).toHaveTextContent("contact1");
expect(screen.getByText("contact@example.com")).toBeInTheDocument();
expect(screen.getByTestId("array-response")).toHaveTextContent("John Doe, john@example.com, +1234567890");
expect(screen.getByText("2 hours ago")).toBeInTheDocument();
// Check link to contact
const contactLink = screen.getByText("contact@example.com").closest("a");
expect(contactLink).toHaveAttribute("href", `/environments/${environmentId}/contacts/contact1`);
});
test("renders anonymous user when no contact is provided", () => {
const questionSummary = {
question: { id: "q1", headline: "Contact Info Question" },
samples: [
{
id: "response2",
value: ["Anonymous User", "anonymous@example.com"],
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryContactInfo;
render(
<ContactInfoSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByTestId("person-avatar")).toHaveTextContent("anonymous");
expect(screen.getByText("common.anonymous")).toBeInTheDocument();
expect(screen.getByTestId("array-response")).toHaveTextContent("Anonymous User, anonymous@example.com");
});
test("renders multiple responses correctly", () => {
const questionSummary = {
question: { id: "q1", headline: "Contact Info Question" },
samples: [
{
id: "response1",
value: ["John Doe", "john@example.com"],
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: {},
},
{
id: "response2",
value: ["Jane Smith", "jane@example.com"],
updatedAt: new Date().toISOString(),
contact: { id: "contact2" },
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryContactInfo;
render(
<ContactInfoSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getAllByTestId("person-avatar")).toHaveLength(2);
expect(screen.getAllByTestId("array-response")).toHaveLength(2);
expect(screen.getAllByText("2 hours ago")).toHaveLength(2);
});
});

View File

@@ -0,0 +1,192 @@
import { cleanup, render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryDate } from "@formbricks/types/surveys/types";
import { DateQuestionSummary } from "./DateQuestionSummary";
vi.mock("@/lib/time", () => ({
timeSince: () => "2 hours ago",
}));
vi.mock("@/lib/utils/contact", () => ({
getContactIdentifier: () => "contact@example.com",
}));
vi.mock("@/lib/utils/datetime", () => ({
formatDateWithOrdinal: (_: Date) => "January 1st, 2023",
}));
vi.mock("@/modules/ui/components/avatars", () => ({
PersonAvatar: ({ personId }: { personId: string }) => <div data-testid="person-avatar">{personId}</div>,
}));
vi.mock("@/modules/ui/components/button", () => ({
Button: ({ children, onClick }: { children: React.ReactNode; onClick: () => void }) => (
<button onClick={onClick} data-testid="load-more-button">
{children}
</button>
),
}));
vi.mock("next/link", () => ({
default: ({ children, href }: { children: React.ReactNode; href: string }) => (
<a href={href} data-testid="next-link">
{children}
</a>
),
}));
vi.mock("./QuestionSummaryHeader", () => ({
QuestionSummaryHeader: () => <div data-testid="question-summary-header" />,
}));
describe("DateQuestionSummary", () => {
afterEach(() => {
cleanup();
});
const environmentId = "env-123";
const survey = {} as TSurvey;
const locale = "en-US";
test("renders table headers correctly", () => {
const questionSummary = {
question: { id: "q1", headline: "Date Question" },
samples: [],
} as unknown as TSurveyQuestionSummaryDate;
render(
<DateQuestionSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
expect(screen.getByText("common.user")).toBeInTheDocument();
expect(screen.getByText("common.response")).toBeInTheDocument();
expect(screen.getByText("common.time")).toBeInTheDocument();
});
test("renders date responses correctly", () => {
const questionSummary = {
question: { id: "q1", headline: "Date Question" },
samples: [
{
id: "response1",
value: "2023-01-01",
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryDate;
render(
<DateQuestionSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("January 1st, 2023")).toBeInTheDocument();
expect(screen.getByText("contact@example.com")).toBeInTheDocument();
expect(screen.getByText("2 hours ago")).toBeInTheDocument();
});
test("renders invalid dates with special message", () => {
const questionSummary = {
question: { id: "q1", headline: "Date Question" },
samples: [
{
id: "response1",
value: "invalid-date",
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryDate;
render(
<DateQuestionSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("common.invalid_date(invalid-date)")).toBeInTheDocument();
});
test("renders anonymous user when no contact is provided", () => {
const questionSummary = {
question: { id: "q1", headline: "Date Question" },
samples: [
{
id: "response1",
value: "2023-01-01",
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryDate;
render(
<DateQuestionSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("common.anonymous")).toBeInTheDocument();
});
test("shows load more button when there are more responses and loads more on click", async () => {
const samples = Array.from({ length: 15 }, (_, i) => ({
id: `response${i}`,
value: "2023-01-01",
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
}));
const questionSummary = {
question: { id: "q1", headline: "Date Question" },
samples,
} as unknown as TSurveyQuestionSummaryDate;
render(
<DateQuestionSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
// Initially 10 responses should be visible
expect(screen.getAllByText("January 1st, 2023")).toHaveLength(10);
// "Load More" button should be visible
const loadMoreButton = screen.getByTestId("load-more-button");
expect(loadMoreButton).toBeInTheDocument();
// Click "Load More"
await userEvent.click(loadMoreButton);
// Now all 15 responses should be visible
expect(screen.getAllByText("January 1st, 2023")).toHaveLength(15);
// "Load More" button should disappear
expect(screen.queryByTestId("load-more-button")).not.toBeInTheDocument();
});
});

View File

@@ -1,71 +0,0 @@
"use client";
import { generateInsightsForSurveyAction } from "@/modules/ee/insights/actions";
import { Alert, AlertDescription, AlertTitle } from "@/modules/ui/components/alert";
import { Badge } from "@/modules/ui/components/badge";
import { Button } from "@/modules/ui/components/button";
import { TooltipRenderer } from "@/modules/ui/components/tooltip";
import { useTranslate } from "@tolgee/react";
import { SparklesIcon } from "lucide-react";
import { useState } from "react";
import toast from "react-hot-toast";
interface EnableInsightsBannerProps {
  surveyId: string;
  maxResponseCount: number;
  surveyResponseCount: number;
}

/**
 * Banner on the survey summary page offering to generate AI insights.
 *
 * Hides itself once generation has been started. The action is disabled (with
 * an explanatory tooltip) when surveyResponseCount exceeds maxResponseCount.
 */
export const EnableInsightsBanner = ({
  surveyId,
  surveyResponseCount,
  maxResponseCount,
}: EnableInsightsBannerProps) => {
  const { t } = useTranslate();
  const [isGeneratingInsights, setIsGeneratingInsights] = useState(false);

  const handleInsightGeneration = async () => {
    setIsGeneratingInsights(true);
    // Single translated toast (the previous hard-coded English toast
    // duplicated this message).
    toast.success(t("environments.surveys.summary.enable_ai_insights_banner_success"));
    try {
      // Await the server action so a failure surfaces here instead of
      // becoming an unhandled promise rejection (it was fire-and-forget).
      await generateInsightsForSurveyAction({ surveyId });
    } catch (error) {
      // Re-show the banner so the user can retry.
      setIsGeneratingInsights(false);
      toast.error(error instanceof Error ? error.message : String(error));
    }
  };

  // The banner disappears while/after insights are being generated.
  if (isGeneratingInsights) {
    return null;
  }

  return (
    <Alert className="mb-6 mt-4 flex items-center gap-4 border-slate-400 bg-white">
      <div>
        <SparklesIcon strokeWidth={1.5} className="size-7 text-slate-700" />
      </div>
      <div className="flex-1">
        <AlertTitle>
          <span className="mr-2">{t("environments.surveys.summary.enable_ai_insights_banner_title")}</span>
          <Badge type="gray" size="normal" text="Beta" />
        </AlertTitle>
        <AlertDescription className="flex items-start justify-between gap-4">
          {t("environments.surveys.summary.enable_ai_insights_banner_description")}
        </AlertDescription>
      </div>
      <TooltipRenderer
        tooltipContent={
          surveyResponseCount > maxResponseCount
            ? t("environments.surveys.summary.enable_ai_insights_banner_tooltip")
            : undefined
        }>
        <Button
          size="sm"
          className="shrink-0"
          onClick={handleInsightGeneration}
          loading={isGeneratingInsights}
          disabled={surveyResponseCount > maxResponseCount}>
          {t("environments.surveys.summary.enable_ai_insights_banner_button")}
        </Button>
      </TooltipRenderer>
    </Alert>
  );
};

View File

@@ -0,0 +1,231 @@
import { FileUploadSummary } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/FileUploadSummary";
import { cleanup, render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { afterEach, describe, expect, test, vi } from "vitest";
import {
TSurvey,
TSurveyFileUploadQuestion,
TSurveyQuestionSummaryFileUpload,
TSurveyQuestionTypeEnum,
} from "@formbricks/types/surveys/types";
// Mock child components and hooks
vi.mock("@/modules/ui/components/avatars", () => ({
PersonAvatar: vi.fn(() => <div>PersonAvatarMock</div>),
}));
vi.mock("./QuestionSummaryHeader", () => ({
QuestionSummaryHeader: vi.fn(() => <div>QuestionSummaryHeaderMock</div>),
}));
// Mock utility functions
// File names are derived from the URL's last path segment with an
// "original-" prefix so assertions can match per-file.
vi.mock("@/lib/storage/utils", () => ({
getOriginalFileNameFromUrl: (url: string) => `original-${url.split("/").pop()}`,
}));
// Freeze relative timestamps.
vi.mock("@/lib/time", () => ({
timeSince: () => "some time ago",
}));
// All contacts resolve to the same display identifier.
vi.mock("@/lib/utils/contact", () => ({
getContactIdentifier: () => "contact@example.com",
}));
const environmentId = "test-env-id";
const survey = { id: "survey-1" } as TSurvey;
const locale = "en-US";
const createMockResponse = (id: string, value: string[], contactId: string | null = null) => ({
id: `response-${id}`,
value,
updatedAt: new Date().toISOString(),
contact: contactId ? { id: contactId, name: `Contact ${contactId}` } : null,
contactAttributes: contactId ? { email: `contact${contactId}@example.com` } : {},
});
const questionSummaryBase = {
question: {
id: "q1",
headline: { default: "Upload your file" },
type: TSurveyQuestionTypeEnum.FileUpload,
} as unknown as TSurveyFileUploadQuestion,
responseCount: 0,
files: [],
} as unknown as TSurveyQuestionSummaryFileUpload;
describe("FileUploadSummary", () => {
afterEach(() => {
cleanup();
});
test("renders the component with initial responses", () => {
const files = Array.from({ length: 5 }, (_, i) =>
createMockResponse(i.toString(), [`https://example.com/file${i}.pdf`], `contact-${i}`)
);
const questionSummary = {
...questionSummaryBase,
files,
responseCount: files.length,
} as unknown as TSurveyQuestionSummaryFileUpload;
render(
<FileUploadSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("QuestionSummaryHeaderMock")).toBeInTheDocument();
expect(screen.getByText("common.user")).toBeInTheDocument();
expect(screen.getByText("common.response")).toBeInTheDocument();
expect(screen.getByText("common.time")).toBeInTheDocument();
expect(screen.getAllByText("PersonAvatarMock")).toHaveLength(5);
expect(screen.getAllByText("contact@example.com")).toHaveLength(5);
expect(screen.getByText("original-file0.pdf")).toBeInTheDocument();
expect(screen.getByText("original-file4.pdf")).toBeInTheDocument();
expect(screen.queryByText("common.load_more")).not.toBeInTheDocument();
});
test("renders 'Skipped' when value is an empty array", () => {
const files = [createMockResponse("skipped", [], "contact-skipped")];
const questionSummary = {
...questionSummaryBase,
files,
responseCount: files.length,
} as unknown as TSurveyQuestionSummaryFileUpload;
render(
<FileUploadSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("common.skipped")).toBeInTheDocument();
expect(screen.queryByText(/original-/)).not.toBeInTheDocument(); // No file name should be rendered
});
test("renders 'Anonymous' when contact is null", () => {
const files = [createMockResponse("anon", ["https://example.com/anonfile.jpg"], null)];
const questionSummary = {
...questionSummaryBase,
files,
responseCount: files.length,
} as unknown as TSurveyQuestionSummaryFileUpload;
render(
<FileUploadSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("common.anonymous")).toBeInTheDocument();
expect(screen.getByText("original-anonfile.jpg")).toBeInTheDocument();
});
test("shows 'Load More' button when there are more than 10 responses and loads more on click", async () => {
const files = Array.from({ length: 15 }, (_, i) =>
createMockResponse(i.toString(), [`https://example.com/file${i}.txt`], `contact-${i}`)
);
const questionSummary = {
...questionSummaryBase,
files,
responseCount: files.length,
} as unknown as TSurveyQuestionSummaryFileUpload;
render(
<FileUploadSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
// Initially 10 responses should be visible
expect(screen.getAllByText("PersonAvatarMock")).toHaveLength(10);
expect(screen.getByText("original-file9.txt")).toBeInTheDocument();
expect(screen.queryByText("original-file10.txt")).not.toBeInTheDocument();
// "Load More" button should be visible
const loadMoreButton = screen.getByText("common.load_more");
expect(loadMoreButton).toBeInTheDocument();
// Click "Load More"
await userEvent.click(loadMoreButton);
// Now all 15 responses should be visible
expect(screen.getAllByText("PersonAvatarMock")).toHaveLength(15);
expect(screen.getByText("original-file14.txt")).toBeInTheDocument();
// "Load More" button should disappear
expect(screen.queryByText("common.load_more")).not.toBeInTheDocument();
});
test("renders multiple files for a single response", () => {
const files = [
createMockResponse(
"multi",
["https://example.com/fileA.png", "https://example.com/fileB.docx"],
"contact-multi"
),
];
const questionSummary = {
...questionSummaryBase,
files,
responseCount: files.length,
} as unknown as TSurveyQuestionSummaryFileUpload;
render(
<FileUploadSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByText("original-fileA.png")).toBeInTheDocument();
expect(screen.getByText("original-fileB.docx")).toBeInTheDocument();
// Check that download links exist
const links = screen.getAllByRole("link");
// 1 contact link + 2 file links
expect(links.filter((link) => link.getAttribute("target") === "_blank")).toHaveLength(2);
expect(
links.find((link) => link.getAttribute("href") === "https://example.com/fileA.png")
).toBeInTheDocument();
expect(
links.find((link) => link.getAttribute("href") === "https://example.com/fileB.docx")
).toBeInTheDocument();
});
test("renders contact link correctly", () => {
const contactId = "contact-link-test";
const files = [createMockResponse("link", ["https://example.com/link.pdf"], contactId)];
const questionSummary = {
...questionSummaryBase,
files,
responseCount: files.length,
} as unknown as TSurveyQuestionSummaryFileUpload;
render(
<FileUploadSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
const contactLink = screen.getByText("contact@example.com").closest("a");
expect(contactLink).toBeInTheDocument();
expect(contactLink).toHaveAttribute("href", `/environments/${environmentId}/contacts/${contactId}`);
});
});

View File

@@ -74,12 +74,12 @@ export const FileUploadSummary = ({
<div className="col-span-2 grid">
{Array.isArray(response.value) &&
(response.value.length > 0 ? (
response.value.map((fileUrl, index) => {
response.value.map((fileUrl) => {
const fileName = getOriginalFileNameFromUrl(fileUrl);
return (
<div className="relative m-2 rounded-lg bg-slate-200" key={fileUrl}>
<a href={fileUrl} key={index} target="_blank" rel="noopener noreferrer">
<a href={fileUrl} key={fileUrl} target="_blank" rel="noopener noreferrer">
<div className="absolute top-0 right-0 m-2">
<div className="flex h-8 w-8 items-center justify-center rounded-lg bg-slate-50 hover:bg-white">
<DownloadIcon className="h-6 text-slate-500" />

View File

@@ -0,0 +1,183 @@
import { cleanup, render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TEnvironment } from "@formbricks/types/environment";
import { TSurveyQuestionSummaryHiddenFields } from "@formbricks/types/surveys/types";
import { HiddenFieldsSummary } from "./HiddenFieldsSummary";
// Mock dependencies
// Freeze relative timestamps so the "time" column is deterministic.
vi.mock("@/lib/time", () => ({
timeSince: () => "2 hours ago",
}));
// All contacts resolve to the same display identifier.
vi.mock("@/lib/utils/contact", () => ({
getContactIdentifier: () => "contact@example.com",
}));
vi.mock("@/modules/ui/components/avatars", () => ({
PersonAvatar: ({ personId }: { personId: string }) => <div data-testid="person-avatar">{personId}</div>,
}));
// Button stub doubles as the "load more" control locator.
vi.mock("@/modules/ui/components/button", () => ({
Button: ({ children, onClick }: { children: React.ReactNode; onClick: () => void }) => (
<button onClick={onClick} data-testid="load-more-button">
{children}
</button>
),
}));
// Mock lucide-react components
// NOTE(review): a "Link" export is stubbed here alongside the icons —
// presumably the component imports it from lucide-react; verify against the
// component's imports.
vi.mock("lucide-react", () => ({
InboxIcon: () => <div data-testid="inbox-icon" />,
MessageSquareTextIcon: () => <div data-testid="message-icon" />,
Link: ({ children, href, className }: { children: React.ReactNode; href: string; className: string }) => (
<a href={href} className={className} data-testid="lucide-link">
{children}
</a>
),
}));
// Mock Next.js Link
vi.mock("next/link", () => ({
default: ({ children, href }: { children: React.ReactNode; href: string }) => (
<a href={href} data-testid="next-link">
{children}
</a>
),
}));
describe("HiddenFieldsSummary", () => {
afterEach(() => {
cleanup();
});
const environment = { id: "env-123" } as TEnvironment;
const locale = "en-US";
test("renders component with correct header and single response", () => {
const questionSummary = {
id: "hidden-field-1",
responseCount: 1,
samples: [
{
id: "response1",
value: "Hidden value",
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryHiddenFields;
render(
<HiddenFieldsSummary environment={environment} questionSummary={questionSummary} locale={locale} />
);
expect(screen.getByText("hidden-field-1")).toBeInTheDocument();
expect(screen.getByText("Hidden Field")).toBeInTheDocument();
expect(screen.getByText("1 common.response")).toBeInTheDocument();
// Headers
expect(screen.getByText("common.user")).toBeInTheDocument();
expect(screen.getByText("common.response")).toBeInTheDocument();
expect(screen.getByText("common.time")).toBeInTheDocument();
// We can skip checking for PersonAvatar as it's inside hidden md:flex
expect(screen.getByText("contact@example.com")).toBeInTheDocument();
expect(screen.getByText("Hidden value")).toBeInTheDocument();
expect(screen.getByText("2 hours ago")).toBeInTheDocument();
// Check for link without checking for specific href
expect(screen.getByText("contact@example.com")).toBeInTheDocument();
});
test("renders anonymous user when no contact is provided", () => {
const questionSummary = {
id: "hidden-field-1",
responseCount: 1,
samples: [
{
id: "response1",
value: "Anonymous hidden value",
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryHiddenFields;
render(
<HiddenFieldsSummary environment={environment} questionSummary={questionSummary} locale={locale} />
);
// Instead of checking for avatar, just check for anonymous text
expect(screen.getByText("common.anonymous")).toBeInTheDocument();
expect(screen.getByText("Anonymous hidden value")).toBeInTheDocument();
});
test("renders plural response label when multiple responses", () => {
const questionSummary = {
id: "hidden-field-1",
responseCount: 2,
samples: [
{
id: "response1",
value: "Hidden value 1",
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: {},
},
{
id: "response2",
value: "Hidden value 2",
updatedAt: new Date().toISOString(),
contact: { id: "contact2" },
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryHiddenFields;
render(
<HiddenFieldsSummary environment={environment} questionSummary={questionSummary} locale={locale} />
);
expect(screen.getByText("2 common.responses")).toBeInTheDocument();
expect(screen.getAllByText("contact@example.com")).toHaveLength(2);
});
test("shows load more button when there are more responses and loads more on click", async () => {
const samples = Array.from({ length: 15 }, (_, i) => ({
id: `response${i}`,
value: `Hidden value ${i}`,
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
}));
const questionSummary = {
id: "hidden-field-1",
responseCount: samples.length,
samples,
} as unknown as TSurveyQuestionSummaryHiddenFields;
render(
<HiddenFieldsSummary environment={environment} questionSummary={questionSummary} locale={locale} />
);
// Initially 10 responses should be visible
expect(screen.getAllByText(/Hidden value \d+/)).toHaveLength(10);
// "Load More" button should be visible
const loadMoreButton = screen.getByTestId("load-more-button");
expect(loadMoreButton).toBeInTheDocument();
// Click "Load More"
await userEvent.click(loadMoreButton);
// Now all 15 responses should be visible
expect(screen.getAllByText(/Hidden value \d+/)).toHaveLength(15);
// "Load More" button should disappear
expect(screen.queryByTestId("load-more-button")).not.toBeInTheDocument();
});
});

View File

@@ -0,0 +1,174 @@
import { cleanup, render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryOpenText } from "@formbricks/types/surveys/types";
import { OpenTextSummary } from "./OpenTextSummary";
// Mock dependencies
// Freeze relative timestamps so the "time" column is deterministic.
vi.mock("@/lib/time", () => ({
timeSince: () => "2 hours ago",
}));
// All contacts resolve to the same display identifier.
vi.mock("@/lib/utils/contact", () => ({
getContactIdentifier: () => "contact@example.com",
}));
// Responses are rendered through this helper; the stub exposes the raw text.
vi.mock("@/modules/analysis/utils", () => ({
renderHyperlinkedContent: (text: string) => <div data-testid="hyperlinked-content">{text}</div>,
}));
vi.mock("@/modules/ui/components/avatars", () => ({
PersonAvatar: ({ personId }: { personId: string }) => <div data-testid="person-avatar">{personId}</div>,
}));
// Button stub doubles as the "load more" control locator.
vi.mock("@/modules/ui/components/button", () => ({
Button: ({ children, onClick }: { children: React.ReactNode; onClick: () => void }) => (
<button onClick={onClick} data-testid="load-more-button">
{children}
</button>
),
}));
// Tab navigation stub (only rendered when insights are enabled).
vi.mock("@/modules/ui/components/secondary-navigation", () => ({
SecondaryNavigation: ({ activeId, navigation }: any) => (
<div data-testid="secondary-navigation">
{navigation.map((item: any) => (
<button key={item.id} onClick={item.onClick} data-active={activeId === item.id}>
{item.label}
</button>
))}
</div>
),
}));
// Render the table as real table elements so role/content queries work.
vi.mock("@/modules/ui/components/table", () => ({
Table: ({ children }: { children: React.ReactNode }) => <table data-testid="table">{children}</table>,
TableHeader: ({ children }: { children: React.ReactNode }) => <thead>{children}</thead>,
TableBody: ({ children }: { children: React.ReactNode }) => <tbody>{children}</tbody>,
TableRow: ({ children }: { children: React.ReactNode }) => <tr>{children}</tr>,
TableHead: ({ children }: { children: React.ReactNode }) => <th>{children}</th>,
TableCell: ({ children, width }: { children: React.ReactNode; width?: number }) => (
<td style={width ? { width } : {}}>{children}</td>
),
}));
vi.mock("@/modules/ee/insights/components/insights-view", () => ({
InsightView: () => <div data-testid="insight-view"></div>,
}));
// Header stub forwards additionalInfo so tests can assert on it if needed.
vi.mock("./QuestionSummaryHeader", () => ({
QuestionSummaryHeader: ({ additionalInfo }: { additionalInfo?: React.ReactNode }) => (
<div data-testid="question-summary-header">{additionalInfo}</div>
),
}));
describe("OpenTextSummary", () => {
afterEach(() => {
cleanup();
});
const environmentId = "env-123";
const survey = { id: "survey-1" } as TSurvey;
const locale = "en-US";
test("renders response mode by default when insights not enabled", () => {
const questionSummary = {
question: { id: "q1", headline: "Open Text Question" },
samples: [
{
id: "response1",
value: "Sample response text",
updatedAt: new Date().toISOString(),
contact: { id: "contact1" },
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryOpenText;
render(
<OpenTextSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
expect(screen.getByTestId("table")).toBeInTheDocument();
expect(screen.getByTestId("person-avatar")).toHaveTextContent("contact1");
expect(screen.getByText("contact@example.com")).toBeInTheDocument();
expect(screen.getByTestId("hyperlinked-content")).toHaveTextContent("Sample response text");
expect(screen.getByText("2 hours ago")).toBeInTheDocument();
// No secondary navigation when insights not enabled
expect(screen.queryByTestId("secondary-navigation")).not.toBeInTheDocument();
});
test("renders anonymous user when no contact is provided", () => {
const questionSummary = {
question: { id: "q1", headline: "Open Text Question" },
samples: [
{
id: "response1",
value: "Anonymous response",
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
},
],
} as unknown as TSurveyQuestionSummaryOpenText;
render(
<OpenTextSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
expect(screen.getByTestId("person-avatar")).toHaveTextContent("anonymous");
expect(screen.getByText("common.anonymous")).toBeInTheDocument();
});
test("shows load more button when there are more responses and loads more on click", async () => {
// 15 anonymous samples — more than the component's initial page size of 10 —
// to exercise the pagination ("Load More") path.
const samples = Array.from({ length: 15 }, (_, i) => ({
id: `response${i}`,
value: `Response ${i}`,
updatedAt: new Date().toISOString(),
contact: null,
contactAttributes: {},
}));
const questionSummary = {
question: { id: "q1", headline: "Open Text Question" },
samples,
} as unknown as TSurveyQuestionSummaryOpenText;
render(
<OpenTextSummary
questionSummary={questionSummary}
environmentId={environmentId}
survey={survey}
locale={locale}
/>
);
// Initially 10 responses should be visible
expect(screen.getAllByTestId("hyperlinked-content")).toHaveLength(10);
// "Load More" button should be visible
const loadMoreButton = screen.getByTestId("load-more-button");
expect(loadMoreButton).toBeInTheDocument();
// Click "Load More"
await userEvent.click(loadMoreButton);
// Now all 15 responses should be visible
expect(screen.getAllByTestId("hyperlinked-content")).toHaveLength(15);
// "Load More" button should disappear once every sample is shown
expect(screen.queryByTestId("load-more-button")).not.toBeInTheDocument();
});
});

View File

@@ -3,10 +3,8 @@
import { timeSince } from "@/lib/time";
import { getContactIdentifier } from "@/lib/utils/contact";
import { renderHyperlinkedContent } from "@/modules/analysis/utils";
import { InsightView } from "@/modules/ee/insights/components/insights-view";
import { PersonAvatar } from "@/modules/ui/components/avatars";
import { Button } from "@/modules/ui/components/button";
import { SecondaryNavigation } from "@/modules/ui/components/secondary-navigation";
import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/modules/ui/components/table";
import { useTranslate } from "@tolgee/react";
import Link from "next/link";
@@ -19,25 +17,12 @@ interface OpenTextSummaryProps {
questionSummary: TSurveyQuestionSummaryOpenText;
environmentId: string;
survey: TSurvey;
isAIEnabled: boolean;
documentsPerPage?: number;
locale: TUserLocale;
}
export const OpenTextSummary = ({
questionSummary,
environmentId,
survey,
isAIEnabled,
documentsPerPage,
locale,
}: OpenTextSummaryProps) => {
export const OpenTextSummary = ({ questionSummary, environmentId, survey, locale }: OpenTextSummaryProps) => {
const { t } = useTranslate();
const isInsightsEnabled = isAIEnabled && questionSummary.insightsEnabled;
const [visibleResponses, setVisibleResponses] = useState(10);
const [activeTab, setActiveTab] = useState<"insights" | "responses">(
isInsightsEnabled && questionSummary.insights.length ? "insights" : "responses"
);
const handleLoadMore = () => {
// Increase the number of visible responses by 10, not exceeding the total number of responses
@@ -46,104 +31,62 @@ export const OpenTextSummary = ({
);
};
const tabNavigation = [
{
id: "insights",
label: t("common.insights"),
onClick: () => setActiveTab("insights"),
},
{
id: "responses",
label: t("common.responses"),
onClick: () => setActiveTab("responses"),
},
];
return (
<div className="overflow-hidden rounded-xl border border-slate-200 bg-white shadow-sm">
<QuestionSummaryHeader
questionSummary={questionSummary}
survey={survey}
additionalInfo={
isAIEnabled && questionSummary.insightsEnabled === false ? (
<div className="flex items-center space-x-2">
<div className="flex items-center rounded-lg bg-slate-100 p-2">
{t("environments.surveys.summary.insights_disabled")}
</div>
</div>
) : undefined
}
/>
{isInsightsEnabled && (
<div className="ml-4">
<SecondaryNavigation activeId={activeTab} navigation={tabNavigation} />
</div>
)}
<QuestionSummaryHeader questionSummary={questionSummary} survey={survey} />
<div className="border-t border-slate-200"></div>
<div className="max-h-[40vh] overflow-y-auto">
{activeTab === "insights" ? (
<InsightView
insights={questionSummary.insights}
questionId={questionSummary.question.id}
surveyId={survey.id}
documentsPerPage={documentsPerPage}
locale={locale}
/>
) : activeTab === "responses" ? (
<>
<Table>
<TableHeader className="bg-slate-100">
<TableRow>
<TableHead>{t("common.user")}</TableHead>
<TableHead>{t("common.response")}</TableHead>
<TableHead>{t("common.time")}</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{questionSummary.samples.slice(0, visibleResponses).map((response) => (
<TableRow key={response.id}>
<TableCell>
{response.contact ? (
<Link
className="ph-no-capture group flex items-center"
href={`/environments/${environmentId}/contacts/${response.contact.id}`}>
<div className="hidden md:flex">
<PersonAvatar personId={response.contact.id} />
</div>
<p className="ph-no-capture break-all text-slate-600 group-hover:underline md:ml-2">
{getContactIdentifier(response.contact, response.contactAttributes)}
</p>
</Link>
) : (
<div className="group flex items-center">
<div className="hidden md:flex">
<PersonAvatar personId="anonymous" />
</div>
<p className="break-normal text-slate-600 md:ml-2">{t("common.anonymous")}</p>
</div>
)}
</TableCell>
<TableCell className="font-medium">
{typeof response.value === "string"
? renderHyperlinkedContent(response.value)
: response.value}
</TableCell>
<TableCell width={120}>
{timeSince(new Date(response.updatedAt).toISOString(), locale)}
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
{visibleResponses < questionSummary.samples.length && (
<div className="flex justify-center py-4">
<Button onClick={handleLoadMore} variant="secondary" size="sm">
{t("common.load_more")}
</Button>
</div>
)}
</>
) : null}
<Table>
<TableHeader className="bg-slate-100">
<TableRow>
<TableHead>{t("common.user")}</TableHead>
<TableHead>{t("common.response")}</TableHead>
<TableHead>{t("common.time")}</TableHead>
</TableRow>
</TableHeader>
<TableBody>
{questionSummary.samples.slice(0, visibleResponses).map((response) => (
<TableRow key={response.id}>
<TableCell>
{response.contact ? (
<Link
className="ph-no-capture group flex items-center"
href={`/environments/${environmentId}/contacts/${response.contact.id}`}>
<div className="hidden md:flex">
<PersonAvatar personId={response.contact.id} />
</div>
<p className="ph-no-capture break-all text-slate-600 group-hover:underline md:ml-2">
{getContactIdentifier(response.contact, response.contactAttributes)}
</p>
</Link>
) : (
<div className="group flex items-center">
<div className="hidden md:flex">
<PersonAvatar personId="anonymous" />
</div>
<p className="break-normal text-slate-600 md:ml-2">{t("common.anonymous")}</p>
</div>
)}
</TableCell>
<TableCell className="font-medium">
{typeof response.value === "string"
? renderHyperlinkedContent(response.value)
: response.value}
</TableCell>
<TableCell width={120}>
{timeSince(new Date(response.updatedAt).toISOString(), locale)}
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
{visibleResponses < questionSummary.samples.length && (
<div className="flex justify-center py-4">
<Button onClick={handleLoadMore} variant="secondary" size="sm">
{t("common.load_more")}
</Button>
</div>
)}
</div>
</div>
);

View File

@@ -0,0 +1,164 @@
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummary, TSurveyQuestionTypeEnum } from "@formbricks/types/surveys/types";
import { QuestionSummaryHeader } from "./QuestionSummaryHeader";
// Mock dependencies
// recallToHeadline is stubbed to a fixed i18n object so headline rendering
// can be asserted without real recall data.
vi.mock("@/lib/utils/recall", () => ({
recallToHeadline: () => ({ default: "Recalled Headline" }),
}));
// formatTextWithSlashes is replaced by a tagged <span> so tests can target
// the formatted headline via data-testid.
vi.mock("@/modules/survey/editor/lib/utils", () => ({
formatTextWithSlashes: (text: string) => <span data-testid="formatted-headline">{text}</span>,
}));
// Only two question types are registered; anything else exercises the
// "unknown question type" fallback below.
vi.mock("@/modules/survey/lib/questions", () => ({
getQuestionTypes: () => [
{
id: "openText",
label: "Open Text",
icon: () => <div data-testid="question-icon">Icon</div>,
},
{
id: "multipleChoice",
label: "Multiple Choice",
icon: () => <div data-testid="question-icon">Icon</div>,
},
],
}));
vi.mock("@/modules/ui/components/settings-id", () => ({
SettingsId: ({ title, id }: { title: string; id: string }) => (
<div data-testid="settings-id">
{title}: {id}
</div>
),
}));
// Mock InboxIcon
vi.mock("lucide-react", () => ({
InboxIcon: () => <div data-testid="inbox-icon"></div>,
}));
// Covers: headline + type rendering, the "optional" tag, hiding the
// response count, the unknown-type fallback, and the additionalInfo slot.
describe("QuestionSummaryHeader", () => {
afterEach(() => {
cleanup();
});
// The header only reads question data from the summary, so an empty
// survey object is sufficient.
const survey = {} as TSurvey;
test("renders header with question headline and type", () => {
const questionSummary = {
question: {
id: "q1",
headline: { default: "Test Question" },
type: "openText" as TSurveyQuestionTypeEnum,
required: true,
},
responseCount: 42,
} as unknown as TSurveyQuestionSummary;
render(<QuestionSummaryHeader questionSummary={questionSummary} survey={survey} />);
expect(screen.getByTestId("formatted-headline")).toHaveTextContent("Recalled Headline");
// Look for text content with a more specific approach
const questionTypeElement = screen.getByText((content) => {
return content.includes("Open Text") && !content.includes("common.question_id");
});
expect(questionTypeElement).toBeInTheDocument();
// Check for responses text specifically
expect(
screen.getByText((content) => {
return content.includes("42") && content.includes("common.responses");
})
).toBeInTheDocument();
expect(screen.getByTestId("question-icon")).toBeInTheDocument();
expect(screen.getByTestId("settings-id")).toHaveTextContent("common.question_id: q1");
// Required questions must not carry the "optional" tag.
expect(screen.queryByText("environments.surveys.edit.optional")).not.toBeInTheDocument();
});
test("shows 'optional' tag when question is not required", () => {
const questionSummary = {
question: {
id: "q2",
headline: { default: "Optional Question" },
type: "multipleChoice" as TSurveyQuestionTypeEnum,
required: false,
},
responseCount: 10,
} as unknown as TSurveyQuestionSummary;
render(<QuestionSummaryHeader questionSummary={questionSummary} survey={survey} />);
expect(screen.getByText("environments.surveys.edit.optional")).toBeInTheDocument();
});
test("hides response count when showResponses is false", () => {
const questionSummary = {
question: {
id: "q3",
headline: { default: "No Response Count Question" },
type: "openText" as TSurveyQuestionTypeEnum,
required: true,
},
responseCount: 15,
} as unknown as TSurveyQuestionSummary;
render(<QuestionSummaryHeader questionSummary={questionSummary} survey={survey} showResponses={false} />);
expect(
screen.queryByText((content) => content.includes("15") && content.includes("common.responses"))
).not.toBeInTheDocument();
});
test("shows unknown question type for unrecognized type", () => {
// "unknownType" is not in the mocked getQuestionTypes list above, so the
// component should render its unknown-type translation key.
const questionSummary = {
question: {
id: "q4",
headline: { default: "Unknown Type Question" },
type: "unknownType" as TSurveyQuestionTypeEnum,
required: true,
},
responseCount: 5,
} as unknown as TSurveyQuestionSummary;
render(<QuestionSummaryHeader questionSummary={questionSummary} survey={survey} />);
// Look for text in the question type element specifically
const unknownTypeElement = screen.getByText((content) => {
return (
content.includes("environments.surveys.summary.unknown_question_type") &&
!content.includes("common.question_id")
);
});
expect(unknownTypeElement).toBeInTheDocument();
});
test("renders additional info when provided", () => {
const questionSummary = {
question: {
id: "q5",
headline: { default: "With Additional Info" },
type: "openText" as TSurveyQuestionTypeEnum,
required: true,
},
responseCount: 20,
} as unknown as TSurveyQuestionSummary;
// Arbitrary JSX passed through the additionalInfo prop must be rendered as-is.
const additionalInfo = <div data-testid="additional-info">Extra Information</div>;
render(
<QuestionSummaryHeader
questionSummary={questionSummary}
survey={survey}
additionalInfo={additionalInfo}
/>
);
expect(screen.getByTestId("additional-info")).toBeInTheDocument();
expect(screen.getByText("Extra Information")).toBeInTheDocument();
});
});

View File

@@ -0,0 +1,104 @@
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionSummaryRanking, TSurveyType } from "@formbricks/types/surveys/types";
import { RankingSummary } from "./RankingSummary";
// Mock dependencies
vi.mock("./QuestionSummaryHeader", () => ({
QuestionSummaryHeader: () => <div data-testid="question-summary-header" />,
}));
// Simplified stub: always formats to two decimals, matching the "#1.20"
// style assertions below.
vi.mock("../lib/utils", () => ({
convertFloatToNDecimal: (value: number) => value.toFixed(2),
}));
// Covers: sort order by average ranking, the "other values found" section,
// and the survey-type-dependent "User" column.
describe("RankingSummary", () => {
afterEach(() => {
cleanup();
});
const survey = {} as TSurvey;
const surveyType: TSurveyType = "app";
test("renders ranking results in correct order", () => {
// Choices are deliberately unordered by avgRanking to prove the
// component sorts them ascending (best rank first).
const questionSummary = {
question: { id: "q1", headline: "Rank the following" },
choices: {
option1: { value: "Option A", avgRanking: 1.5, others: [] },
option2: { value: "Option B", avgRanking: 2.3, others: [] },
option3: { value: "Option C", avgRanking: 1.2, others: [] },
},
} as unknown as TSurveyQuestionSummaryRanking;
render(<RankingSummary questionSummary={questionSummary} survey={survey} surveyType={surveyType} />);
expect(screen.getByTestId("question-summary-header")).toBeInTheDocument();
// Check order: should be sorted by avgRanking (ascending)
const options = screen.getAllByText(/Option [A-C]/);
expect(options[0]).toHaveTextContent("Option C"); // 1.2 (lowest avgRanking first)
expect(options[1]).toHaveTextContent("Option A"); // 1.5
expect(options[2]).toHaveTextContent("Option B"); // 2.3
// Check rankings are displayed
expect(screen.getByText("#1")).toBeInTheDocument();
expect(screen.getByText("#2")).toBeInTheDocument();
expect(screen.getByText("#3")).toBeInTheDocument();
// Check average values are displayed
expect(screen.getByText("#1.20")).toBeInTheDocument();
expect(screen.getByText("#1.50")).toBeInTheDocument();
expect(screen.getByText("#2.30")).toBeInTheDocument();
});
test("renders 'other values found' section when others exist", () => {
const questionSummary = {
question: { id: "q1", headline: "Rank the following" },
choices: {
option1: {
value: "Option A",
avgRanking: 1.0,
others: [{ value: "Other value", count: 2 }],
},
},
} as unknown as TSurveyQuestionSummaryRanking;
render(<RankingSummary questionSummary={questionSummary} survey={survey} surveyType={surveyType} />);
expect(screen.getByText("environments.surveys.summary.other_values_found")).toBeInTheDocument();
});
test("shows 'User' column in other values section for app survey type", () => {
const questionSummary = {
question: { id: "q1", headline: "Rank the following" },
choices: {
option1: {
value: "Option A",
avgRanking: 1.0,
others: [{ value: "Other value", count: 1 }],
},
},
} as unknown as TSurveyQuestionSummaryRanking;
render(<RankingSummary questionSummary={questionSummary} survey={survey} surveyType="app" />);
expect(screen.getByText("common.user")).toBeInTheDocument();
});
test("doesn't show 'User' column for link survey type", () => {
// Link surveys are anonymous, so the per-user column must be omitted.
const questionSummary = {
question: { id: "q1", headline: "Rank the following" },
choices: {
option1: {
value: "Option A",
avgRanking: 1.0,
others: [{ value: "Other value", count: 1 }],
},
},
} as unknown as TSurveyQuestionSummaryRanking;
render(<RankingSummary questionSummary={questionSummary} survey={survey} surveyType="link" />);
expect(screen.queryByText("common.user")).not.toBeInTheDocument();
});
});

View File

@@ -0,0 +1,125 @@
import { cleanup, render, screen } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestionTypeEnum, TSurveySummary } from "@formbricks/types/surveys/types";
import { SummaryDropOffs } from "./SummaryDropOffs";

// Mock dependencies
vi.mock("@/lib/utils/recall", () => ({
  recallToHeadline: () => ({ default: "Recalled Question" }),
}));
vi.mock("@/modules/survey/editor/lib/utils", () => ({
  // Annotate the parameter: an implicit `any` fails under `noImplicitAny`
  // and was inconsistent with the identical mock in QuestionSummaryHeader.test.
  formatTextWithSlashes: (text: string) => <span data-testid="formatted-text">{text}</span>,
}));
vi.mock("@/modules/survey/lib/questions", () => ({
  getQuestionIcon: () => () => <div data-testid="question-icon" />,
}));
// Tooltip primitives are flattened to plain divs so content can be queried
// without pointer interaction.
vi.mock("@/modules/ui/components/tooltip", () => ({
  TooltipProvider: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  Tooltip: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
  TooltipTrigger: ({ children }: { children: React.ReactNode }) => (
    <div data-testid="tooltip-trigger">{children}</div>
  ),
  TooltipContent: ({ children }: { children: React.ReactNode }) => (
    <div data-testid="tooltip-content">{children}</div>
  ),
}));
vi.mock("lucide-react", () => ({
  TimerIcon: () => <div data-testid="timer-icon" />,
}));

// Covers: header row, ttc tooltip, per-question drop-off rows (time,
// impressions, counts, rounded percentages) and the empty state.
describe("SummaryDropOffs", () => {
  afterEach(() => {
    cleanup();
  });

  const mockSurvey = {} as TSurvey;
  // Three rows exercising the three ttc display cases: normal, normal, zero (N/A).
  const mockDropOff: TSurveySummary["dropOff"] = [
    {
      questionId: "q1",
      headline: "First Question",
      questionType: TSurveyQuestionTypeEnum.OpenText,
      ttc: 15000, // 15 seconds
      impressions: 100,
      dropOffCount: 20,
      dropOffPercentage: 20,
    },
    {
      questionId: "q2",
      headline: "Second Question",
      questionType: TSurveyQuestionTypeEnum.MultipleChoiceMulti,
      ttc: 30000, // 30 seconds
      impressions: 80,
      dropOffCount: 15,
      dropOffPercentage: 18.75,
    },
    {
      questionId: "q3",
      headline: "Third Question",
      questionType: TSurveyQuestionTypeEnum.Rating,
      ttc: 0, // No time data
      impressions: 65,
      dropOffCount: 10,
      dropOffPercentage: 15.38,
    },
  ];

  test("renders header row with correct columns", () => {
    render(<SummaryDropOffs dropOff={mockDropOff} survey={mockSurvey} />);
    // Check header
    expect(screen.getByText("common.questions")).toBeInTheDocument();
    expect(screen.getByTestId("tooltip-trigger")).toBeInTheDocument();
    expect(screen.getByTestId("timer-icon")).toBeInTheDocument();
    expect(screen.getByText("environments.surveys.summary.impressions")).toBeInTheDocument();
    expect(screen.getByText("environments.surveys.summary.drop_offs")).toBeInTheDocument();
  });

  test("renders tooltip with correct content", () => {
    render(<SummaryDropOffs dropOff={mockDropOff} survey={mockSurvey} />);
    expect(screen.getByTestId("tooltip-content")).toBeInTheDocument();
    expect(screen.getByText("environments.surveys.summary.ttc_tooltip")).toBeInTheDocument();
  });

  test("renders all drop-off items with correct data", () => {
    render(<SummaryDropOffs dropOff={mockDropOff} survey={mockSurvey} />);
    // There should be 3 rows of data (one for each question)
    expect(screen.getAllByTestId("question-icon")).toHaveLength(3);
    expect(screen.getAllByTestId("formatted-text")).toHaveLength(3);
    // Check time to complete values
    expect(screen.getByText("15.00s")).toBeInTheDocument(); // 15000ms converted to seconds
    expect(screen.getByText("30.00s")).toBeInTheDocument(); // 30000ms converted to seconds
    expect(screen.getByText("N/A")).toBeInTheDocument(); // 0ms shown as N/A
    // Check impressions values
    expect(screen.getByText("100")).toBeInTheDocument();
    expect(screen.getByText("80")).toBeInTheDocument();
    expect(screen.getByText("65")).toBeInTheDocument();
    // Check drop-off counts and percentages
    expect(screen.getByText("20")).toBeInTheDocument();
    expect(screen.getByText("(20%)")).toBeInTheDocument();
    expect(screen.getByText("15")).toBeInTheDocument();
    expect(screen.getByText("(19%)")).toBeInTheDocument(); // 18.75% rounded to 19%
    expect(screen.getByText("10")).toBeInTheDocument();
    expect(screen.getByText("(15%)")).toBeInTheDocument(); // 15.38% rounded to 15%
  });

  test("renders empty state when dropOff array is empty", () => {
    render(<SummaryDropOffs dropOff={[]} survey={mockSurvey} />);
    // Header should still be visible
    expect(screen.getByText("common.questions")).toBeInTheDocument();
    // But no question icons
    expect(screen.queryByTestId("question-icon")).not.toBeInTheDocument();
  });
});

View File

@@ -39,8 +39,6 @@ interface SummaryListProps {
environment: TEnvironment;
survey: TSurvey;
totalResponseCount: number;
isAIEnabled: boolean;
documentsPerPage?: number;
locale: TUserLocale;
}
@@ -50,8 +48,6 @@ export const SummaryList = ({
responseCount,
survey,
totalResponseCount,
isAIEnabled,
documentsPerPage,
locale,
}: SummaryListProps) => {
const { setSelectedFilter, selectedFilter } = useResponseFilter();
@@ -134,8 +130,6 @@ export const SummaryList = ({
questionSummary={questionSummary}
environmentId={environment.id}
survey={survey}
isAIEnabled={isAIEnabled}
documentsPerPage={documentsPerPage}
locale={locale}
/>
);

View File

@@ -0,0 +1,228 @@
import { cleanup, render, screen, waitFor } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TEnvironment } from "@formbricks/types/environment";
import { TSurvey } from "@formbricks/types/surveys/types";
import { TUserLocale } from "@formbricks/types/user";
import { SummaryPage } from "./SummaryPage";
// Mock actions
// Both the authenticated and the sharing-key data actions resolve with the
// same fixture so the page renders identically in either mode.
vi.mock("@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/actions", () => ({
getResponseCountAction: vi.fn().mockResolvedValue({ data: 42 }),
getSurveySummaryAction: vi.fn().mockResolvedValue({
data: {
meta: {
completedPercentage: 80,
completedResponses: 40,
displayCount: 50,
dropOffPercentage: 20,
dropOffCount: 10,
startsPercentage: 100,
totalResponses: 50,
ttcAverage: 120,
},
dropOff: [
{
questionId: "q1",
headline: "Question 1",
questionType: "openText",
ttc: 20000,
impressions: 50,
dropOffCount: 5,
dropOffPercentage: 10,
},
],
summary: [
{
question: { id: "q1", headline: "Question 1", type: "openText", required: true },
responseCount: 45,
type: "openText",
samples: [],
},
],
},
}),
}));
vi.mock("@/app/share/[sharingKey]/actions", () => ({
getResponseCountBySurveySharingKeyAction: vi.fn().mockResolvedValue({ data: 42 }),
getSummaryBySurveySharingKeyAction: vi.fn().mockResolvedValue({
data: {
meta: {
completedPercentage: 80,
completedResponses: 40,
displayCount: 50,
dropOffPercentage: 20,
dropOffCount: 10,
startsPercentage: 100,
totalResponses: 50,
ttcAverage: 120,
},
dropOff: [
{
questionId: "q1",
headline: "Question 1",
questionType: "openText",
ttc: 20000,
impressions: 50,
dropOffCount: 5,
dropOffPercentage: 10,
},
],
summary: [
{
question: { id: "q1", headline: "Question 1", type: "openText", required: true },
responseCount: 45,
type: "openText",
samples: [],
},
],
},
}),
}));
// Mock components
// Child components are replaced with thin test doubles that surface just
// the props the page wires through (loading flag, toggle, counts).
vi.mock(
"@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SummaryDropOffs",
() => ({
SummaryDropOffs: () => <div data-testid="summary-drop-offs">DropOffs Component</div>,
})
);
vi.mock(
"@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SummaryList",
() => ({
SummaryList: ({ summary, responseCount }: any) => (
<div data-testid="summary-list">
<span>Response Count: {responseCount}</span>
<span>Summary Items: {summary.length}</span>
</div>
),
})
);
vi.mock(
"@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SummaryMetadata",
() => ({
SummaryMetadata: ({ showDropOffs, setShowDropOffs, isLoading }: any) => (
<div data-testid="summary-metadata">
<span>Is Loading: {isLoading ? "true" : "false"}</span>
<button onClick={() => setShowDropOffs(!showDropOffs)}>Toggle Dropoffs</button>
</div>
),
})
);
vi.mock(
"@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/ScrollToTop",
() => ({
__esModule: true,
default: () => <div data-testid="scroll-to-top">Scroll To Top</div>,
})
);
vi.mock("@/app/(app)/environments/[environmentId]/surveys/[surveyId]/components/CustomFilter", () => ({
CustomFilter: () => <div data-testid="custom-filter">Custom Filter</div>,
}));
vi.mock("@/app/(app)/environments/[environmentId]/surveys/[surveyId]/components/ResultsShareButton", () => ({
ResultsShareButton: () => <div data-testid="results-share-button">Share Results</div>,
}));
// Mock context
vi.mock("@/app/(app)/environments/[environmentId]/components/ResponseFilterContext", () => ({
useResponseFilter: () => ({
selectedFilter: { filter: [], onlyComplete: false },
dateRange: { from: null, to: null },
resetState: vi.fn(),
}),
}));
// Mock hooks
vi.mock("@/lib/utils/hooks/useIntervalWhenFocused", () => ({
useIntervalWhenFocused: vi.fn(),
}));
vi.mock("@/lib/utils/recall", () => ({
replaceHeadlineRecall: (survey: any) => survey,
}));
vi.mock("next/navigation", () => ({
useParams: () => ({}),
useSearchParams: () => ({ get: () => null }),
}));
// Covers: initial loading state, post-load rendering, the drop-offs toggle,
// and read-only mode hiding the share button.
describe("SummaryPage", () => {
afterEach(() => {
cleanup();
vi.clearAllMocks();
});
const mockEnvironment = { id: "env-123" } as TEnvironment;
const mockSurvey = {
id: "survey-123",
environmentId: "env-123",
} as TSurvey;
const locale = "en-US" as TUserLocale;
const defaultProps = {
environment: mockEnvironment,
survey: mockSurvey,
surveyId: "survey-123",
webAppUrl: "https://app.example.com",
totalResponseCount: 50,
locale,
isReadOnly: false,
};
test("renders loading state initially", () => {
// Before the mocked summary action resolves, the metadata double must
// report isLoading=true.
render(<SummaryPage {...defaultProps} />);
expect(screen.getByTestId("summary-metadata")).toBeInTheDocument();
expect(screen.getByText("Is Loading: true")).toBeInTheDocument();
});
test("renders summary components after loading", async () => {
render(<SummaryPage {...defaultProps} />);
// Wait for loading to complete
await waitFor(() => {
expect(screen.getByText("Is Loading: false")).toBeInTheDocument();
});
expect(screen.getByTestId("custom-filter")).toBeInTheDocument();
expect(screen.getByTestId("results-share-button")).toBeInTheDocument();
expect(screen.getByTestId("scroll-to-top")).toBeInTheDocument();
expect(screen.getByTestId("summary-list")).toBeInTheDocument();
});
test("shows drop-offs component when toggled", async () => {
const user = userEvent.setup();
render(<SummaryPage {...defaultProps} />);
// Wait for loading to complete
await waitFor(() => {
expect(screen.getByText("Is Loading: false")).toBeInTheDocument();
});
// Drop-offs should initially be hidden
expect(screen.queryByTestId("summary-drop-offs")).not.toBeInTheDocument();
// Toggle drop-offs
await user.click(screen.getByText("Toggle Dropoffs"));
// Drop-offs should now be visible
expect(screen.getByTestId("summary-drop-offs")).toBeInTheDocument();
});
test("doesn't show share button in read-only mode", async () => {
render(<SummaryPage {...defaultProps} isReadOnly={true} />);
// Wait for loading to complete
await waitFor(() => {
expect(screen.getByText("Is Loading: false")).toBeInTheDocument();
});
expect(screen.queryByTestId("results-share-button")).not.toBeInTheDocument();
});
});

View File

@@ -46,7 +46,6 @@ interface SummaryPageProps {
webAppUrl: string;
user?: TUser;
totalResponseCount: number;
isAIEnabled: boolean;
documentsPerPage?: number;
locale: TUserLocale;
isReadOnly: boolean;
@@ -58,8 +57,6 @@ export const SummaryPage = ({
surveyId,
webAppUrl,
totalResponseCount,
isAIEnabled,
documentsPerPage,
locale,
isReadOnly,
}: SummaryPageProps) => {
@@ -184,8 +181,6 @@ export const SummaryPage = ({
survey={surveyMemoized}
environment={environment}
totalResponseCount={totalResponseCount}
isAIEnabled={isAIEnabled}
documentsPerPage={documentsPerPage}
locale={locale}
/>
</>

View File

@@ -3,8 +3,11 @@
import { ShareEmbedSurvey } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/ShareEmbedSurvey";
import { SuccessMessage } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SuccessMessage";
import { SurveyStatusDropdown } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/components/SurveyStatusDropdown";
import { getFormattedErrorMessage } from "@/lib/utils/helper";
import { EditPublicSurveyAlertDialog } from "@/modules/survey/components/edit-public-survey-alert-dialog";
import { useSingleUseId } from "@/modules/survey/hooks/useSingleUseId";
import { copySurveyLink } from "@/modules/survey/lib/client-utils";
import { copySurveyToOtherEnvironmentAction } from "@/modules/survey/list/actions";
import { Badge } from "@/modules/ui/components/badge";
import { IconBar } from "@/modules/ui/components/iconbar";
import { useTranslate } from "@tolgee/react";
@@ -22,6 +25,7 @@ interface SurveyAnalysisCTAProps {
isReadOnly: boolean;
user: TUser;
surveyDomain: string;
responseCount: number;
}
interface ModalState {
@@ -37,11 +41,13 @@ export const SurveyAnalysisCTA = ({
isReadOnly,
user,
surveyDomain,
responseCount,
}: SurveyAnalysisCTAProps) => {
const { t } = useTranslate();
const searchParams = useSearchParams();
const pathname = usePathname();
const router = useRouter();
const [loading, setLoading] = useState(false);
const [modalState, setModalState] = useState<ModalState>({
share: searchParams.get("share") === "true",
@@ -89,6 +95,24 @@ export const SurveyAnalysisCTA = ({
setModalState((prev) => ({ ...prev, dropdown: false }));
};
const duplicateSurveyAndRoute = async (surveyId: string) => {
setLoading(true);
const duplicatedSurveyResponse = await copySurveyToOtherEnvironmentAction({
environmentId: environment.id,
surveyId: surveyId,
targetEnvironmentId: environment.id,
});
if (duplicatedSurveyResponse?.data) {
toast.success(t("environments.surveys.survey_duplicated_successfully"));
router.push(`/environments/${environment.id}/surveys/${duplicatedSurveyResponse.data.id}/edit`);
} else {
const errorMessage = getFormattedErrorMessage(duplicatedSurveyResponse);
toast.error(errorMessage);
}
setIsCautionDialogOpen(false);
setLoading(false);
};
const getPreviewUrl = () => {
const separator = surveyUrl.includes("?") ? "&" : "?";
return `${surveyUrl}${separator}preview=true`;
@@ -107,6 +131,8 @@ export const SurveyAnalysisCTA = ({
{ key: "panel", modalView: "panel" as const, setOpen: handleModalState("panel") },
];
const [isCautionDialogOpen, setIsCautionDialogOpen] = useState(false);
const iconActions = [
{
icon: Eye,
@@ -144,7 +170,11 @@ export const SurveyAnalysisCTA = ({
{
icon: SquarePenIcon,
tooltip: t("common.edit"),
onClick: () => router.push(`/environments/${environment.id}/surveys/${survey.id}/edit`),
onClick: () => {
responseCount && responseCount > 0
? setIsCautionDialogOpen(true)
: router.push(`/environments/${environment.id}/surveys/${survey.id}/edit`);
},
isVisible: !isReadOnly,
},
];
@@ -182,6 +212,20 @@ export const SurveyAnalysisCTA = ({
<SuccessMessage environment={environment} survey={survey} />
</>
)}
{responseCount > 0 && (
<EditPublicSurveyAlertDialog
open={isCautionDialogOpen}
setOpen={setIsCautionDialogOpen}
isLoading={loading}
primaryButtonAction={() => duplicateSurveyAndRoute(survey.id)}
primaryButtonText={t("environments.surveys.edit.caution_edit_duplicate")}
secondaryButtonAction={() =>
router.push(`/environments/${environment.id}/surveys/${survey.id}/edit`)
}
secondaryButtonText={t("common.edit")}
/>
)}
</div>
);
};

View File

@@ -1,7 +1,7 @@
import "@testing-library/jest-dom/vitest";
import { cleanup, fireEvent, render, screen, waitFor } from "@testing-library/react";
import toast from "react-hot-toast";
import { afterEach, describe, expect, test, vi } from "vitest";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { TEnvironment } from "@formbricks/types/environment";
import { TSurvey } from "@formbricks/types/surveys/types";
import { TUser } from "@formbricks/types/user";
@@ -25,12 +25,6 @@ vi.mock("@/lib/constants", () => ({
OIDC_DISPLAY_NAME: "mock-oidc-display-name",
OIDC_SIGNING_ALGORITHM: "mock-oidc-signing-algorithm",
WEBAPP_URL: "mock-webapp-url",
AI_AZURE_LLM_RESSOURCE_NAME: "mock-azure-llm-resource-name",
AI_AZURE_LLM_API_KEY: "mock-azure-llm-api-key",
AI_AZURE_LLM_DEPLOYMENT_ID: "mock-azure-llm-deployment-id",
AI_AZURE_EMBEDDINGS_RESSOURCE_NAME: "mock-azure-embeddings-resource-name",
AI_AZURE_EMBEDDINGS_API_KEY: "mock-azure-embeddings-api-key",
AI_AZURE_EMBEDDINGS_DEPLOYMENT_ID: "mock-azure-embeddings-deployment-id",
IS_PRODUCTION: true,
FB_LOGO_URL: "https://example.com/mock-logo.png",
SMTP_HOST: "mock-smtp-host",
@@ -49,10 +43,12 @@ vi.mock("@/modules/survey/hooks/useSingleUseId", () => ({
}));
const mockSearchParams = new URLSearchParams();
const mockPush = vi.fn();
// Mock next/navigation
vi.mock("next/navigation", () => ({
useRouter: () => ({ push: vi.fn() }),
useSearchParams: () => mockSearchParams, // Reuse the same object
useRouter: () => ({ push: mockPush }),
useSearchParams: () => mockSearchParams,
usePathname: () => "/current",
}));
@@ -61,13 +57,27 @@ vi.mock("@/modules/survey/lib/client-utils", () => ({
copySurveyLink: vi.fn((url: string, id: string) => `${url}?id=${id}`),
}));
// Mock the copy survey action
const mockCopySurveyToOtherEnvironmentAction = vi.fn();
vi.mock("@/modules/survey/list/actions", () => ({
copySurveyToOtherEnvironmentAction: (args: any) => mockCopySurveyToOtherEnvironmentAction(args),
}));
// Mock getFormattedErrorMessage function
vi.mock("@/lib/utils/helper", () => ({
getFormattedErrorMessage: vi.fn((response) => response?.error || "Unknown error"),
}));
vi.spyOn(toast, "success");
vi.spyOn(toast, "error");
// Set up a fake clipboard
const writeTextMock = vi.fn(() => Promise.resolve());
Object.assign(navigator, {
clipboard: { writeText: writeTextMock },
// Mock clipboard API
const writeTextMock = vi.fn().mockImplementation(() => Promise.resolve());
// Define it at the global level
Object.defineProperty(navigator, "clipboard", {
value: { writeText: writeTextMock },
configurable: true,
});
const dummySurvey = {
@@ -93,6 +103,7 @@ describe("SurveyAnalysisCTA - handleCopyLink", () => {
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
@@ -117,6 +128,7 @@ describe("SurveyAnalysisCTA - handleCopyLink", () => {
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
@@ -130,3 +142,225 @@ describe("SurveyAnalysisCTA - handleCopyLink", () => {
});
});
});
// New tests for squarePenIcon and edit functionality
describe("SurveyAnalysisCTA - Edit functionality", () => {
beforeEach(() => {
vi.resetAllMocks();
});
afterEach(() => {
cleanup();
});
test("opens EditPublicSurveyAlertDialog when edit icon is clicked and response count > 0", async () => {
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
// Find the edit button
const editButton = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButton);
// Check if dialog is shown
const dialogTitle = screen.getByText("environments.surveys.edit.caution_edit_published_survey");
expect(dialogTitle).toBeInTheDocument();
});
test("navigates directly to edit page when response count = 0", async () => {
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={0}
/>
);
// Find the edit button
const editButton = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButton);
// Should navigate directly to edit page
expect(mockPush).toHaveBeenCalledWith(
`/environments/${dummyEnvironment.id}/surveys/${dummySurvey.id}/edit`
);
});
test("doesn't show edit button when isReadOnly is true", () => {
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={true}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
// Try to find the edit button (it shouldn't exist)
const editButton = screen.queryByRole("button", { name: "common.edit" });
expect(editButton).not.toBeInTheDocument();
});
});
// Updated test description to mention EditPublicSurveyAlertDialog
describe("SurveyAnalysisCTA - duplicateSurveyAndRoute and EditPublicSurveyAlertDialog", () => {
afterEach(() => {
cleanup();
});
test("duplicates survey successfully and navigates to edit page", async () => {
// Mock the API response
mockCopySurveyToOtherEnvironmentAction.mockResolvedValueOnce({
data: { id: "duplicated-survey-456" },
});
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
// Find and click the edit button to show dialog
const editButton = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButton);
// Find and click the duplicate button in dialog
const duplicateButton = screen.getByRole("button", {
name: "environments.surveys.edit.caution_edit_duplicate",
});
await fireEvent.click(duplicateButton);
// Verify the API was called with correct parameters
expect(mockCopySurveyToOtherEnvironmentAction).toHaveBeenCalledWith({
environmentId: dummyEnvironment.id,
surveyId: dummySurvey.id,
targetEnvironmentId: dummyEnvironment.id,
});
// Verify success toast was shown
expect(toast.success).toHaveBeenCalledWith("environments.surveys.survey_duplicated_successfully");
// Verify navigation to edit page
expect(mockPush).toHaveBeenCalledWith(
`/environments/${dummyEnvironment.id}/surveys/duplicated-survey-456/edit`
);
});
test("shows error toast when duplication fails with error object", async () => {
// Mock API failure with error object
mockCopySurveyToOtherEnvironmentAction.mockResolvedValueOnce({
error: "Test error message",
});
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
// Open dialog
const editButton = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButton);
// Click duplicate
const duplicateButton = screen.getByRole("button", {
name: "environments.surveys.edit.caution_edit_duplicate",
});
await fireEvent.click(duplicateButton);
// Verify error toast
expect(toast.error).toHaveBeenCalledWith("Test error message");
});
test("navigates to edit page when cancel button is clicked in dialog", async () => {
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
// Open dialog
const editButton = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButton);
// Click edit (cancel) button
const editButtonInDialog = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButtonInDialog);
// Verify navigation
expect(mockPush).toHaveBeenCalledWith(
`/environments/${dummyEnvironment.id}/surveys/${dummySurvey.id}/edit`
);
});
test("shows loading state when duplicating survey", async () => {
// Create a promise that we can resolve manually
let resolvePromise: (value: any) => void;
const promise = new Promise((resolve) => {
resolvePromise = resolve;
});
mockCopySurveyToOtherEnvironmentAction.mockImplementation(() => promise);
render(
<SurveyAnalysisCTA
survey={dummySurvey}
environment={dummyEnvironment}
isReadOnly={false}
surveyDomain={surveyDomain}
user={dummyUser}
responseCount={5}
/>
);
// Open dialog
const editButton = screen.getByRole("button", { name: "common.edit" });
await fireEvent.click(editButton);
// Click duplicate
const duplicateButton = screen.getByRole("button", {
name: "environments.surveys.edit.caution_edit_duplicate",
});
await fireEvent.click(duplicateButton);
// Button should now be in loading state
// expect(duplicateButton).toHaveAttribute("data-state", "loading");
// Resolve the promise
resolvePromise!({
data: { id: "duplicated-survey-456" },
});
// Wait for the promise to resolve
await waitFor(() => {
expect(mockPush).toHaveBeenCalled();
});
});
});

View File

@@ -1,88 +0,0 @@
import { cache } from "@/lib/cache";
import { documentCache } from "@/lib/cache/document";
import { INSIGHTS_PER_PAGE } from "@/lib/constants";
import { validateInputs } from "@/lib/utils/validate";
import { Prisma } from "@prisma/client";
import { cache as reactCache } from "react";
import { prisma } from "@formbricks/database";
import { ZId } from "@formbricks/types/common";
import { DatabaseError } from "@formbricks/types/errors";
import {
TSurveyQuestionId,
TSurveyQuestionSummaryOpenText,
ZSurveyQuestionId,
} from "@formbricks/types/surveys/types";
/**
 * Fetches the insights linked (via documentInsights) to documents of one
 * open-text question of a survey, optionally restricted to a set of response
 * ids, ordered by how many documents reference each insight (then recency).
 *
 * Cached per request via React `cache` and cross-request via the app cache;
 * invalidated through the survey-scoped document cache tag.
 *
 * @param surveyId - id of the survey the documents belong to
 * @param questionId - id of the open-text question
 * @param insightResponsesIds - when non-empty, only documents of these responses count
 * @param limit - page size; defaults to INSIGHTS_PER_PAGE
 * @param offset - rows to skip for pagination
 * @throws DatabaseError on known Prisma request errors
 */
export const getInsightsBySurveyIdQuestionId = reactCache(
  async (
    surveyId: string,
    questionId: TSurveyQuestionId,
    insightResponsesIds: string[],
    limit?: number,
    offset?: number
  ): Promise<TSurveyQuestionSummaryOpenText["insights"]> =>
    cache(
      async () => {
        validateInputs([surveyId, ZId], [questionId, ZSurveyQuestionId]);
        // Default the page size without mutating the caller-supplied argument.
        const effectiveLimit = limit ?? INSIGHTS_PER_PAGE;
        try {
          const insights = await prisma.insight.findMany({
            where: {
              documentInsights: {
                some: {
                  document: {
                    surveyId,
                    questionId,
                    // Restrict to the given responses only when a filter was provided.
                    ...(insightResponsesIds.length > 0 && {
                      responseId: {
                        in: insightResponsesIds,
                      },
                    }),
                  },
                },
              },
            },
            include: {
              // Document count per insight, scoped to this survey/question.
              _count: {
                select: {
                  documentInsights: {
                    where: {
                      document: {
                        surveyId,
                        questionId,
                      },
                    },
                  },
                },
              },
            },
            orderBy: [
              {
                documentInsights: {
                  _count: "desc",
                },
              },
              {
                createdAt: "desc",
              },
            ],
            // Preserve original semantics: a falsy limit (e.g. 0) means "no limit".
            take: effectiveLimit ? effectiveLimit : undefined,
            skip: offset ? offset : undefined,
          });
          return insights;
        } catch (error) {
          if (error instanceof Prisma.PrismaClientKnownRequestError) {
            throw new DatabaseError(error.message);
          }
          throw error;
        }
      },
      // Bug fix: the response-id filter must be part of the cache key —
      // previously two calls with different `insightResponsesIds` shared the
      // same cached result.
      [
        `getInsightsBySurveyIdQuestionId-${surveyId}-${questionId}-${insightResponsesIds.join(",")}-${limit}-${offset}`,
      ],
      {
        tags: [documentCache.tag.bySurveyId(surveyId)],
      }
    )()
);

View File

@@ -1,5 +1,4 @@
import "server-only";
import { getInsightsBySurveyIdQuestionId } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/lib/insights";
import { cache } from "@/lib/cache";
import { RESPONSES_PER_PAGE } from "@/lib/constants";
import { displayCache } from "@/lib/display/cache";
@@ -317,11 +316,9 @@ export const getQuestionSummary = async (
switch (question.type) {
case TSurveyQuestionTypeEnum.OpenText: {
let values: TSurveyQuestionSummaryOpenText["samples"] = [];
const insightResponsesIds: string[] = [];
responses.forEach((response) => {
const answer = response.data[question.id];
if (answer && typeof answer === "string") {
insightResponsesIds.push(response.id);
values.push({
id: response.id,
updatedAt: response.updatedAt,
@@ -331,20 +328,12 @@ export const getQuestionSummary = async (
});
}
});
const insights = await getInsightsBySurveyIdQuestionId(
survey.id,
question.id,
insightResponsesIds,
50
);
summary.push({
type: question.type,
question,
responseCount: values.length,
samples: values.slice(0, VALUES_LIMIT),
insights,
insightsEnabled: question.insightsEnabled,
});
values = [];

View File

@@ -38,12 +38,3 @@ export const constructToastMessage = (
});
}
};
/**
 * Returns true when a survey has at least one open-text question whose
 * `insightsEnabled` flag has never been set (i.e. insights generation has not
 * yet been decided for it).
 *
 * Cleanup: the previous version re-checked `question.type === "openText"`
 * inside `.some()` on an array that was already filtered to open-text
 * questions, and guarded with `length > 0 &&` even though `.some()` on an
 * empty array is already false. Behavior is unchanged.
 */
export const needsInsightsGeneration = (survey: TSurvey): boolean => {
  const openTextQuestions = survey.questions.filter((question) => question.type === "openText");
  return openTextQuestions.some((question) => typeof question.insightsEnabled === "undefined");
};

View File

@@ -1,19 +1,11 @@
import { SurveyAnalysisNavigation } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/components/SurveyAnalysisNavigation";
import { EnableInsightsBanner } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/EnableInsightsBanner";
import { SummaryPage } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SummaryPage";
import { SurveyAnalysisCTA } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SurveyAnalysisCTA";
import { needsInsightsGeneration } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/lib/utils";
import {
DEFAULT_LOCALE,
DOCUMENTS_PER_PAGE,
MAX_RESPONSES_FOR_INSIGHT_GENERATION,
WEBAPP_URL,
} from "@/lib/constants";
import { DEFAULT_LOCALE, DOCUMENTS_PER_PAGE, WEBAPP_URL } from "@/lib/constants";
import { getSurveyDomain } from "@/lib/getSurveyUrl";
import { getResponseCountBySurveyId } from "@/lib/response/service";
import { getSurvey } from "@/lib/survey/service";
import { getUser } from "@/lib/user/service";
import { getIsAIEnabled } from "@/modules/ee/license-check/lib/utils";
import { getEnvironmentAuth } from "@/modules/environments/lib/utils";
import { PageContentWrapper } from "@/modules/ui/components/page-content-wrapper";
import { PageHeader } from "@/modules/ui/components/page-header";
@@ -25,7 +17,7 @@ const SurveyPage = async (props: { params: Promise<{ environmentId: string; surv
const params = await props.params;
const t = await getTranslate();
const { session, environment, organization, isReadOnly } = await getEnvironmentAuth(params.environmentId);
const { session, environment, isReadOnly } = await getEnvironmentAuth(params.environmentId);
const surveyId = params.surveyId;
@@ -50,11 +42,6 @@ const SurveyPage = async (props: { params: Promise<{ environmentId: string; surv
// I took this out cause it's cloud only right?
// const { active: isEnterpriseEdition } = await getEnterpriseLicense();
const isAIEnabled = await getIsAIEnabled({
isAIEnabled: organization.isAIEnabled,
billing: organization.billing,
});
const shouldGenerateInsights = needsInsightsGeneration(survey);
const surveyDomain = getSurveyDomain();
return (
@@ -68,15 +55,9 @@ const SurveyPage = async (props: { params: Promise<{ environmentId: string; surv
isReadOnly={isReadOnly}
user={user}
surveyDomain={surveyDomain}
responseCount={totalResponseCount}
/>
}>
{isAIEnabled && shouldGenerateInsights && (
<EnableInsightsBanner
surveyId={survey.id}
surveyResponseCount={totalResponseCount}
maxResponseCount={MAX_RESPONSES_FOR_INSIGHT_GENERATION}
/>
)}
<SurveyAnalysisNavigation
environmentId={environment.id}
survey={survey}
@@ -91,7 +72,6 @@ const SurveyPage = async (props: { params: Promise<{ environmentId: string; surv
webAppUrl={WEBAPP_URL}
user={user}
totalResponseCount={totalResponseCount}
isAIEnabled={isAIEnabled}
documentsPerPage={DOCUMENTS_PER_PAGE}
isReadOnly={isReadOnly}
locale={user.locale ?? DEFAULT_LOCALE}

View File

@@ -9,6 +9,7 @@ import { checkAuthorizationUpdated } from "@/lib/utils/action-client-middleware"
import { getOrganizationIdFromSurveyId, getProjectIdFromSurveyId } from "@/lib/utils/helper";
import { checkMultiLanguagePermission } from "@/modules/ee/multi-language-surveys/lib/actions";
import { getSurveyFollowUpsPermission } from "@/modules/survey/follow-ups/lib/utils";
import { checkSpamProtectionPermission } from "@/modules/survey/lib/permission";
import { z } from "zod";
import { ZId } from "@formbricks/types/common";
import { OperationNotAllowedError, ResourceNotFoundError } from "@formbricks/types/errors";
@@ -123,6 +124,10 @@ export const updateSurveyAction = authenticatedActionClient
const { followUps } = parsedInput;
if (parsedInput.recaptcha?.enabled) {
await checkSpamProtectionPermission(organizationId);
}
if (followUps?.length) {
await checkSurveyFollowUpsPermission(organizationId);
}

View File

@@ -1,84 +0,0 @@
import { embeddingsModel, llmModel } from "@/lib/aiModels";
import { documentCache } from "@/lib/cache/document";
import { validateInputs } from "@/lib/utils/validate";
import { Prisma } from "@prisma/client";
import { embed, generateObject } from "ai";
import { z } from "zod";
import { prisma } from "@formbricks/database";
import {
TDocument,
TDocumentCreateInput,
TGenerateDocumentObjectSchema,
ZDocumentCreateInput,
ZGenerateDocumentObjectSchema,
} from "@formbricks/types/documents";
import { DatabaseError } from "@formbricks/types/errors";
// A freshly created Document row plus the AI-analysis artefacts that are not
// returned on the row itself: the spam flag and the generated insights.
export type TCreatedDocument = TDocument & {
  isSpam: boolean;
  insights: TGenerateDocumentObjectSchema["insights"];
};

/**
 * Creates a Document from one open-text survey answer: embeds the text,
 * asks the LLM for sentiment / spam classification / insights, persists the
 * row, then writes the embedding into the `vector` column via raw SQL.
 *
 * @param surveyName - survey name, included in the LLM prompt for context
 * @param documentInput - validated document payload (environment, survey,
 *   response, question ids and the answer text)
 * @returns the created document together with its embedding, insights and spam flag
 * @throws DatabaseError on known Prisma request errors; rethrows everything else
 */
export const createDocument = async (
  surveyName: string,
  documentInput: TDocumentCreateInput
): Promise<TCreatedDocument> => {
  validateInputs([surveyName, z.string()], [documentInput, ZDocumentCreateInput]);
  try {
    // Generate text embedding for the raw answer text
    const { embedding } = await embed({
      model: embeddingsModel,
      value: documentInput.text,
      experimental_telemetry: { isEnabled: true },
    });
    // Generate sentiment, spam flag and insights in a single structured call
    const { object } = await generateObject({
      model: llmModel,
      schema: ZGenerateDocumentObjectSchema,
      system: `You are an XM researcher. You analyse a survey response (survey name, question headline & user answer) and generate insights from it. The insight title (1-3 words) should concisely answer the question, e.g., "What type of people do you think would most benefit" -> "Developers". You are very objective. For the insights, split the feedback into the smallest parts possible and only use the feedback itself to draw conclusions. You must output at least one insight. Always generate insights and titles in English, regardless of the input language.`,
      prompt: `Survey: ${surveyName}\n${documentInput.text}`,
      temperature: 0,
      experimental_telemetry: { isEnabled: true },
    });
    const sentiment = object.sentiment;
    const isSpam = object.isSpam;
    // Persist the document (without the vector — see raw UPDATE below)
    const prismaDocument = await prisma.document.create({
      data: {
        ...documentInput,
        sentiment,
        isSpam,
      },
    });
    const document = {
      ...prismaDocument,
      vector: embedding,
    };
    // Update the document's vector column with the embedding.
    // NOTE(review): presumably raw SQL is used because the vector column type
    // is not supported by the Prisma client — confirm.
    const vectorString = `[${embedding.join(",")}]`;
    await prisma.$executeRaw`
      UPDATE "Document"
      SET "vector" = ${vectorString}::vector(512)
      WHERE "id" = ${document.id};
    `;
    documentCache.revalidate({
      id: document.id,
      responseId: document.responseId,
      questionId: document.questionId,
    });
    // Insights are returned to the caller for assignment; they are not stored here.
    return { ...document, insights: object.insights, isSpam };
  } catch (error) {
    if (error instanceof Prisma.PrismaClientKnownRequestError) {
      throw new DatabaseError(error.message);
    }
    throw error;
  }
};

View File

@@ -1,430 +0,0 @@
import { createDocument } from "@/app/api/(internal)/insights/lib/document";
import { doesResponseHasAnyOpenTextAnswer } from "@/app/api/(internal)/insights/lib/utils";
import { embeddingsModel } from "@/lib/aiModels";
import { documentCache } from "@/lib/cache/document";
import { insightCache } from "@/lib/cache/insight";
import { getPromptText } from "@/lib/utils/ai";
import { parseRecallInfo } from "@/lib/utils/recall";
import { validateInputs } from "@/lib/utils/validate";
import { Insight, InsightCategory, Prisma } from "@prisma/client";
import { embed } from "ai";
import { prisma } from "@formbricks/database";
import { ZId } from "@formbricks/types/common";
import { TCreatedDocument } from "@formbricks/types/documents";
import { DatabaseError } from "@formbricks/types/errors";
import {
TSurvey,
TSurveyQuestionId,
TSurveyQuestionTypeEnum,
ZSurveyQuestions,
} from "@formbricks/types/surveys/types";
import { TInsightCreateInput, TNearestInsights, ZInsightCreateInput } from "./types";
/**
 * Rate-limit-aware variant of insight generation for a survey's responses.
 *
 * Flow: find open-text questions with insights enabled; batch-fetch finished
 * responses that have no documents yet; build one prompt per (response,
 * question) answer; create documents up to the embedding-API rate limit,
 * carrying any excess over to the next iteration via `spillover`; then create
 * or merge insights for each non-spam document.
 *
 * @param survey - id, name, environmentId and questions of the target survey
 * @throws DatabaseError on known Prisma request errors; rethrows everything else
 */
export const generateInsightsForSurveyResponsesConcept = async (
  survey: Pick<TSurvey, "id" | "name" | "environmentId" | "questions">
): Promise<void> => {
  const { id: surveyId, name, environmentId, questions } = survey;
  validateInputs([surveyId, ZId], [environmentId, ZId], [questions, ZSurveyQuestions]);
  try {
    // Only open-text questions with insights explicitly enabled participate.
    const openTextQuestionsWithInsights = questions.filter(
      (question) => question.type === TSurveyQuestionTypeEnum.OpenText && question.insightsEnabled
    );
    const openTextQuestionIds = openTextQuestionsWithInsights.map((question) => question.id);
    if (openTextQuestionIds.length === 0) {
      return;
    }
    // Fetching responses
    const batchSize = 200;
    let skip = 0;
    let rateLimit: number | undefined;
    // Answers that exceeded the rate limit in one iteration, processed first
    // in the next iteration.
    const spillover: { responseId: string; questionId: string; text: string }[] = [];
    let allResponsesProcessed = false;
    // Fetch the rate limit once, if not already set.
    // NOTE(review): `rateLimit` is declared just above, so this condition is
    // always true — the guard is dead code.
    if (rateLimit === undefined) {
      // Probe request whose only purpose is reading the rate-limit header.
      const { rawResponse } = await embed({
        model: embeddingsModel,
        value: "Test",
        experimental_telemetry: { isEnabled: true },
      });
      const rateLimitHeader = rawResponse?.headers?.["x-ratelimit-remaining-requests"];
      rateLimit = rateLimitHeader ? parseInt(rateLimitHeader, 10) : undefined;
    }
    while (!allResponsesProcessed || spillover.length > 0) {
      // If there are any spillover documents from the previous iteration, prioritize them
      let answersForDocumentCreation = [...spillover];
      spillover.length = 0; // Empty the spillover array after moving contents
      // Fetch new responses only if spillover is empty
      if (answersForDocumentCreation.length === 0 && !allResponsesProcessed) {
        // Finished responses with no documents yet (i.e. not processed before).
        const responses = await prisma.response.findMany({
          where: {
            surveyId,
            documents: {
              none: {},
            },
            finished: true,
          },
          select: {
            id: true,
            data: true,
            variables: true,
            contactId: true,
            language: true,
          },
          take: batchSize,
          skip,
        });
        if (
          responses.length === 0 ||
          (responses.length < batchSize && rateLimit && responses.length < rateLimit)
        ) {
          allResponsesProcessed = true; // Mark as finished when no more responses are found
        }
        const responsesWithOpenTextAnswers = responses.filter((response) =>
          doesResponseHasAnyOpenTextAnswer(openTextQuestionIds, response.data)
        );
        // Processed responses gain documents and drop out of the `documents:
        // none` filter, so only advance past the ones we did NOT process.
        skip += batchSize - responsesWithOpenTextAnswers.length;
        const answersForDocumentCreationPromises = await Promise.all(
          responsesWithOpenTextAnswers.map(async (response) => {
            // One prompt entry per answered open-text question of this response.
            const responseEntries = openTextQuestionsWithInsights.map((question) => {
              const responseText = response.data[question.id] as string;
              if (!responseText) {
                return;
              }
              // Resolve recall placeholders in the headline for this response's language.
              const headline = parseRecallInfo(
                question.headline[response.language ?? "default"],
                response.data,
                response.variables
              );
              const text = getPromptText(headline, responseText);
              return {
                responseId: response.id,
                questionId: question.id,
                text,
              };
            });
            return responseEntries;
          })
        );
        const answersForDocumentCreationResult = answersForDocumentCreationPromises.flat();
        // Drop the `undefined` entries produced by unanswered questions.
        answersForDocumentCreationResult.forEach((answer) => {
          if (answer) {
            answersForDocumentCreation.push(answer);
          }
        });
      }
      // Process documents only up to the rate limit.
      // NOTE(review): if the rate limit is ever 0 the batch is emptied and the
      // spillover is refilled each iteration — confirm this cannot loop forever.
      if (rateLimit !== undefined && rateLimit < answersForDocumentCreation.length) {
        // Push excess documents to the spillover array
        spillover.push(...answersForDocumentCreation.slice(rateLimit));
        answersForDocumentCreation = answersForDocumentCreation.slice(0, rateLimit);
      }
      const createDocumentPromises = answersForDocumentCreation.map((answer) => {
        return createDocument(name, {
          environmentId,
          surveyId,
          responseId: answer.responseId,
          questionId: answer.questionId,
          text: answer.text,
        });
      });
      // allSettled: one failed document must not abort the whole batch.
      const createDocumentResults = await Promise.allSettled(createDocumentPromises);
      const fullfilledCreateDocumentResults = createDocumentResults.filter(
        (result) => result.status === "fulfilled"
      ) as PromiseFulfilledResult<TCreatedDocument>[];
      const createdDocuments = fullfilledCreateDocumentResults.filter(Boolean).map((result) => result.value);
      for (const document of createdDocuments) {
        if (document) {
          const insightPromises: Promise<void>[] = [];
          const { insights, isSpam, id, environmentId } = document;
          // Spam documents get no insight assignments.
          if (!isSpam) {
            for (const insight of insights) {
              if (typeof insight.title !== "string" || typeof insight.description !== "string") {
                throw new Error("Insight title and description must be a string");
              }
              // Create or connect the insight
              insightPromises.push(handleInsightAssignments(environmentId, id, insight));
            }
            await Promise.allSettled(insightPromises);
          }
        }
      }
      documentCache.revalidate({
        environmentId: environmentId,
        surveyId: surveyId,
      });
    }
    return;
  } catch (error) {
    if (error instanceof Prisma.PrismaClientKnownRequestError) {
      throw new DatabaseError(error.message);
    }
    throw error;
  }
};
/**
 * Generates documents and insights for all finished, not-yet-processed
 * responses of a survey, paging through them in batches of 200.
 *
 * For every (response, open-text question with insights enabled) pair a
 * document is created via `createDocument`; each non-spam document's insights
 * are then created or merged via `handleInsightAssignments`.
 *
 * @param survey - id, name, environmentId and questions of the target survey
 * @throws DatabaseError on known Prisma request errors; rethrows everything else
 */
export const generateInsightsForSurveyResponses = async (
  survey: Pick<TSurvey, "id" | "name" | "environmentId" | "questions">
): Promise<void> => {
  const { id: surveyId, name, environmentId, questions } = survey;
  validateInputs([surveyId, ZId], [environmentId, ZId], [questions, ZSurveyQuestions]);
  try {
    // Only open-text questions with insights explicitly enabled participate.
    const openTextQuestionsWithInsights = questions.filter(
      (question) => question.type === TSurveyQuestionTypeEnum.OpenText && question.insightsEnabled
    );
    const openTextQuestionIds = openTextQuestionsWithInsights.map((question) => question.id);
    if (openTextQuestionIds.length === 0) {
      return;
    }
    // Fetching responses
    const batchSize = 200;
    let skip = 0;
    // Total of finished responses that have no documents yet (unprocessed).
    const totalResponseCount = await prisma.response.count({
      where: {
        surveyId,
        documents: {
          none: {},
        },
        finished: true,
      },
    });
    const pages = Math.ceil(totalResponseCount / batchSize);
    for (let i = 0; i < pages; i++) {
      const responses = await prisma.response.findMany({
        where: {
          surveyId,
          documents: {
            none: {},
          },
          finished: true,
        },
        select: {
          id: true,
          data: true,
          variables: true,
          contactId: true,
          language: true,
        },
        take: batchSize,
        skip,
      });
      const responsesWithOpenTextAnswers = responses.filter((response) =>
        doesResponseHasAnyOpenTextAnswer(openTextQuestionIds, response.data)
      );
      // Processed responses gain documents and drop out of the `documents:
      // none` filter on the next query, so only advance past those we skipped.
      skip += batchSize - responsesWithOpenTextAnswers.length;
      const createDocumentPromises: Promise<TCreatedDocument | undefined>[] = [];
      for (const response of responsesWithOpenTextAnswers) {
        for (const question of openTextQuestionsWithInsights) {
          const responseText = response.data[question.id] as string;
          if (!responseText) {
            continue;
          }
          // Resolve recall placeholders in the headline for this response's language.
          const headline = parseRecallInfo(
            question.headline[response.language ?? "default"],
            response.data,
            response.variables
          );
          const text = getPromptText(headline, responseText);
          const createDocumentPromise = createDocument(name, {
            environmentId,
            surveyId,
            responseId: response.id,
            questionId: question.id,
            text,
          });
          createDocumentPromises.push(createDocumentPromise);
        }
      }
      // NOTE(review): Promise.all is fail-fast here — a single failing
      // createDocument aborts the whole batch (unlike the allSettled variant
      // in generateInsightsForSurveyResponsesConcept). Confirm this is intended.
      const createdDocuments = (await Promise.all(createDocumentPromises)).filter(
        Boolean
      ) as TCreatedDocument[];
      for (const document of createdDocuments) {
        if (document) {
          const insightPromises: Promise<void>[] = [];
          const { insights, isSpam, id, environmentId } = document;
          // Spam documents get no insight assignments.
          if (!isSpam) {
            for (const insight of insights) {
              if (typeof insight.title !== "string" || typeof insight.description !== "string") {
                throw new Error("Insight title and description must be a string");
              }
              // create or connect the insight
              insightPromises.push(handleInsightAssignments(environmentId, id, insight));
            }
            await Promise.all(insightPromises);
          }
        }
      }
      documentCache.revalidate({
        environmentId: environmentId,
        surveyId: surveyId,
      });
    }
  } catch (error) {
    if (error instanceof Prisma.PrismaClientKnownRequestError) {
      throw new DatabaseError(error.message);
    }
    throw error;
  }
};
/** Stable reference id for a (survey, question) pair: "<surveyId>-<questionId>". */
export const getQuestionResponseReferenceId = (surveyId: string, questionId: TSurveyQuestionId) =>
  [surveyId, questionId].join("-");
/**
 * Persists a new Insight and writes its embedding into the `vector` column
 * via raw SQL, then revalidates the insight cache.
 *
 * @param insightGroupInput - environmentId, title, description, category and
 *   a 512-dimensional embedding vector
 * @returns the created Insight row (without the vector column populated in
 *   the returned object)
 * @throws DatabaseError on known Prisma request errors; rethrows everything else
 */
export const createInsight = async (insightGroupInput: TInsightCreateInput): Promise<Insight> => {
  validateInputs([insightGroupInput, ZInsightCreateInput]);
  try {
    // Split the vector off — it is written separately below, not via create().
    const { vector, ...data } = insightGroupInput;
    const insight = await prisma.insight.create({
      data,
    });
    // Update the insight's vector column with the embedding.
    // NOTE(review): presumably raw SQL is used because the vector column type
    // is not supported by the Prisma client — confirm.
    const vectorString = `[${insightGroupInput.vector.join(",")}]`;
    await prisma.$executeRaw`
      UPDATE "Insight"
      SET "vector" = ${vectorString}::vector(512)
      WHERE "id" = ${insight.id};
    `;
    insightCache.revalidate({
      id: insight.id,
      environmentId: insight.environmentId,
    });
    return insight;
  } catch (error) {
    if (error instanceof Prisma.PrismaClientKnownRequestError) {
      throw new DatabaseError(error.message);
    }
    throw error;
  }
};
/**
 * Links a document to an insight: embeds the insight text, looks for an
 * existing near-duplicate insight (distance <= 0.2) to merge into, and either
 * connects the document to that insight or creates a brand-new insight first.
 *
 * Cleanup: the previous version wrapped the body in
 * `try { ... } catch (error) { throw error; }`, which only rethrew the same
 * error and added nothing — the wrapper has been removed; errors still
 * propagate to the caller unchanged.
 *
 * @param environmentId - environment the insight belongs to
 * @param documentId - document to connect
 * @param insight - generated insight (title, description, category)
 */
export const handleInsightAssignments = async (
  environmentId: string,
  documentId: string,
  insight: {
    title: string;
    description: string;
    category: InsightCategory;
  }
) => {
  // create embedding for insight
  const { embedding } = await embed({
    model: embeddingsModel,
    value: getInsightVectorText(insight.title, insight.description),
    experimental_telemetry: { isEnabled: true },
  });
  // find close insight to merge it with (nearest one within distance 0.2)
  const nearestInsights = await findNearestInsights(environmentId, embedding, 1, 0.2);

  if (nearestInsights.length > 0) {
    // connect the document to the existing (merged) insight
    await prisma.documentInsight.create({
      data: {
        documentId,
        insightId: nearestInsights[0].id,
      },
    });
    documentCache.revalidate({
      insightId: nearestInsights[0].id,
    });
  } else {
    // no close match — create a new insight, then connect the document to it
    const newInsight = await createInsight({
      environmentId: environmentId,
      title: insight.title,
      description: insight.description,
      category: insight.category ?? "other",
      vector: embedding,
    });
    await prisma.documentInsight.create({
      data: {
        documentId,
        insightId: newInsight.id,
      },
    });
    documentCache.revalidate({
      insightId: newInsight.id,
    });
  }
};
/**
 * Finds up to `limit` insights in an environment whose embedding is within
 * `threshold` distance of the given vector, nearest first.
 *
 * NOTE(review): `<=>` looks like the pgvector cosine-distance operator —
 * confirm the column's index/operator class matches.
 *
 * @param environmentId - environment to search in
 * @param vector - 512-dimensional query embedding
 * @param limit - maximum number of matches (default 5)
 * @param threshold - maximum distance to count as "near" (default 0.5)
 * @returns matching insight ids only (the vector column is deliberately excluded)
 */
export const findNearestInsights = async (
  environmentId: string,
  vector: number[],
  limit: number = 5,
  threshold: number = 0.5
): Promise<TNearestInsights[]> => {
  validateInputs([environmentId, ZId]);
  // Convert the embedding array to a JSON-like string representation
  const vectorString = `[${vector.join(",")}]`;
  // Execute raw SQL query to find nearest neighbors and exclude the vector column
  const insights: TNearestInsights[] = await prisma.$queryRaw`
    SELECT
      id
    FROM "Insight" d
    WHERE d."environmentId" = ${environmentId}
      AND d."vector" <=> ${vectorString}::vector(512) <= ${threshold}
    ORDER BY d."vector" <=> ${vectorString}::vector(512)
    LIMIT ${limit};
  `;
  return insights;
};
/** Builds the text that gets embedded for an insight: "<title>: <description>". */
export const getInsightVectorText = (title: string, description: string): string => {
  return title + ": " + description;
};

View File

@@ -1,16 +0,0 @@
import { Insight } from "@prisma/client";
import { z } from "zod";
import { ZInsight } from "@formbricks/database/zod/insights";
// Input schema for createInsight: the persisted Insight columns callers may
// set, plus the 512-dimensional embedding vector (which is written to the
// database separately via raw SQL, not through the Prisma create call).
export const ZInsightCreateInput = ZInsight.pick({
  environmentId: true,
  title: true,
  description: true,
  category: true,
}).extend({
  vector: z.array(z.number()).length(512),
});

export type TInsightCreateInput = z.infer<typeof ZInsightCreateInput>;

// Minimal projection returned by nearest-neighbour lookups (id only —
// the vector column is deliberately excluded).
export type TNearestInsights = Pick<Insight, "id">;

View File

@@ -1,390 +0,0 @@
import { CRON_SECRET, WEBAPP_URL } from "@/lib/constants";
import { getSurvey, updateSurvey } from "@/lib/survey/service";
import { mockSurveyOutput } from "@/lib/survey/tests/__mock__/survey.mock";
import { doesSurveyHasOpenTextQuestion } from "@/lib/survey/utils";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { ResourceNotFoundError } from "@formbricks/types/errors";
import { TSurvey, TSurveyQuestionTypeEnum } from "@formbricks/types/surveys/types";
import {
doesResponseHasAnyOpenTextAnswer,
generateInsightsEnabledForSurveyQuestions,
generateInsightsForSurvey,
} from "./utils";
// ---------------------------------------------------------------------------
// Module mocks shared by every test in this file.
// ---------------------------------------------------------------------------
// Mock all dependencies
vi.mock("@/lib/constants", () => ({
  // NOTE(review): CRON_SECRET is mocked as a vi.fn, while the tests below use
  // it as a plain header value — the comparisons work because both sides hold
  // the same reference, but a string constant would mirror production closer.
  CRON_SECRET: vi.fn(() => "mocked-cron-secret"),
  WEBAPP_URL: "https://mocked-webapp-url.com",
}));

vi.mock("@/lib/survey/cache", () => ({
  surveyCache: {
    revalidate: vi.fn(),
  },
}));

vi.mock("@/lib/survey/service", () => ({
  getSurvey: vi.fn(),
  updateSurvey: vi.fn(),
}));

vi.mock("@/lib/survey/utils", () => ({
  doesSurveyHasOpenTextQuestion: vi.fn(),
}));

vi.mock("@/lib/utils/validate", () => ({
  validateInputs: vi.fn(),
}));

// Mock global fetch so the HTTP call made by generateInsightsForSurvey
// (POST ${WEBAPP_URL}/api/insights) can be asserted on.
const mockFetch = vi.fn();
global.fetch = mockFetch;
describe("Insights Utils", () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
vi.clearAllMocks();
});
describe("generateInsightsForSurvey", () => {
test("should call fetch with correct parameters", () => {
const surveyId = "survey-123";
mockFetch.mockResolvedValueOnce({ ok: true });
generateInsightsForSurvey(surveyId);
expect(mockFetch).toHaveBeenCalledWith(`${WEBAPP_URL}/api/insights`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"x-api-key": CRON_SECRET,
},
body: JSON.stringify({
surveyId,
}),
});
});
test("should handle errors and return error object", () => {
const surveyId = "survey-123";
mockFetch.mockImplementationOnce(() => {
throw new Error("Network error");
});
const result = generateInsightsForSurvey(surveyId);
expect(result).toEqual({
ok: false,
error: new Error("Error while generating insights for survey: Network error"),
});
});
test("should throw error if CRON_SECRET is not set", async () => {
// Reset modules to ensure clean state
vi.resetModules();
// Mock CRON_SECRET as undefined
vi.doMock("@/lib/constants", () => ({
CRON_SECRET: undefined,
WEBAPP_URL: "https://mocked-webapp-url.com",
}));
// Re-import the utils module to get the mocked CRON_SECRET
const { generateInsightsForSurvey } = await import("./utils");
expect(() => generateInsightsForSurvey("survey-123")).toThrow("CRON_SECRET is not set");
// Reset modules after test
vi.resetModules();
});
});
// Covers the full decision table of generateInsightsEnabledForSurveyQuestions:
// success is only reported when the survey has open-text questions, at least one
// of them lacks an insightsEnabled flag, and the persisted survey ends up with
// an insights-enabled question. Every other path yields { success: false } or a
// ResourceNotFoundError.
describe("generateInsightsEnabledForSurveyQuestions", () => {
  test("should return success=false when survey has no open text questions", async () => {
    // Mock data
    const surveyId = "survey-123";
    // Survey with only non-open-text question types (choice + rating).
    const mockSurvey: TSurvey = {
      ...mockSurveyOutput,
      type: "link",
      segment: null,
      displayPercentage: null,
      questions: [
        {
          id: "cm8cjnse3000009jxf20v91ic",
          type: TSurveyQuestionTypeEnum.MultipleChoiceSingle,
          headline: { default: "Question 1" },
          required: true,
          choices: [
            {
              id: "cm8cjnse3000009jxf20v91ic",
              label: { default: "Choice 1" },
            },
          ],
        },
        {
          id: "cm8cjo19c000109jx6znygc0u",
          type: TSurveyQuestionTypeEnum.Rating,
          headline: { default: "Question 2" },
          required: true,
          scale: "number",
          range: 5,
          isColorCodingEnabled: false,
        },
      ],
    };
    // Setup mocks
    vi.mocked(getSurvey).mockResolvedValueOnce(mockSurvey);
    vi.mocked(doesSurveyHasOpenTextQuestion).mockReturnValueOnce(false);
    // Execute function
    const result = await generateInsightsEnabledForSurveyQuestions(surveyId);
    // Verify results: no open-text questions means no update at all.
    expect(result).toEqual({ success: false });
    expect(updateSurvey).not.toHaveBeenCalled();
  });
  test("should return success=true when survey is updated with insights enabled", async () => {
    vi.clearAllMocks();
    // Mock data
    const surveyId = "cm8ckvchx000008lb710n0gdn";
    // Mock survey with open text questions that have no insightsEnabled property
    const mockSurveyWithOpenTextQuestions: TSurvey = {
      ...mockSurveyOutput,
      id: surveyId,
      type: "link",
      segment: null,
      displayPercentage: null,
      questions: [
        {
          id: "cm8cjnse3000009jxf20v91ic",
          type: TSurveyQuestionTypeEnum.OpenText,
          headline: { default: "Question 1" },
          required: true,
          inputType: "text",
          charLimit: {},
        },
        {
          id: "cm8cjo19c000109jx6znygc0u",
          type: TSurveyQuestionTypeEnum.OpenText,
          headline: { default: "Question 2" },
          required: true,
          inputType: "text",
          charLimit: {},
        },
      ],
    };
    // Define the updated survey that should be returned after updateSurvey
    const mockUpdatedSurveyWithOpenTextQuestions: TSurvey = {
      ...mockSurveyWithOpenTextQuestions,
      questions: mockSurveyWithOpenTextQuestions.questions.map((q) => ({
        ...q,
        insightsEnabled: true, // Updated property
      })),
    };
    // Setup mocks
    vi.mocked(getSurvey).mockResolvedValueOnce(mockSurveyWithOpenTextQuestions);
    vi.mocked(doesSurveyHasOpenTextQuestion).mockReturnValueOnce(true);
    vi.mocked(updateSurvey).mockResolvedValueOnce(mockUpdatedSurveyWithOpenTextQuestions);
    // Execute function
    const result = await generateInsightsEnabledForSurveyQuestions(surveyId);
    expect(result).toEqual({
      success: true,
      survey: mockUpdatedSurveyWithOpenTextQuestions,
    });
  });
  test("should return success=false when all open text questions already have insightsEnabled defined", async () => {
    // Mock data
    const surveyId = "survey-123";
    // The only open-text question already carries insightsEnabled, so there is
    // nothing left to backfill.
    const mockSurvey: TSurvey = {
      ...mockSurveyOutput,
      type: "link",
      segment: null,
      displayPercentage: null,
      questions: [
        {
          id: "cm8cjnse3000009jxf20v91ic",
          type: TSurveyQuestionTypeEnum.OpenText,
          headline: { default: "Question 1" },
          required: true,
          inputType: "text",
          charLimit: {},
          insightsEnabled: true,
        },
        {
          id: "cm8cjo19c000109jx6znygc0u",
          type: TSurveyQuestionTypeEnum.MultipleChoiceSingle,
          headline: { default: "Question 2" },
          required: true,
          choices: [
            {
              id: "cm8cjnse3000009jxf20v91ic",
              label: { default: "Choice 1" },
            },
          ],
        },
      ],
    };
    // Setup mocks
    vi.mocked(getSurvey).mockResolvedValueOnce(mockSurvey);
    vi.mocked(doesSurveyHasOpenTextQuestion).mockReturnValueOnce(true);
    // Execute function
    const result = await generateInsightsEnabledForSurveyQuestions(surveyId);
    // Verify results
    expect(result).toEqual({ success: false });
    expect(updateSurvey).not.toHaveBeenCalled();
  });
  test("should throw ResourceNotFoundError if survey is not found", async () => {
    // Setup mocks
    vi.mocked(getSurvey).mockResolvedValueOnce(null);
    // Execute and verify function
    await expect(generateInsightsEnabledForSurveyQuestions("survey-123")).rejects.toThrow(
      new ResourceNotFoundError("Survey", "survey-123")
    );
  });
  test("should throw ResourceNotFoundError if updateSurvey returns null", async () => {
    // Mock data
    const surveyId = "survey-123";
    const mockSurvey: TSurvey = {
      ...mockSurveyOutput,
      type: "link",
      segment: null,
      displayPercentage: null,
      questions: [
        {
          id: "cm8cjnse3000009jxf20v91ic",
          type: TSurveyQuestionTypeEnum.OpenText,
          headline: { default: "Question 1" },
          required: true,
          inputType: "text",
          charLimit: {},
        },
      ],
    };
    // Setup mocks
    vi.mocked(getSurvey).mockResolvedValueOnce(mockSurvey);
    vi.mocked(doesSurveyHasOpenTextQuestion).mockReturnValueOnce(true);
    // Type assertion to handle the null case
    vi.mocked(updateSurvey).mockResolvedValueOnce(null as unknown as TSurvey);
    // Execute and verify function
    await expect(generateInsightsEnabledForSurveyQuestions(surveyId)).rejects.toThrow(
      new ResourceNotFoundError("Survey", surveyId)
    );
  });
  test("should return success=false when no questions have insights enabled after update", async () => {
    // Mock data
    const surveyId = "survey-123";
    // insightsEnabled is explicitly false, so even after the update no question
    // qualifies and the function must report failure.
    const mockSurvey: TSurvey = {
      ...mockSurveyOutput,
      type: "link",
      segment: null,
      displayPercentage: null,
      questions: [
        {
          id: "cm8cjnse3000009jxf20v91ic",
          type: TSurveyQuestionTypeEnum.OpenText,
          headline: { default: "Question 1" },
          required: true,
          inputType: "text",
          charLimit: {},
          insightsEnabled: false,
        },
      ],
    };
    // Setup mocks
    vi.mocked(getSurvey).mockResolvedValueOnce(mockSurvey);
    vi.mocked(doesSurveyHasOpenTextQuestion).mockReturnValueOnce(true);
    vi.mocked(updateSurvey).mockResolvedValueOnce(mockSurvey);
    // Execute function
    const result = await generateInsightsEnabledForSurveyQuestions(surveyId);
    // Verify results
    expect(result).toEqual({ success: false });
  });
  test("should propagate any errors that occur", async () => {
    // Setup mocks
    const testError = new Error("Test error");
    vi.mocked(getSurvey).mockRejectedValueOnce(testError);
    // Execute and verify function
    await expect(generateInsightsEnabledForSurveyQuestions("survey-123")).rejects.toThrow(testError);
  });
});
// doesResponseHasAnyOpenTextAnswer returns true iff at least one of the given
// question ids maps to a non-empty string in the response data; non-string
// values and ids absent from the response never count.
describe("doesResponseHasAnyOpenTextAnswer", () => {
  test("should return true when at least one open text question has an answer", () => {
    const openTextQuestionIds = ["q1", "q2", "q3"];
    const response = {
      q1: "",
      q2: "This is an answer",
      q3: "",
      q4: "This is not an open text answer",
    };
    const result = doesResponseHasAnyOpenTextAnswer(openTextQuestionIds, response);
    expect(result).toBe(true);
  });
  test("should return false when no open text questions have answers", () => {
    const openTextQuestionIds = ["q1", "q2", "q3"];
    const response = {
      q1: "",
      q2: "",
      q3: "",
      q4: "This is not an open text answer",
    };
    const result = doesResponseHasAnyOpenTextAnswer(openTextQuestionIds, response);
    expect(result).toBe(false);
  });
  test("should return false when response does not contain any open text question IDs", () => {
    const openTextQuestionIds = ["q1", "q2", "q3"];
    const response = {
      q4: "This is not an open text answer",
      q5: "Another answer",
    };
    const result = doesResponseHasAnyOpenTextAnswer(openTextQuestionIds, response);
    expect(result).toBe(false);
  });
  test("should return false for non-string answers", () => {
    const openTextQuestionIds = ["q1", "q2", "q3"];
    // Numbers and booleans are not open-text answers even when truthy.
    const response = {
      q1: "",
      q2: 123,
      q3: true,
    } as any; // Use type assertion to handle mixed types in the test
    const result = doesResponseHasAnyOpenTextAnswer(openTextQuestionIds, response);
    expect(result).toBe(false);
  });
});
});

View File

@@ -1,101 +0,0 @@
import "server-only";
import { CRON_SECRET, WEBAPP_URL } from "@/lib/constants";
import { surveyCache } from "@/lib/survey/cache";
import { getSurvey, updateSurvey } from "@/lib/survey/service";
import { doesSurveyHasOpenTextQuestion } from "@/lib/survey/utils";
import { validateInputs } from "@/lib/utils/validate";
import { logger } from "@formbricks/logger";
import { ZId } from "@formbricks/types/common";
import { ResourceNotFoundError } from "@formbricks/types/errors";
import { TResponse } from "@formbricks/types/responses";
import { TSurvey } from "@formbricks/types/surveys/types";
/**
 * Kicks off insight generation for a survey by POSTing to the internal
 * /api/insights endpoint, authenticated with CRON_SECRET.
 *
 * @param surveyId - Id of the survey to generate insights for.
 * @returns The fetch promise, or a synchronous `{ ok: false, error }` object
 *          when fetch throws synchronously.
 * @throws Error when CRON_SECRET is not configured.
 */
export const generateInsightsForSurvey = (surveyId: string) => {
  if (!CRON_SECRET) {
    throw new Error("CRON_SECRET is not set");
  }
  try {
    return fetch(`${WEBAPP_URL}/api/insights`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-api-key": CRON_SECRET,
      },
      body: JSON.stringify({
        surveyId,
      }),
    });
  } catch (error) {
    // This catch only sees synchronous throws; a rejected fetch promise must be
    // handled by the caller. Guard against non-Error throws, which previously
    // produced an "undefined" message via `error.message`.
    const message = error instanceof Error ? error.message : String(error);
    return {
      ok: false,
      error: new Error(`Error while generating insights for survey: ${message}`),
    };
  }
};
/**
 * Ensures a survey's open-text questions carry an insightsEnabled flag before
 * insight generation runs.
 *
 * Returns { success: false } when the survey has no open-text questions, when
 * every open-text question already has insightsEnabled defined, or when the
 * updated survey still has no question with insightsEnabled === true.
 * Otherwise returns { success: true, survey } with the persisted survey.
 *
 * @param surveyId - Id of the survey to process (validated against ZId).
 * @throws ResourceNotFoundError when the survey does not exist or updateSurvey
 *         returns a falsy result.
 */
export const generateInsightsEnabledForSurveyQuestions = async (
  surveyId: string
): Promise<
  | {
      success: false;
    }
  | {
      success: true;
      survey: Pick<TSurvey, "id" | "name" | "environmentId" | "questions">;
    }
> => {
  validateInputs([surveyId, ZId]);
  try {
    const survey = await getSurvey(surveyId);
    if (!survey) {
      throw new ResourceNotFoundError("Survey", surveyId);
    }
    if (!doesSurveyHasOpenTextQuestion(survey.questions)) {
      return { success: false };
    }
    const openTextQuestions = survey.questions.filter((question) => question.type === "openText");
    // Only questions whose flag is entirely undefined need backfilling; an
    // explicit false is treated as a deliberate opt-out.
    const openTextQuestionsWithoutInsightsEnabled = openTextQuestions.filter(
      (question) => question.type === "openText" && typeof question.insightsEnabled === "undefined"
    );
    if (openTextQuestionsWithoutInsightsEnabled.length === 0) {
      return { success: false };
    }
    // NOTE(review): the survey is passed to updateSurvey unmodified — the
    // computed openTextQuestionsWithoutInsightsEnabled list is never applied
    // here. Presumably updateSurvey's validation applies insightsEnabled
    // defaults; confirm, otherwise the flags are never actually set.
    const updatedSurvey = await updateSurvey(survey);
    if (!updatedSurvey) {
      throw new ResourceNotFoundError("Survey", surveyId);
    }
    const doesSurveyHasInsightsEnabledQuestion = updatedSurvey.questions.some(
      (question) => question.type === "openText" && question.insightsEnabled === true
    );
    // Invalidate the cached survey regardless of the outcome below, since
    // updateSurvey has already persisted a write.
    surveyCache.revalidate({ id: surveyId, environmentId: survey.environmentId });
    if (doesSurveyHasInsightsEnabledQuestion) {
      return { success: true, survey: updatedSurvey };
    }
    return { success: false };
  } catch (error) {
    logger.error(error, "Error generating insights for surveys");
    throw error;
  }
};
/**
 * Checks whether a response contains at least one non-empty open-text answer.
 *
 * @param openTextQuestionIds - Ids of the survey's open-text questions.
 * @param response - Response data keyed by question id.
 * @returns true iff any listed question id maps to a non-empty string answer.
 */
export const doesResponseHasAnyOpenTextAnswer = (
  openTextQuestionIds: string[],
  response: TResponse["data"]
): boolean => {
  for (const id of openTextQuestionIds) {
    const value = response[id];
    // Only non-empty strings count; numbers, booleans, arrays etc. are ignored.
    if (typeof value === "string" && value.length > 0) {
      return true;
    }
  }
  return false;
};

View File

@@ -1,51 +0,0 @@
// This function can run for a maximum of 300 seconds
import { generateInsightsForSurveyResponsesConcept } from "@/app/api/(internal)/insights/lib/insights";
import { responses } from "@/app/lib/api/response";
import { transformErrorToDetails } from "@/app/lib/api/validator";
import { CRON_SECRET } from "@/lib/constants";
import { headers } from "next/headers";
import { z } from "zod";
import { logger } from "@formbricks/logger";
import { generateInsightsEnabledForSurveyQuestions } from "./lib/utils";
// Next.js route-segment config: allow this handler to run for up to 300 seconds.
export const maxDuration = 300; // This function can run for a maximum of 300 seconds
// Request-body schema for POST /api/insights: only the target surveyId.
const ZGenerateInsightsInput = z.object({
  surveyId: z.string(),
});
/**
 * POST /api/insights — internal cron endpoint that backfills insightsEnabled on
 * a survey's open-text questions and then generates insights for its responses.
 *
 * Authenticated via the x-api-key header matching CRON_SECRET. Returns a
 * success response with a message either way; unexpected errors are logged and
 * rethrown (Next.js converts them to a 500).
 */
export const POST = async (request: Request) => {
  try {
    const requestHeaders = await headers();
    // Check authentication
    if (requestHeaders.get("x-api-key") !== CRON_SECRET) {
      return responses.notAuthenticatedResponse();
    }
    const jsonInput = await request.json();
    const inputValidation = ZGenerateInsightsInput.safeParse(jsonInput);
    if (!inputValidation.success) {
      logger.error({ error: inputValidation.error, url: request.url }, "Error in POST /api/insights");
      return responses.badRequestResponse(
        "Fields are missing or incorrectly formatted",
        transformErrorToDetails(inputValidation.error),
        true
      );
    }
    const { surveyId } = inputValidation.data;
    const data = await generateInsightsEnabledForSurveyQuestions(surveyId);
    if (!data.success) {
      return responses.successResponse({ message: "No insights enabled questions found" });
    }
    await generateInsightsForSurveyResponsesConcept(data.survey);
    return responses.successResponse({ message: "Insights generated successfully" });
  } catch (error) {
    // Previously this catch only rethrew, which was a no-op; log the failure
    // first so unexpected errors are visible in the server logs.
    logger.error({ error, url: request.url }, "Error in POST /api/insights");
    throw error;
  }
};

View File

@@ -1,107 +0,0 @@
import { handleInsightAssignments } from "@/app/api/(internal)/insights/lib/insights";
import { embeddingsModel, llmModel } from "@/lib/aiModels";
import { documentCache } from "@/lib/cache/document";
import { validateInputs } from "@/lib/utils/validate";
import { Prisma } from "@prisma/client";
import { embed, generateObject } from "ai";
import { z } from "zod";
import { prisma } from "@formbricks/database";
import { ZInsight } from "@formbricks/database/zod/insights";
import {
TDocument,
TDocumentCreateInput,
ZDocumentCreateInput,
ZDocumentSentiment,
} from "@formbricks/types/documents";
import { DatabaseError } from "@formbricks/types/errors";
/**
 * Creates a document for a survey response: embeds the text, asks the LLM for
 * sentiment / spam classification / insights, persists the document plus its
 * embedding vector, assigns the insights, and revalidates the document cache.
 *
 * @param surveyName - Name of the survey, used as context in the LLM prompt.
 * @param documentInput - Validated creation payload (environmentId, surveyId,
 *        responseId, questionId, text), checked against ZDocumentCreateInput.
 * @returns The created document including its embedding vector.
 * @throws DatabaseError for known Prisma request errors; rethrows anything else.
 */
export const createDocumentAndAssignInsight = async (
  surveyName: string,
  documentInput: TDocumentCreateInput
): Promise<TDocument> => {
  validateInputs([surveyName, z.string()], [documentInput, ZDocumentCreateInput]);
  try {
    // Generate text embedding
    const { embedding } = await embed({
      model: embeddingsModel,
      value: documentInput.text,
      experimental_telemetry: { isEnabled: true },
    });
    // generate sentiment and insights
    const { object } = await generateObject({
      model: llmModel,
      schema: z.object({
        sentiment: ZDocumentSentiment,
        insights: z.array(
          z.object({
            title: z.string().describe("insight title, very specific"),
            description: z.string().describe("very brief insight description"),
            category: ZInsight.shape.category,
          })
        ),
        isSpam: z.boolean(),
      }),
      system: `You are an XM researcher. You analyse a survey response (survey name, question headline & user answer) and generate insights from it. The insight title (1-3 words) should concisely answer the question, e.g., "What type of people do you think would most benefit" -> "Developers". You are very objective. For the insights, split the feedback into the smallest parts possible and only use the feedback itself to draw conclusions. You must output at least one insight. Always generate insights and titles in English, regardless of the input language.`,
      prompt: `Survey: ${surveyName}\n${documentInput.text}`,
      // temperature 0 keeps the classification as deterministic as the model allows
      temperature: 0,
      experimental_telemetry: { isEnabled: true },
    });
    const sentiment = object.sentiment;
    const isSpam = object.isSpam;
    const insights = object.insights;
    // create document
    const prismaDocument = await prisma.document.create({
      data: {
        ...documentInput,
        sentiment,
        isSpam,
      },
    });
    const document = {
      ...prismaDocument,
      vector: embedding,
    };
    // update document vector with the embedding
    // (written via raw SQL — presumably because Prisma has no native pgvector
    // column support; confirm against the schema)
    const vectorString = `[${embedding.join(",")}]`;
    await prisma.$executeRaw`
      UPDATE "Document"
      SET "vector" = ${vectorString}::vector(512)
      WHERE "id" = ${document.id};
    `;
    // connect or create the insights
    const insightPromises: Promise<void>[] = [];
    // Spam responses get a document (for the record) but no insight assignments.
    if (!isSpam) {
      for (const insight of insights) {
        if (typeof insight.title !== "string" || typeof insight.description !== "string") {
          throw new Error("Insight title and description must be a string");
        }
        // create or connect the insight
        insightPromises.push(handleInsightAssignments(documentInput.environmentId, document.id, insight));
      }
      // allSettled: a single failed insight assignment must not fail the document creation
      await Promise.allSettled(insightPromises);
    }
    documentCache.revalidate({
      id: document.id,
      environmentId: document.environmentId,
      surveyId: document.surveyId,
      responseId: document.responseId,
      questionId: document.questionId,
    });
    return document;
  } catch (error) {
    if (error instanceof Prisma.PrismaClientKnownRequestError) {
      throw new DatabaseError(error.message);
    }
    throw error;
  }
};

View File

@@ -156,6 +156,7 @@ export const mockSurvey: TSurvey = {
isVerifyEmailEnabled: false,
isSingleResponsePerEmailEnabled: false,
isBackButtonHidden: false,
recaptcha: null,
projectOverwrites: null,
styling: null,
surveyClosedMessage: null,

View File

@@ -1,19 +1,15 @@
import { createDocumentAndAssignInsight } from "@/app/api/(internal)/pipeline/lib/documents";
import { sendSurveyFollowUps } from "@/app/api/(internal)/pipeline/lib/survey-follow-up";
import { ZPipelineInput } from "@/app/api/(internal)/pipeline/types/pipelines";
import { responses } from "@/app/lib/api/response";
import { transformErrorToDetails } from "@/app/lib/api/validator";
import { cache } from "@/lib/cache";
import { webhookCache } from "@/lib/cache/webhook";
import { CRON_SECRET, IS_AI_CONFIGURED } from "@/lib/constants";
import { CRON_SECRET } from "@/lib/constants";
import { getIntegrations } from "@/lib/integration/service";
import { getOrganizationByEnvironmentId } from "@/lib/organization/service";
import { getResponseCountBySurveyId } from "@/lib/response/service";
import { getSurvey, updateSurvey } from "@/lib/survey/service";
import { convertDatesInObject } from "@/lib/time";
import { getPromptText } from "@/lib/utils/ai";
import { parseRecallInfo } from "@/lib/utils/recall";
import { getIsAIEnabled } from "@/modules/ee/license-check/lib/utils";
import { sendResponseFinishedEmail } from "@/modules/email";
import { getSurveyFollowUpsPermission } from "@/modules/survey/follow-ups/lib/utils";
import { PipelineTriggers, Webhook } from "@prisma/client";
@@ -199,50 +195,6 @@ export const POST = async (request: Request) => {
logger.error({ error: result.reason, url: request.url }, "Promise rejected");
}
});
// generate embeddings for all open text question responses for all paid plans
const hasSurveyOpenTextQuestions = survey.questions.some((question) => question.type === "openText");
if (hasSurveyOpenTextQuestions) {
const isAICofigured = IS_AI_CONFIGURED;
if (hasSurveyOpenTextQuestions && isAICofigured) {
const isAIEnabled = await getIsAIEnabled({
isAIEnabled: organization.isAIEnabled,
billing: organization.billing,
});
if (isAIEnabled) {
for (const question of survey.questions) {
if (question.type === "openText" && question.insightsEnabled) {
const isQuestionAnswered =
response.data[question.id] !== undefined && response.data[question.id] !== "";
if (!isQuestionAnswered) {
continue;
}
const headline = parseRecallInfo(
question.headline[response.language ?? "default"],
response.data,
response.variables
);
const text = getPromptText(headline, response.data[question.id] as string);
// TODO: check if subheadline gives more context and better embeddings
try {
await createDocumentAndAssignInsight(survey.name, {
environmentId,
surveyId,
responseId: response.id,
questionId: question.id,
text,
});
} catch (e) {
logger.error({ error: e, url: request.url }, "Error creating document and assigning insight");
}
}
}
}
}
}
} else {
// Await webhook promises if no emails are sent (with allSettled to prevent early rejection)
const results = await Promise.allSettled(webhookPromises);

View File

@@ -0,0 +1,276 @@
import { convertResponseValue } from "@/lib/responses";
import { cleanup } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { TSurvey, TSurveyQuestion } from "@formbricks/types/surveys/types";
import {
TWeeklyEmailResponseData,
TWeeklySummaryEnvironmentData,
TWeeklySummarySurveyData,
} from "@formbricks/types/weekly-summary";
import { getNotificationResponse } from "./notificationResponse";
vi.mock("@/lib/responses", () => ({
convertResponseValue: vi.fn(),
}));
vi.mock("@/lib/utils/recall", () => ({
replaceHeadlineRecall: vi.fn((survey) => survey),
}));
// getNotificationResponse builds the weekly-summary payload for an environment:
// aggregate insights (totals, completion rate, live-survey count) plus a
// per-survey breakdown. convertResponseValue and replaceHeadlineRecall are
// mocked, so these tests pin the aggregation logic only.
describe("getNotificationResponse", () => {
  afterEach(() => {
    cleanup();
  });
  test("should return a notification response with calculated insights and survey data when provided with an environment containing multiple surveys", () => {
    const mockSurveys = [
      {
        id: "survey1",
        name: "Survey 1",
        status: "inProgress",
        questions: [
          {
            id: "question1",
            headline: { default: "Question 1" },
            type: "text",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display1" }],
        responses: [
          { id: "response1", finished: true, data: { question1: "Answer 1" } },
          { id: "response2", finished: false, data: { question1: "Answer 2" } },
        ],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
      {
        id: "survey2",
        name: "Survey 2",
        status: "inProgress",
        questions: [
          {
            id: "question2",
            headline: { default: "Question 2" },
            type: "text",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display2" }],
        responses: [
          { id: "response3", finished: true, data: { question2: "Answer 3" } },
          { id: "response4", finished: true, data: { question2: "Answer 4" } },
          { id: "response5", finished: false, data: { question2: "Answer 5" } },
        ],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
    ] as unknown as TWeeklySummarySurveyData[];
    const mockEnvironment = {
      id: "env1",
      surveys: mockSurveys,
    } as unknown as TWeeklySummaryEnvironmentData;
    const projectName = "Project Name";
    const notificationResponse = getNotificationResponse(mockEnvironment, projectName);
    expect(notificationResponse).toBeDefined();
    expect(notificationResponse.environmentId).toBe("env1");
    expect(notificationResponse.projectName).toBe(projectName);
    expect(notificationResponse.surveys).toHaveLength(2);
    // 3 of 5 responses are finished -> 60% completion rate.
    expect(notificationResponse.insights.totalCompletedResponses).toBe(3);
    expect(notificationResponse.insights.totalDisplays).toBe(2);
    expect(notificationResponse.insights.totalResponses).toBe(5);
    expect(notificationResponse.insights.completionRate).toBe(60);
    expect(notificationResponse.insights.numLiveSurvey).toBe(2);
    expect(notificationResponse.surveys[0].id).toBe("survey1");
    expect(notificationResponse.surveys[0].name).toBe("Survey 1");
    expect(notificationResponse.surveys[0].status).toBe("inProgress");
    expect(notificationResponse.surveys[0].responseCount).toBe(2);
    expect(notificationResponse.surveys[1].id).toBe("survey2");
    expect(notificationResponse.surveys[1].name).toBe("Survey 2");
    expect(notificationResponse.surveys[1].status).toBe("inProgress");
    expect(notificationResponse.surveys[1].responseCount).toBe(3);
  });
  test("should calculate the correct completion rate and other insights when surveys have responses with varying statuses", () => {
    const mockSurveys = [
      {
        id: "survey1",
        name: "Survey 1",
        status: "inProgress",
        questions: [
          {
            id: "question1",
            headline: { default: "Question 1" },
            type: "text",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display1" }],
        responses: [
          { id: "response1", finished: true, data: { question1: "Answer 1" } },
          { id: "response2", finished: false, data: { question1: "Answer 2" } },
        ],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
      {
        id: "survey2",
        name: "Survey 2",
        status: "inProgress",
        questions: [
          {
            id: "question2",
            headline: { default: "Question 2" },
            type: "text",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display2" }],
        responses: [
          { id: "response3", finished: true, data: { question2: "Answer 3" } },
          { id: "response4", finished: true, data: { question2: "Answer 4" } },
          { id: "response5", finished: false, data: { question2: "Answer 5" } },
        ],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
      {
        id: "survey3",
        name: "Survey 3",
        status: "inProgress",
        questions: [
          {
            id: "question3",
            headline: { default: "Question 3" },
            type: "text",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display3" }],
        responses: [{ id: "response6", finished: false, data: { question3: "Answer 6" } }],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
    ] as unknown as TWeeklySummarySurveyData[];
    const mockEnvironment = {
      id: "env1",
      surveys: mockSurveys,
    } as unknown as TWeeklySummaryEnvironmentData;
    const projectName = "Project Name";
    const notificationResponse = getNotificationResponse(mockEnvironment, projectName);
    expect(notificationResponse).toBeDefined();
    expect(notificationResponse.environmentId).toBe("env1");
    expect(notificationResponse.projectName).toBe(projectName);
    expect(notificationResponse.surveys).toHaveLength(3);
    // 3 of 6 responses finished -> 50% completion rate.
    expect(notificationResponse.insights.totalCompletedResponses).toBe(3);
    expect(notificationResponse.insights.totalDisplays).toBe(3);
    expect(notificationResponse.insights.totalResponses).toBe(6);
    expect(notificationResponse.insights.completionRate).toBe(50);
    expect(notificationResponse.insights.numLiveSurvey).toBe(3);
    expect(notificationResponse.surveys[0].id).toBe("survey1");
    expect(notificationResponse.surveys[0].name).toBe("Survey 1");
    expect(notificationResponse.surveys[0].status).toBe("inProgress");
    expect(notificationResponse.surveys[0].responseCount).toBe(2);
    expect(notificationResponse.surveys[1].id).toBe("survey2");
    expect(notificationResponse.surveys[1].name).toBe("Survey 2");
    expect(notificationResponse.surveys[1].status).toBe("inProgress");
    expect(notificationResponse.surveys[1].responseCount).toBe(3);
    expect(notificationResponse.surveys[2].id).toBe("survey3");
    expect(notificationResponse.surveys[2].name).toBe("Survey 3");
    expect(notificationResponse.surveys[2].status).toBe("inProgress");
    expect(notificationResponse.surveys[2].responseCount).toBe(1);
  });
  test("should return default insights and an empty surveys array when the environment contains no surveys", () => {
    const mockEnvironment = {
      id: "env1",
      surveys: [],
    } as unknown as TWeeklySummaryEnvironmentData;
    const projectName = "Project Name";
    const notificationResponse = getNotificationResponse(mockEnvironment, projectName);
    expect(notificationResponse).toBeDefined();
    expect(notificationResponse.environmentId).toBe("env1");
    expect(notificationResponse.projectName).toBe(projectName);
    expect(notificationResponse.surveys).toHaveLength(0);
    // All insight counters zero out; completion rate must not divide by zero.
    expect(notificationResponse.insights.totalCompletedResponses).toBe(0);
    expect(notificationResponse.insights.totalDisplays).toBe(0);
    expect(notificationResponse.insights.totalResponses).toBe(0);
    expect(notificationResponse.insights.completionRate).toBe(0);
    expect(notificationResponse.insights.numLiveSurvey).toBe(0);
  });
  test("should handle missing response data gracefully when a response doesn't contain data for a question ID", () => {
    const mockSurveys = [
      {
        id: "survey1",
        name: "Survey 1",
        status: "inProgress",
        questions: [
          {
            id: "question1",
            headline: { default: "Question 1" },
            type: "text",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display1" }],
        responses: [
          { id: "response1", finished: true, data: {} }, // Response missing data for question1
        ],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
    ] as unknown as TWeeklySummarySurveyData[];
    const mockEnvironment = {
      id: "env1",
      surveys: mockSurveys,
    } as unknown as TWeeklySummaryEnvironmentData;
    const projectName = "Project Name";
    // Mock the convertResponseValue function to handle the missing data case
    vi.mocked(convertResponseValue).mockReturnValue("");
    const notificationResponse = getNotificationResponse(mockEnvironment, projectName);
    expect(notificationResponse).toBeDefined();
    expect(notificationResponse.surveys).toHaveLength(1);
    expect(notificationResponse.surveys[0].responses).toHaveLength(1);
    expect(notificationResponse.surveys[0].responses[0].responseValue).toBe("");
  });
  test("should handle unsupported question types gracefully", () => {
    const mockSurveys = [
      {
        id: "survey1",
        name: "Survey 1",
        status: "inProgress",
        questions: [
          {
            id: "question1",
            headline: { default: "Question 1" },
            type: "unsupported",
          } as unknown as TSurveyQuestion,
        ],
        displays: [{ id: "display1" }],
        responses: [{ id: "response1", finished: true, data: { question1: "Answer 1" } }],
      } as unknown as TSurvey & { responses: TWeeklyEmailResponseData[] },
    ] as unknown as TWeeklySummarySurveyData[];
    const mockEnvironment = {
      id: "env1",
      surveys: mockSurveys,
    } as unknown as TWeeklySummaryEnvironmentData;
    const projectName = "Project Name";
    vi.mocked(convertResponseValue).mockReturnValue("Unsupported Response");
    const notificationResponse = getNotificationResponse(mockEnvironment, projectName);
    expect(notificationResponse).toBeDefined();
    expect(notificationResponse.surveys[0].responses[0].responseValue).toBe("Unsupported Response");
  });
});

View File

@@ -0,0 +1,48 @@
import { cleanup } from "@testing-library/react";
import { afterEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { getOrganizationIds } from "./organization";
vi.mock("@formbricks/database", () => ({
prisma: {
organization: {
findMany: vi.fn(),
},
},
}));
// getOrganizationIds queries prisma with select:{id:true} and flattens the rows
// to a plain string[] of organization ids; prisma is fully mocked.
describe("Organization", () => {
  afterEach(() => {
    cleanup();
  });
  test("getOrganizationIds should return an array of organization IDs when the database contains multiple organizations", async () => {
    const mockOrganizations = [{ id: "org1" }, { id: "org2" }, { id: "org3" }];
    vi.mocked(prisma.organization.findMany).mockResolvedValue(mockOrganizations);
    const organizationIds = await getOrganizationIds();
    expect(organizationIds).toEqual(["org1", "org2", "org3"]);
    expect(prisma.organization.findMany).toHaveBeenCalledTimes(1);
    // Only the id column should ever be selected.
    expect(prisma.organization.findMany).toHaveBeenCalledWith({
      select: {
        id: true,
      },
    });
  });
  test("getOrganizationIds should return an empty array when the database contains no organizations", async () => {
    vi.mocked(prisma.organization.findMany).mockResolvedValue([]);
    const organizationIds = await getOrganizationIds();
    expect(organizationIds).toEqual([]);
    expect(prisma.organization.findMany).toHaveBeenCalledTimes(1);
    expect(prisma.organization.findMany).toHaveBeenCalledWith({
      select: {
        id: true,
      },
    });
  });
});

View File

@@ -0,0 +1,570 @@
import { cleanup } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { getProjectsByOrganizationId } from "./project";
const mockProjects = [
{
id: "project1",
name: "Project 1",
environments: [
{
id: "env1",
type: "production",
surveys: [],
attributeKeys: [],
},
],
organization: {
memberships: [
{
user: {
id: "user1",
email: "test@example.com",
notificationSettings: {
weeklySummary: {
project1: true,
},
},
locale: "en",
},
},
],
},
},
];
const sevenDaysAgo = new Date();
sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 6); // Set to 6 days ago to be within the last 7 days
const mockProjectsWithNoEnvironments = [
{
id: "project3",
name: "Project 3",
environments: [],
organization: {
memberships: [
{
user: {
id: "user1",
email: "test@example.com",
notificationSettings: {
weeklySummary: {
project3: true,
},
},
locale: "en",
},
},
],
},
},
];
vi.mock("@formbricks/database", () => ({
prisma: {
project: {
findMany: vi.fn(),
},
},
}));
describe("Project Management", () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
cleanup();
});
describe("getProjectsByOrganizationId", () => {
test("retrieves projects with environments, surveys, and organization memberships for a valid organization ID", async () => {
vi.mocked(prisma.project.findMany).mockResolvedValueOnce(mockProjects);
const organizationId = "testOrgId";
const projects = await getProjectsByOrganizationId(organizationId);
expect(projects).toEqual(mockProjects);
expect(prisma.project.findMany).toHaveBeenCalledWith({
where: {
organizationId: organizationId,
},
select: {
id: true,
name: true,
environments: {
where: {
type: "production",
},
select: {
id: true,
surveys: {
where: {
NOT: {
AND: [
{ status: "completed" },
{
responses: {
none: {
createdAt: {
gte: expect.any(Date),
},
},
},
},
],
},
status: {
not: "draft",
},
},
select: {
id: true,
name: true,
questions: true,
status: true,
responses: {
where: {
createdAt: {
gte: expect.any(Date),
},
},
select: {
id: true,
createdAt: true,
updatedAt: true,
finished: true,
data: true,
},
orderBy: {
createdAt: "desc",
},
},
displays: {
where: {
createdAt: {
gte: expect.any(Date),
},
},
select: {
id: true,
},
},
hiddenFields: true,
},
},
attributeKeys: {
select: {
id: true,
createdAt: true,
updatedAt: true,
name: true,
description: true,
type: true,
environmentId: true,
key: true,
isUnique: true,
},
},
},
},
organization: {
select: {
memberships: {
select: {
user: {
select: {
id: true,
email: true,
notificationSettings: true,
locale: true,
},
},
},
},
},
},
},
});
});
test("handles date calculations correctly across DST boundaries", async () => {
const mockDate = new Date(2024, 10, 3, 0, 0, 0); // November 3, 2024, 00:00:00 (example DST boundary)
const sevenDaysAgo = new Date(mockDate);
sevenDaysAgo.setDate(mockDate.getDate() - 7);
vi.useFakeTimers();
vi.setSystemTime(mockDate);
vi.mocked(prisma.project.findMany).mockResolvedValueOnce(mockProjects);
const organizationId = "testOrgId";
await getProjectsByOrganizationId(organizationId);
expect(prisma.project.findMany).toHaveBeenCalledWith(
expect.objectContaining({
where: {
organizationId: organizationId,
},
select: expect.objectContaining({
environments: expect.objectContaining({
select: expect.objectContaining({
surveys: expect.objectContaining({
where: expect.objectContaining({
NOT: expect.objectContaining({
AND: expect.arrayContaining([
expect.objectContaining({ status: "completed" }),
expect.objectContaining({
responses: expect.objectContaining({
none: expect.objectContaining({
createdAt: expect.objectContaining({
gte: sevenDaysAgo,
}),
}),
}),
}),
]),
}),
}),
}),
}),
}),
}),
})
);
vi.useRealTimers();
});
test("includes surveys with 'completed' status but responses within the last 7 days", async () => {
vi.mocked(prisma.project.findMany).mockResolvedValueOnce(mockProjects);
const organizationId = "testOrgId";
const projects = await getProjectsByOrganizationId(organizationId);
expect(projects).toEqual(mockProjects);
expect(prisma.project.findMany).toHaveBeenCalledWith({
where: {
organizationId: organizationId,
},
select: {
id: true,
name: true,
environments: {
where: {
type: "production",
},
select: {
id: true,
surveys: {
where: {
NOT: {
AND: [
{ status: "completed" },
{
responses: {
none: {
createdAt: {
gte: expect.any(Date),
},
},
},
},
],
},
status: {
not: "draft",
},
},
select: {
id: true,
name: true,
questions: true,
status: true,
responses: {
where: {
createdAt: {
gte: expect.any(Date),
},
},
select: {
id: true,
createdAt: true,
updatedAt: true,
finished: true,
data: true,
},
orderBy: {
createdAt: "desc",
},
},
displays: {
where: {
createdAt: {
gte: expect.any(Date),
},
},
select: {
id: true,
},
},
hiddenFields: true,
},
},
attributeKeys: {
select: {
id: true,
createdAt: true,
updatedAt: true,
name: true,
description: true,
type: true,
environmentId: true,
key: true,
isUnique: true,
},
},
},
},
organization: {
select: {
memberships: {
select: {
user: {
select: {
id: true,
email: true,
notificationSettings: true,
locale: true,
},
},
},
},
},
},
},
});
});
test("returns an empty array when an invalid organization ID is provided", async () => {
  vi.mocked(prisma.project.findMany).mockResolvedValueOnce([]);
  const invalidOrganizationId = "invalidOrgId";
  const result = await getProjectsByOrganizationId(invalidOrganizationId);
  expect(result).toEqual([]);
  // "Recent" filter reused at every createdAt site in the expected query.
  const recent = { createdAt: { gte: expect.any(Date) } };
  expect(prisma.project.findMany).toHaveBeenCalledWith({
    where: { organizationId: invalidOrganizationId },
    select: {
      id: true,
      name: true,
      environments: {
        where: { type: "production" },
        select: {
          id: true,
          surveys: {
            // Exclude surveys that are completed AND have no recent responses,
            // and drop drafts entirely.
            where: {
              NOT: { AND: [{ status: "completed" }, { responses: { none: recent } }] },
              status: { not: "draft" },
            },
            select: {
              id: true,
              name: true,
              questions: true,
              status: true,
              responses: {
                where: recent,
                select: { id: true, createdAt: true, updatedAt: true, finished: true, data: true },
                orderBy: { createdAt: "desc" },
              },
              displays: { where: recent, select: { id: true } },
              hiddenFields: true,
            },
          },
          attributeKeys: {
            select: {
              id: true,
              createdAt: true,
              updatedAt: true,
              name: true,
              description: true,
              type: true,
              environmentId: true,
              key: true,
              isUnique: true,
            },
          },
        },
      },
      organization: {
        select: {
          memberships: {
            select: {
              user: { select: { id: true, email: true, notificationSettings: true, locale: true } },
            },
          },
        },
      },
    },
  });
});
test("handles projects with no environments", async () => {
  vi.mocked(prisma.project.findMany).mockResolvedValueOnce(mockProjectsWithNoEnvironments);
  const orgId = "testOrgId";
  const result = await getProjectsByOrganizationId(orgId);
  expect(result).toEqual(mockProjectsWithNoEnvironments);
  // "Recent" filter reused at every createdAt site in the expected query.
  const recent = { createdAt: { gte: expect.any(Date) } };
  expect(prisma.project.findMany).toHaveBeenCalledWith({
    where: { organizationId: orgId },
    select: {
      id: true,
      name: true,
      environments: {
        where: { type: "production" },
        select: {
          id: true,
          surveys: {
            // Exclude surveys that are completed AND have no recent responses,
            // and drop drafts entirely.
            where: {
              NOT: { AND: [{ status: "completed" }, { responses: { none: recent } }] },
              status: { not: "draft" },
            },
            select: {
              id: true,
              name: true,
              questions: true,
              status: true,
              responses: {
                where: recent,
                select: { id: true, createdAt: true, updatedAt: true, finished: true, data: true },
                orderBy: { createdAt: "desc" },
              },
              displays: { where: recent, select: { id: true } },
              hiddenFields: true,
            },
          },
          attributeKeys: {
            select: {
              id: true,
              createdAt: true,
              updatedAt: true,
              name: true,
              description: true,
              type: true,
              environmentId: true,
              key: true,
              isUnique: true,
            },
          },
        },
      },
      organization: {
        select: {
          memberships: {
            select: {
              user: { select: { id: true, email: true, notificationSettings: true, locale: true } },
            },
          },
        },
      },
    },
  });
});
});
});

View File

@@ -0,0 +1,99 @@
import { cache } from "@/lib/cache";
import { TContact } from "@/modules/ee/contacts/types/contact";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { getContactByUserId } from "./contact";
// Mock prisma
vi.mock("@formbricks/database", () => ({
prisma: {
contact: {
findFirst: vi.fn(),
},
},
}));
// Mock cache
vi.mock("@/lib/cache", async () => {
const actual = await vi.importActual("@/lib/cache");
return {
...(actual as any),
cache: vi.fn((fn) => fn()), // Mock cache function to just execute the passed function
};
});
const environmentId = "test-environment-id";
const userId = "test-user-id";
const contactId = "test-contact-id";
const contactMock: Partial<TContact> & {
attributes: { value: string; attributeKey: { key: string } }[];
} = {
id: contactId,
attributes: [
{ attributeKey: { key: "userId" }, value: userId },
{ attributeKey: { key: "email" }, value: "test@example.com" },
],
};
describe("getContactByUserId", () => {
  // The exact query shape the service is expected to send to prisma,
  // identical for both the found and not-found cases.
  const expectedFindFirstArgs = {
    where: {
      attributes: {
        some: {
          attributeKey: {
            key: "userId",
            environmentId,
          },
          value: userId,
        },
      },
    },
    select: {
      id: true,
      attributes: { select: { attributeKey: { select: { key: true } }, value: true } },
    },
  };

  beforeEach(() => {
    // Pass-through cache: execute the wrapped function directly.
    vi.mocked(cache).mockImplementation((fn) => async () => fn());
  });

  afterEach(() => {
    vi.resetAllMocks();
  });

  test("should return contact if found", async () => {
    vi.mocked(prisma.contact.findFirst).mockResolvedValue(contactMock as any);
    const result = await getContactByUserId(environmentId, userId);
    expect(prisma.contact.findFirst).toHaveBeenCalledWith(expectedFindFirstArgs);
    expect(result).toEqual(contactMock);
  });

  test("should return null if contact not found", async () => {
    vi.mocked(prisma.contact.findFirst).mockResolvedValue(null);
    const result = await getContactByUserId(environmentId, userId);
    expect(prisma.contact.findFirst).toHaveBeenCalledWith(expectedFindFirstArgs);
    expect(result).toBeNull();
  });
});

View File

@@ -0,0 +1,309 @@
import { cache } from "@/lib/cache";
import { getProjectByEnvironmentId } from "@/lib/project/service";
import { getSurveys } from "@/lib/survey/service";
import { anySurveyHasFilters } from "@/lib/survey/utils";
import { diffInDays } from "@/lib/utils/datetime";
import { evaluateSegment } from "@/modules/ee/contacts/segments/lib/segments";
import { Prisma } from "@prisma/client";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { DatabaseError } from "@formbricks/types/errors";
import { TProject } from "@formbricks/types/project";
import { TSegment } from "@formbricks/types/segment";
import { TSurvey } from "@formbricks/types/surveys/types";
import { getSyncSurveys } from "./survey";
// Mock dependencies
vi.mock("@/lib/cache", async () => {
const actual = await vi.importActual("@/lib/cache");
return {
...(actual as any),
cache: vi.fn((fn) => fn()), // Mock cache function to just execute the passed function
};
});
vi.mock("@/lib/project/service", () => ({
getProjectByEnvironmentId: vi.fn(),
}));
vi.mock("@/lib/survey/service", () => ({
getSurveys: vi.fn(),
}));
vi.mock("@/lib/survey/utils", () => ({
anySurveyHasFilters: vi.fn(),
}));
vi.mock("@/lib/utils/datetime", () => ({
diffInDays: vi.fn(),
}));
vi.mock("@/lib/utils/validate", () => ({
validateInputs: vi.fn(),
}));
vi.mock("@/modules/ee/contacts/segments/lib/segments", () => ({
evaluateSegment: vi.fn(),
}));
vi.mock("@formbricks/database", () => ({
prisma: {
display: {
findMany: vi.fn(),
},
response: {
findMany: vi.fn(),
},
},
}));
vi.mock("@formbricks/logger", () => ({
logger: {
error: vi.fn(),
},
}));
const environmentId = "test-env-id";
const contactId = "test-contact-id";
const contactAttributes = { userId: "user1", email: "test@example.com" };
const deviceType = "desktop";
const mockProject = {
id: "proj1",
name: "Test Project",
createdAt: new Date(),
updatedAt: new Date(),
organizationId: "org1",
environments: [],
recontactDays: 10,
inAppSurveyBranding: true,
linkSurveyBranding: true,
placement: "bottomRight",
clickOutsideClose: true,
darkOverlay: false,
languages: [],
} as unknown as TProject;
const baseSurvey: TSurvey = {
id: "survey1",
createdAt: new Date(),
updatedAt: new Date(),
name: "Test Survey 1",
environmentId: environmentId,
type: "app",
status: "inProgress",
questions: [],
displayOption: "displayOnce",
recontactDays: null,
autoClose: null,
closeOnDate: null,
delay: 0,
displayPercentage: null,
autoComplete: null,
segment: null,
surveyClosedMessage: null,
singleUse: null,
styling: null,
pin: null,
resultShareKey: null,
displayLimit: null,
welcomeCard: { enabled: false } as TSurvey["welcomeCard"],
endings: [],
triggers: [],
languages: [],
variables: [],
hiddenFields: { enabled: false },
createdBy: null,
isSingleResponsePerEmailEnabled: false,
isVerifyEmailEnabled: false,
projectOverwrites: null,
runOnDate: null,
showLanguageSwitch: false,
isBackButtonHidden: false,
followUps: [],
recaptcha: { enabled: false, threshold: 0.5 },
};
describe("getSyncSurveys", () => {
beforeEach(() => {
vi.mocked(cache).mockImplementation((fn) => async () => {
return fn();
});
vi.mocked(getProjectByEnvironmentId).mockResolvedValue(mockProject);
vi.mocked(prisma.display.findMany).mockResolvedValue([]);
vi.mocked(prisma.response.findMany).mockResolvedValue([]);
vi.mocked(anySurveyHasFilters).mockReturnValue(false);
vi.mocked(evaluateSegment).mockResolvedValue(true);
vi.mocked(diffInDays).mockReturnValue(100); // Assume enough days passed
});
afterEach(() => {
vi.resetAllMocks();
});
test("should throw error if product not found", async () => {
vi.mocked(getProjectByEnvironmentId).mockResolvedValue(null);
await expect(getSyncSurveys(environmentId, contactId, contactAttributes, deviceType)).rejects.toThrow(
"Product not found"
);
});
test("should return empty array if no surveys found", async () => {
vi.mocked(getSurveys).mockResolvedValue([]);
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual([]);
});
test("should return empty array if no 'app' type surveys in progress", async () => {
const surveys: TSurvey[] = [
{ ...baseSurvey, id: "s1", type: "link", status: "inProgress" },
{ ...baseSurvey, id: "s2", type: "app", status: "paused" },
];
vi.mocked(getSurveys).mockResolvedValue(surveys);
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual([]);
});
test("should filter by displayOption 'displayOnce'", async () => {
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "displayOnce" }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
vi.mocked(prisma.display.findMany).mockResolvedValue([{ id: "d1", surveyId: "s1", contactId }]); // Already displayed
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual([]);
vi.mocked(prisma.display.findMany).mockResolvedValue([]); // Not displayed yet
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result2).toEqual(surveys);
});
test("should filter by displayOption 'displayMultiple'", async () => {
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "displayMultiple" }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
vi.mocked(prisma.response.findMany).mockResolvedValue([{ id: "r1", surveyId: "s1", contactId }]); // Already responded
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual([]);
vi.mocked(prisma.response.findMany).mockResolvedValue([]); // Not responded yet
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result2).toEqual(surveys);
});
test("should filter by displayOption 'displaySome'", async () => {
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "displaySome", displayLimit: 2 }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
vi.mocked(prisma.display.findMany).mockResolvedValue([
{ id: "d1", surveyId: "s1", contactId },
{ id: "d2", surveyId: "s1", contactId },
]); // Display limit reached
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual([]);
vi.mocked(prisma.display.findMany).mockResolvedValue([{ id: "d1", surveyId: "s1", contactId }]); // Within limit
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result2).toEqual(surveys);
// Test with response already submitted
vi.mocked(prisma.response.findMany).mockResolvedValue([{ id: "r1", surveyId: "s1", contactId }]);
const result3 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result3).toEqual([]);
});
test("should not filter by displayOption 'respondMultiple'", async () => {
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "respondMultiple" }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
vi.mocked(prisma.display.findMany).mockResolvedValue([{ id: "d1", surveyId: "s1", contactId }]);
vi.mocked(prisma.response.findMany).mockResolvedValue([{ id: "r1", surveyId: "s1", contactId }]);
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual(surveys);
});
test("should filter by product recontactDays if survey recontactDays is null", async () => {
// Survey has no recontactDays of its own, so the project-level value
// (mockProject.recontactDays = 10) should gate re-display.
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", recontactDays: null }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
const displayDate = new Date();
// The gating display belongs to a DIFFERENT survey ("s2") — the project-level
// recontact window applies across all surveys for this contact.
vi.mocked(prisma.display.findMany).mockResolvedValue([
{ id: "d1", surveyId: "s2", contactId, createdAt: displayDate }, // Display for another survey
]);
vi.mocked(diffInDays).mockReturnValue(5); // Not enough days passed (product.recontactDays = 10)
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual([]);
// The service compares "now" against the latest display date.
expect(diffInDays).toHaveBeenCalledWith(expect.any(Date), displayDate);
vi.mocked(diffInDays).mockReturnValue(15); // Enough days passed
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result2).toEqual(surveys);
});
test("should return surveys if no segment filters exist", async () => {
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1" }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
vi.mocked(anySurveyHasFilters).mockReturnValue(false);
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result).toEqual(surveys);
expect(evaluateSegment).not.toHaveBeenCalled();
});
test("should evaluate segment filters if they exist", async () => {
const segment = { id: "seg1", filters: [{}] } as TSegment; // Mock filter structure
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", segment }];
vi.mocked(getSurveys).mockResolvedValue(surveys);
vi.mocked(anySurveyHasFilters).mockReturnValue(true);
// Case 1: Segment evaluation matches
vi.mocked(evaluateSegment).mockResolvedValue(true);
const result1 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result1).toEqual(surveys);
expect(evaluateSegment).toHaveBeenCalledWith(
{
attributes: contactAttributes,
deviceType,
environmentId,
contactId,
userId: contactAttributes.userId,
},
segment.filters
);
// Case 2: Segment evaluation does not match
vi.mocked(evaluateSegment).mockResolvedValue(false);
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
expect(result2).toEqual([]);
});
test("should handle Prisma errors", async () => {
const prismaError = new Prisma.PrismaClientKnownRequestError("Test Prisma Error", {
code: "P2025",
clientVersion: "test",
});
vi.mocked(getSurveys).mockRejectedValue(prismaError);
await expect(getSyncSurveys(environmentId, contactId, contactAttributes, deviceType)).rejects.toThrow(
DatabaseError
);
expect(logger.error).toHaveBeenCalledWith(prismaError);
});
test("should handle general errors", async () => {
const generalError = new Error("Something went wrong");
vi.mocked(getSurveys).mockRejectedValue(generalError);
await expect(getSyncSurveys(environmentId, contactId, contactAttributes, deviceType)).rejects.toThrow(
generalError
);
});
// Renamed from "should throw ResourceNotFoundError if resolved surveys are null
// after filtering": the body (and its original comments) assert that the service
// RETURNS an empty array in this scenario — it never throws here. The old name
// documented behavior the test does not (and cannot) verify.
test("should return an empty array when segment filters exclude every survey", async () => {
  const segment = { id: "seg1", filters: [{}] } as TSegment; // Mock filter structure
  const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", segment }];
  vi.mocked(getSurveys).mockResolvedValue(surveys);
  vi.mocked(anySurveyHasFilters).mockReturnValue(true);
  vi.mocked(evaluateSegment).mockResolvedValue(false); // Ensure all surveys are filtered out
  const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
  expect(result).toEqual([]); // Expect empty array, not an error in this case.
});
});

View File

@@ -0,0 +1,247 @@
import { parseRecallInfo } from "@/lib/utils/recall";
import { describe, expect, test, vi } from "vitest";
import { TAttributes } from "@formbricks/types/attributes";
import { TLanguage } from "@formbricks/types/project";
import {
TSurvey,
TSurveyEnding,
TSurveyQuestion,
TSurveyQuestionTypeEnum,
} from "@formbricks/types/surveys/types";
import { replaceAttributeRecall } from "./utils";
vi.mock("@/lib/utils/recall", () => ({
// Lightweight stand-in for parseRecallInfo: rewrites a "recall:<key>" token to
// "parsed-<attribute value>" when <key> exists in the supplied attributes map.
// NOTE: the regex has no `g` flag, so only the FIRST recall token per string is
// replaced — sufficient for these tests, which use at most one token per text.
parseRecallInfo: vi.fn((text, attributes) => {
const recallPattern = /recall:([a-zA-Z0-9_-]+)/;
const match = text.match(recallPattern);
if (match && match[1]) {
const recallKey = match[1];
const attributeValue = attributes[recallKey];
if (attributeValue !== undefined) {
return text.replace(recallPattern, `parsed-${attributeValue}`);
}
}
return text; // Return original text if no match or attribute not found
}),
}));
const baseSurvey: TSurvey = {
id: "survey1",
createdAt: new Date(),
updatedAt: new Date(),
name: "Test Survey",
environmentId: "env1",
type: "app",
status: "inProgress",
questions: [],
endings: [],
welcomeCard: { enabled: false } as TSurvey["welcomeCard"],
languages: [
{ language: { id: "lang1", code: "en" } as unknown as TLanguage, default: true, enabled: true },
],
triggers: [],
recontactDays: null,
displayLimit: null,
singleUse: null,
styling: null,
surveyClosedMessage: null,
hiddenFields: { enabled: false },
variables: [],
createdBy: null,
isSingleResponsePerEmailEnabled: false,
isVerifyEmailEnabled: false,
projectOverwrites: null,
runOnDate: null,
showLanguageSwitch: false,
isBackButtonHidden: false,
followUps: [],
recaptcha: { enabled: false, threshold: 0.5 },
displayOption: "displayOnce",
autoClose: null,
closeOnDate: null,
delay: 0,
displayPercentage: null,
autoComplete: null,
segment: null,
pin: null,
resultShareKey: null,
};
const attributes: TAttributes = {
name: "John Doe",
email: "john.doe@example.com",
plan: "premium",
};
describe("replaceAttributeRecall", () => {
test("should replace recall info in question headlines and subheaders", () => {
const surveyWithRecall: TSurvey = {
...baseSurvey,
questions: [
{
id: "q1",
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: "Hello recall:name!" },
subheader: { default: "Your email is recall:email" },
required: true,
buttonLabel: { default: "Next" },
placeholder: { default: "Type here..." },
longAnswer: false,
logic: [],
} as unknown as TSurveyQuestion,
],
};
const result = replaceAttributeRecall(surveyWithRecall, attributes);
expect(result.questions[0].headline.default).toBe("Hello parsed-John Doe!");
expect(result.questions[0].subheader?.default).toBe("Your email is parsed-john.doe@example.com");
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Hello recall:name!", attributes);
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Your email is recall:email", attributes);
});
test("should replace recall info in welcome card headline", () => {
const surveyWithRecall: TSurvey = {
...baseSurvey,
welcomeCard: {
enabled: true,
headline: { default: "Welcome, recall:name!" },
html: { default: "<p>Some content</p>" },
buttonLabel: { default: "Start" },
timeToFinish: false,
showResponseCount: false,
},
};
const result = replaceAttributeRecall(surveyWithRecall, attributes);
expect(result.welcomeCard.headline?.default).toBe("Welcome, parsed-John Doe!");
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Welcome, recall:name!", attributes);
});
test("should replace recall info in end screen headlines and subheaders", () => {
const surveyWithRecall: TSurvey = {
...baseSurvey,
endings: [
{
type: "endScreen",
headline: { default: "Thank you, recall:name!" },
subheader: { default: "Your plan: recall:plan" },
buttonLabel: { default: "Finish" },
buttonLink: "https://example.com",
} as unknown as TSurveyEnding,
],
};
const result = replaceAttributeRecall(surveyWithRecall, attributes);
expect(result.endings[0].type).toBe("endScreen");
if (result.endings[0].type === "endScreen") {
expect(result.endings[0].headline?.default).toBe("Thank you, parsed-John Doe!");
expect(result.endings[0].subheader?.default).toBe("Your plan: parsed-premium");
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Thank you, recall:name!", attributes);
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Your plan: recall:plan", attributes);
}
});
test("should handle multiple languages", () => {
const surveyMultiLang: TSurvey = {
...baseSurvey,
languages: [
{ language: { id: "lang1", code: "en" } as unknown as TLanguage, default: true, enabled: true },
{ language: { id: "lang2", code: "es" } as unknown as TLanguage, default: false, enabled: true },
],
questions: [
{
id: "q1",
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: "Hello recall:name!", es: "Hola recall:name!" },
required: true,
buttonLabel: { default: "Next", es: "Siguiente" },
placeholder: { default: "Type here...", es: "Escribe aquí..." },
longAnswer: false,
logic: [],
} as unknown as TSurveyQuestion,
],
};
const result = replaceAttributeRecall(surveyMultiLang, attributes);
expect(result.questions[0].headline.default).toBe("Hello parsed-John Doe!");
expect(result.questions[0].headline.es).toBe("Hola parsed-John Doe!");
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Hello recall:name!", attributes);
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Hola recall:name!", attributes);
});
test("should not replace if recall key is not in attributes", () => {
const surveyWithRecall: TSurvey = {
...baseSurvey,
questions: [
{
id: "q1",
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: "Your company: recall:company" },
required: true,
buttonLabel: { default: "Next" },
placeholder: { default: "Type here..." },
longAnswer: false,
logic: [],
} as unknown as TSurveyQuestion,
],
};
const result = replaceAttributeRecall(surveyWithRecall, attributes);
expect(result.questions[0].headline.default).toBe("Your company: recall:company");
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Your company: recall:company", attributes);
});
test("should handle surveys with no recall information", async () => {
const surveyNoRecall: TSurvey = {
...baseSurvey,
questions: [
{
id: "q1",
type: TSurveyQuestionTypeEnum.OpenText,
headline: { default: "Just a regular question" },
required: true,
buttonLabel: { default: "Next" },
placeholder: { default: "Type here..." },
longAnswer: false,
logic: [],
} as unknown as TSurveyQuestion,
],
welcomeCard: {
enabled: true,
headline: { default: "Welcome!" },
html: { default: "<p>Some content</p>" },
buttonLabel: { default: "Start" },
timeToFinish: false,
showResponseCount: false,
},
endings: [
{
type: "endScreen",
headline: { default: "Thank you!" },
buttonLabel: { default: "Finish" },
} as unknown as TSurveyEnding,
],
};
const parseRecallInfoSpy = vi.spyOn(await import("@/lib/utils/recall"), "parseRecallInfo");
const result = replaceAttributeRecall(surveyNoRecall, attributes);
expect(result).toEqual(surveyNoRecall); // Should be unchanged
expect(parseRecallInfoSpy).not.toHaveBeenCalled();
parseRecallInfoSpy.mockRestore();
});
test("should handle surveys with empty questions, endings, or disabled welcome card", async () => {
const surveyEmpty: TSurvey = {
...baseSurvey,
questions: [],
endings: [],
welcomeCard: { enabled: false } as TSurvey["welcomeCard"],
};
const parseRecallInfoSpy = vi.spyOn(await import("@/lib/utils/recall"), "parseRecallInfo");
const result = replaceAttributeRecall(surveyEmpty, attributes);
expect(result).toEqual(surveyEmpty);
expect(parseRecallInfoSpy).not.toHaveBeenCalled();
parseRecallInfoSpy.mockRestore();
});
});

View File

@@ -0,0 +1,86 @@
import { cache } from "@/lib/cache";
import { validateInputs } from "@/lib/utils/validate";
import { describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { TActionClassNoCodeConfig } from "@formbricks/types/action-classes";
import { DatabaseError } from "@formbricks/types/errors";
import { TJsEnvironmentStateActionClass } from "@formbricks/types/js";
import { getActionClassesForEnvironmentState } from "./actionClass";
// Mock dependencies
vi.mock("@/lib/cache");
vi.mock("@/lib/utils/validate");
vi.mock("@formbricks/database", () => ({
prisma: {
actionClass: {
findMany: vi.fn(),
},
},
}));
const environmentId = "test-environment-id";
const mockActionClasses: TJsEnvironmentStateActionClass[] = [
{
id: "action1",
type: "code",
name: "Code Action",
key: "code-action",
noCodeConfig: null,
},
{
id: "action2",
type: "noCode",
name: "No Code Action",
key: null,
noCodeConfig: { type: "click" } as TActionClassNoCodeConfig,
},
];
describe("getActionClassesForEnvironmentState", () => {
  test("should return action classes successfully", async () => {
    vi.mocked(prisma.actionClass.findMany).mockResolvedValue(mockActionClasses);
    // Pass-through cache: execute the wrapped function directly.
    vi.mocked(cache).mockImplementation((fn) => async () => fn());

    const result = await getActionClassesForEnvironmentState(environmentId);

    expect(result).toEqual(mockActionClasses);
    expect(validateInputs).toHaveBeenCalledWith([environmentId, expect.any(Object)]); // ZId is an object
    expect(prisma.actionClass.findMany).toHaveBeenCalledWith({
      where: { environmentId },
      select: { id: true, type: true, name: true, key: true, noCodeConfig: true },
    });
    expect(cache).toHaveBeenCalledWith(
      expect.any(Function),
      [`getActionClassesForEnvironmentState-${environmentId}`],
      { tags: [`environments-${environmentId}-actionClasses`] }
    );
  });

  test("should throw DatabaseError on prisma error", async () => {
    const dbFailure = new Error("Prisma error");
    vi.mocked(prisma.actionClass.findMany).mockRejectedValue(dbFailure);
    vi.mocked(cache).mockImplementation((fn) => async () => fn());

    // Two separate calls: one pins the error type, the other its message.
    await expect(getActionClassesForEnvironmentState(environmentId)).rejects.toThrow(DatabaseError);
    await expect(getActionClassesForEnvironmentState(environmentId)).rejects.toThrow(
      `Database error when fetching actions for environment ${environmentId}`
    );

    expect(validateInputs).toHaveBeenCalledWith([environmentId, expect.any(Object)]);
    expect(prisma.actionClass.findMany).toHaveBeenCalled();
    expect(cache).toHaveBeenCalledWith(
      expect.any(Function),
      [`getActionClassesForEnvironmentState-${environmentId}`],
      { tags: [`environments-${environmentId}-actionClasses`] }
    );
  });
});

View File

@@ -0,0 +1,372 @@
import { cache } from "@/lib/cache";
import { getEnvironment } from "@/lib/environment/service";
import {
getMonthlyOrganizationResponseCount,
getOrganizationByEnvironmentId,
} from "@/lib/organization/service";
import {
capturePosthogEnvironmentEvent,
sendPlanLimitsReachedEventToPosthogWeekly,
} from "@/lib/posthogServer";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { TActionClass } from "@formbricks/types/action-classes";
import { TEnvironment } from "@formbricks/types/environment";
import { ResourceNotFoundError } from "@formbricks/types/errors";
import { TJsEnvironmentState } from "@formbricks/types/js";
import { TOrganization } from "@formbricks/types/organizations";
import { TProject } from "@formbricks/types/project";
import { TSurvey } from "@formbricks/types/surveys/types";
import { getActionClassesForEnvironmentState } from "./actionClass";
import { getEnvironmentState } from "./environmentState";
import { getProjectForEnvironmentState } from "./project";
import { getSurveysForEnvironmentState } from "./survey";
// Mock dependencies
vi.mock("@/lib/cache");
vi.mock("@/lib/environment/service");
vi.mock("@/lib/organization/service");
vi.mock("@/lib/posthogServer");
vi.mock("@/modules/ee/license-check/lib/utils");
vi.mock("@formbricks/database", () => ({
prisma: {
environment: {
update: vi.fn(),
},
},
}));
vi.mock("@formbricks/logger", () => ({
logger: {
error: vi.fn(),
},
}));
vi.mock("./actionClass");
vi.mock("./project");
vi.mock("./survey");
vi.mock("@/lib/constants", () => ({
IS_FORMBRICKS_CLOUD: true, // Cloud mode enabled by default for these tests; override in specific tests
RECAPTCHA_SITE_KEY: "mock_recaptcha_site_key",
RECAPTCHA_SECRET_KEY: "mock_recaptcha_secret_key",
IS_RECAPTCHA_CONFIGURED: true,
IS_PRODUCTION: true,
IS_POSTHOG_CONFIGURED: false,
ENTERPRISE_LICENSE_KEY: "mock_enterprise_license_key",
}));
const environmentId = "test-environment-id";
// Environment whose app setup is already completed; tests override this to exercise the setup path.
const mockEnvironment: TEnvironment = {
  id: environmentId,
  createdAt: new Date(),
  updatedAt: new Date(),
  projectId: "test-project-id",
  type: "production",
  appSetupCompleted: true, // Default to true
};
// Free-plan organization with a 100 responses/month limit; used by the limit-reached tests.
const mockOrganization: TOrganization = {
  id: "test-org-id",
  name: "Test Organization",
  createdAt: new Date(),
  updatedAt: new Date(),
  billing: {
    plan: "free",
    stripeCustomerId: null,
    period: "monthly",
    limits: {
      projects: 1,
      monthly: {
        responses: 100, // Default limit
        miu: 1000,
      },
    },
    periodStart: new Date(),
  },
  isAIEnabled: false,
};
const mockProject: TProject = {
  id: "test-project-id",
  createdAt: new Date(),
  updatedAt: new Date(),
  name: "Test Project",
  config: {
    channel: "link",
    industry: "eCommerce",
  },
  organizationId: mockOrganization.id,
  styling: {
    allowStyleOverwrite: false,
  },
  recontactDays: 30,
  inAppSurveyBranding: true,
  linkSurveyBranding: true,
  placement: "bottomRight",
  clickOutsideClose: true,
  darkOverlay: false,
  environments: [],
  languages: [],
};
// Three surveys covering the filter matrix; only the first (type "app" AND status
// "inProgress") is expected to appear in the environment state.
const mockSurveys: TSurvey[] = [
  // Included: app + inProgress.
  {
    id: "survey-app-inProgress",
    createdAt: new Date(),
    updatedAt: new Date(),
    name: "App Survey In Progress",
    environmentId: environmentId,
    type: "app",
    status: "inProgress",
    displayLimit: null,
    endings: [],
    followUps: [],
    isBackButtonHidden: false,
    isSingleResponsePerEmailEnabled: false,
    isVerifyEmailEnabled: false,
    projectOverwrites: null,
    runOnDate: null,
    showLanguageSwitch: false,
    questions: [],
    displayOption: "displayOnce",
    recontactDays: null,
    autoClose: null,
    closeOnDate: null,
    delay: 0,
    displayPercentage: null,
    autoComplete: null,
    singleUse: null,
    triggers: [],
    languages: [],
    pin: null,
    resultShareKey: null,
    segment: null,
    styling: null,
    surveyClosedMessage: null,
    hiddenFields: { enabled: false },
    welcomeCard: { enabled: false, showResponseCount: false, timeToFinish: false },
    variables: [],
    createdBy: null,
    recaptcha: { enabled: false, threshold: 0.5 },
  },
  // Filtered out: app type but paused.
  {
    id: "survey-app-paused",
    createdAt: new Date(),
    updatedAt: new Date(),
    name: "App Survey Paused",
    environmentId: environmentId,
    displayLimit: null,
    endings: [],
    followUps: [],
    isBackButtonHidden: false,
    isSingleResponsePerEmailEnabled: false,
    isVerifyEmailEnabled: false,
    projectOverwrites: null,
    runOnDate: null,
    showLanguageSwitch: false,
    type: "app",
    status: "paused",
    questions: [],
    displayOption: "displayOnce",
    recontactDays: null,
    autoClose: null,
    closeOnDate: null,
    delay: 0,
    displayPercentage: null,
    autoComplete: null,
    singleUse: null,
    triggers: [],
    languages: [],
    pin: null,
    resultShareKey: null,
    segment: null,
    styling: null,
    surveyClosedMessage: null,
    hiddenFields: { enabled: false },
    welcomeCard: { enabled: false, showResponseCount: false, timeToFinish: false },
    variables: [],
    createdBy: null,
    recaptcha: { enabled: false, threshold: 0.5 },
  },
  // Filtered out: inProgress but link type.
  {
    id: "survey-web-inProgress",
    createdAt: new Date(),
    updatedAt: new Date(),
    name: "Web Survey In Progress",
    environmentId: environmentId,
    type: "link",
    displayLimit: null,
    endings: [],
    followUps: [],
    isBackButtonHidden: false,
    isSingleResponsePerEmailEnabled: false,
    isVerifyEmailEnabled: false,
    projectOverwrites: null,
    runOnDate: null,
    showLanguageSwitch: false,
    status: "inProgress",
    questions: [],
    displayOption: "displayOnce",
    recontactDays: null,
    autoClose: null,
    closeOnDate: null,
    delay: 0,
    displayPercentage: null,
    autoComplete: null,
    singleUse: null,
    triggers: [],
    languages: [],
    pin: null,
    resultShareKey: null,
    segment: null,
    styling: null,
    surveyClosedMessage: null,
    hiddenFields: { enabled: false },
    welcomeCard: { enabled: false, showResponseCount: false, timeToFinish: false },
    variables: [],
    createdBy: null,
    recaptcha: { enabled: false, threshold: 0.5 },
  },
];
// Single code action class returned unfiltered in the environment state.
const mockActionClasses: TActionClass[] = [
  {
    id: "action-1",
    createdAt: new Date(),
    updatedAt: new Date(),
    name: "Action 1",
    description: null,
    type: "code",
    noCodeConfig: null,
    environmentId: environmentId,
    key: "action1",
  },
];
describe("getEnvironmentState", () => {
beforeEach(() => {
vi.resetAllMocks();
// Mock the cache implementation
vi.mocked(cache).mockImplementation((fn) => async () => {
return fn();
});
// Default mocks for successful retrieval
vi.mocked(getEnvironment).mockResolvedValue(mockEnvironment);
vi.mocked(getOrganizationByEnvironmentId).mockResolvedValue(mockOrganization);
vi.mocked(getProjectForEnvironmentState).mockResolvedValue(mockProject);
vi.mocked(getSurveysForEnvironmentState).mockResolvedValue(mockSurveys);
vi.mocked(getActionClassesForEnvironmentState).mockResolvedValue(mockActionClasses);
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(50); // Default below limit
});
afterEach(() => {
vi.resetAllMocks();
});
test("should return the correct environment state", async () => {
const result = await getEnvironmentState(environmentId);
const expectedData: TJsEnvironmentState["data"] = {
recaptchaSiteKey: "mock_recaptcha_site_key",
surveys: [mockSurveys[0]], // Only app, inProgress survey
actionClasses: mockActionClasses,
project: mockProject,
};
expect(result.data).toEqual(expectedData);
expect(result.revalidateEnvironment).toBe(false);
expect(getEnvironment).toHaveBeenCalledWith(environmentId);
expect(getOrganizationByEnvironmentId).toHaveBeenCalledWith(environmentId);
expect(getProjectForEnvironmentState).toHaveBeenCalledWith(environmentId);
expect(getSurveysForEnvironmentState).toHaveBeenCalledWith(environmentId);
expect(getActionClassesForEnvironmentState).toHaveBeenCalledWith(environmentId);
expect(prisma.environment.update).not.toHaveBeenCalled();
expect(capturePosthogEnvironmentEvent).not.toHaveBeenCalled();
expect(getMonthlyOrganizationResponseCount).toHaveBeenCalled(); // Not cloud
expect(sendPlanLimitsReachedEventToPosthogWeekly).not.toHaveBeenCalled();
});
test("should throw ResourceNotFoundError if environment not found", async () => {
vi.mocked(getEnvironment).mockResolvedValue(null);
await expect(getEnvironmentState(environmentId)).rejects.toThrow(ResourceNotFoundError);
});
test("should throw ResourceNotFoundError if organization not found", async () => {
vi.mocked(getOrganizationByEnvironmentId).mockResolvedValue(null);
await expect(getEnvironmentState(environmentId)).rejects.toThrow(ResourceNotFoundError);
});
test("should throw ResourceNotFoundError if project not found", async () => {
vi.mocked(getProjectForEnvironmentState).mockResolvedValue(null);
await expect(getEnvironmentState(environmentId)).rejects.toThrow(ResourceNotFoundError);
});
test("should update environment and capture event if app setup not completed", async () => {
const incompleteEnv = { ...mockEnvironment, appSetupCompleted: false };
vi.mocked(getEnvironment).mockResolvedValue(incompleteEnv);
const result = await getEnvironmentState(environmentId);
expect(prisma.environment.update).toHaveBeenCalledWith({
where: { id: environmentId },
data: { appSetupCompleted: true },
});
expect(capturePosthogEnvironmentEvent).toHaveBeenCalledWith(environmentId, "app setup completed");
expect(result.revalidateEnvironment).toBe(true);
});
test("should return empty surveys if monthly response limit reached (Cloud)", async () => {
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(100); // Exactly at limit
vi.mocked(getSurveysForEnvironmentState).mockResolvedValue(mockSurveys);
const result = await getEnvironmentState(environmentId);
expect(result.data.surveys).toEqual([]);
expect(getMonthlyOrganizationResponseCount).toHaveBeenCalledWith(mockOrganization.id);
expect(sendPlanLimitsReachedEventToPosthogWeekly).toHaveBeenCalledWith(environmentId, {
plan: mockOrganization.billing.plan,
limits: {
projects: null,
monthly: {
miu: null,
responses: mockOrganization.billing.limits.monthly.responses,
},
},
});
});
test("should return surveys if monthly response limit not reached (Cloud)", async () => {
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(99); // Below limit
const result = await getEnvironmentState(environmentId);
expect(result.data.surveys).toEqual([mockSurveys[0]]);
expect(getMonthlyOrganizationResponseCount).toHaveBeenCalledWith(mockOrganization.id);
expect(sendPlanLimitsReachedEventToPosthogWeekly).not.toHaveBeenCalled();
});
test("should handle error when sending Posthog limit reached event", async () => {
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(100);
const posthogError = new Error("Posthog failed");
vi.mocked(sendPlanLimitsReachedEventToPosthogWeekly).mockRejectedValue(posthogError);
const result = await getEnvironmentState(environmentId);
expect(result.data.surveys).toEqual([]);
expect(logger.error).toHaveBeenCalledWith(
posthogError,
"Error sending plan limits reached event to Posthog"
);
});
test("should include recaptchaSiteKey if recaptcha variables are set", async () => {
const result = await getEnvironmentState(environmentId);
expect(result.data.recaptchaSiteKey).toBe("mock_recaptcha_site_key");
});
test("should filter surveys correctly (only app type and inProgress status)", async () => {
const result = await getEnvironmentState(environmentId);
expect(result.data.surveys).toHaveLength(1);
expect(result.data.surveys[0].id).toBe("survey-app-inProgress");
});
});

View File

@@ -1,6 +1,6 @@
import { actionClassCache } from "@/lib/actionClass/cache";
import { cache } from "@/lib/cache";
import { IS_FORMBRICKS_CLOUD } from "@/lib/constants";
import { IS_FORMBRICKS_CLOUD, IS_RECAPTCHA_CONFIGURED, RECAPTCHA_SITE_KEY } from "@/lib/constants";
import { environmentCache } from "@/lib/environment/cache";
import { getEnvironment } from "@/lib/environment/service";
import { organizationCache } from "@/lib/organization/cache";
@@ -107,6 +107,7 @@ export const getEnvironmentState = async (
surveys: !isMonthlyResponsesLimitReached ? filteredSurveys : [],
actionClasses,
project: project,
...(IS_RECAPTCHA_CONFIGURED ? { recaptchaSiteKey: RECAPTCHA_SITE_KEY } : {}),
};
return {

View File

@@ -0,0 +1,120 @@
import { cache } from "@/lib/cache";
import { projectCache } from "@/lib/project/cache";
import { Prisma } from "@prisma/client";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { DatabaseError } from "@formbricks/types/errors";
import { TJsEnvironmentStateProject } from "@formbricks/types/js";
import { getProjectForEnvironmentState } from "./project";
// Mock dependencies
vi.mock("@/lib/cache");
vi.mock("@/lib/project/cache");
// Stub the database client so no real queries run; only project.findFirst is needed here.
vi.mock("@formbricks/database", () => ({
  prisma: {
    project: {
      findFirst: vi.fn(),
    },
  },
}));
vi.mock("@formbricks/logger", () => ({
  logger: {
    error: vi.fn(),
  },
}));
vi.mock("@/lib/utils/validate"); // Mock validateInputs if needed, though it's often tested elsewhere
const environmentId = "test-environment-id";
// Project fixture in the exact shape the service's prisma `select` returns.
const mockProject: TJsEnvironmentStateProject = {
  id: "test-project-id",
  recontactDays: 30,
  clickOutsideClose: true,
  darkOverlay: false,
  placement: "bottomRight",
  inAppSurveyBranding: true,
  styling: { allowStyleOverwrite: false },
};
describe("getProjectForEnvironmentState", () => {
beforeEach(() => {
vi.resetAllMocks();
// Mock cache implementation
vi.mocked(cache).mockImplementation((fn) => async () => {
return fn();
});
// Mock projectCache tags
vi.mocked(projectCache.tag.byEnvironmentId).mockReturnValue(`project-env-${environmentId}`);
});
afterEach(() => {
vi.resetAllMocks();
});
test("should return project state successfully", async () => {
vi.mocked(prisma.project.findFirst).mockResolvedValue(mockProject);
const result = await getProjectForEnvironmentState(environmentId);
expect(result).toEqual(mockProject);
expect(prisma.project.findFirst).toHaveBeenCalledWith({
where: {
environments: {
some: {
id: environmentId,
},
},
},
select: {
id: true,
recontactDays: true,
clickOutsideClose: true,
darkOverlay: true,
placement: true,
inAppSurveyBranding: true,
styling: true,
},
});
expect(cache).toHaveBeenCalledTimes(1);
expect(cache).toHaveBeenCalledWith(
expect.any(Function),
[`getProjectForEnvironmentState-${environmentId}`],
{
tags: [`project-env-${environmentId}`],
}
);
});
test("should return null if project not found", async () => {
vi.mocked(prisma.project.findFirst).mockResolvedValue(null);
const result = await getProjectForEnvironmentState(environmentId);
expect(result).toBeNull();
expect(prisma.project.findFirst).toHaveBeenCalledTimes(1);
expect(cache).toHaveBeenCalledTimes(1);
});
test("should throw DatabaseError on PrismaClientKnownRequestError", async () => {
const prismaError = new Prisma.PrismaClientKnownRequestError("Test error", {
code: "P2001",
clientVersion: "test",
});
vi.mocked(prisma.project.findFirst).mockRejectedValue(prismaError);
await expect(getProjectForEnvironmentState(environmentId)).rejects.toThrow(DatabaseError);
expect(logger.error).toHaveBeenCalledWith(prismaError, "Error getting project for environment state");
expect(cache).toHaveBeenCalledTimes(1);
});
test("should re-throw unknown errors", async () => {
const unknownError = new Error("Something went wrong");
vi.mocked(prisma.project.findFirst).mockRejectedValue(unknownError);
await expect(getProjectForEnvironmentState(environmentId)).rejects.toThrow(unknownError);
expect(logger.error).not.toHaveBeenCalled(); // Should not log unknown errors here
expect(cache).toHaveBeenCalledTimes(1);
});
});

View File

@@ -0,0 +1,143 @@
import { cache } from "@/lib/cache";
import { validateInputs } from "@/lib/utils/validate";
import { transformPrismaSurvey } from "@/modules/survey/lib/utils";
import { Prisma } from "@prisma/client";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { DatabaseError } from "@formbricks/types/errors";
import { TJsEnvironmentStateSurvey } from "@formbricks/types/js";
import { getSurveysForEnvironmentState } from "./survey";
// Mock dependencies
vi.mock("@/lib/cache");
vi.mock("@/lib/utils/validate");
vi.mock("@/modules/survey/lib/utils");
// Stub the database client so no real queries run; only survey.findMany is needed here.
vi.mock("@formbricks/database", () => ({
  prisma: {
    survey: {
      findMany: vi.fn(),
    },
  },
}));
vi.mock("@formbricks/logger", () => ({
  logger: {
    error: vi.fn(),
  },
}));
const environmentId = "test-environment-id";
// Raw row shape as returned by prisma, before transformPrismaSurvey runs.
const mockPrismaSurvey = {
  id: "survey-1",
  welcomeCard: { enabled: false },
  name: "Test Survey",
  questions: [],
  variables: [],
  type: "app",
  showLanguageSwitch: false,
  languages: [],
  endings: [],
  autoClose: null,
  styling: null,
  status: "inProgress",
  recaptcha: null,
  segment: null,
  recontactDays: null,
  displayLimit: null,
  displayOption: "displayOnce",
  hiddenFields: { enabled: false },
  isBackButtonHidden: false,
  triggers: [],
  displayPercentage: null,
  delay: 0,
  projectOverwrites: null,
};
// The value the mocked transformPrismaSurvey returns for the row above.
const mockTransformedSurvey: TJsEnvironmentStateSurvey = {
  id: "survey-1",
  welcomeCard: { enabled: false } as TJsEnvironmentStateSurvey["welcomeCard"],
  name: "Test Survey",
  questions: [],
  variables: [],
  type: "app",
  showLanguageSwitch: false,
  languages: [],
  endings: [],
  autoClose: null,
  styling: null,
  status: "inProgress",
  recaptcha: null,
  segment: null,
  recontactDays: null,
  displayLimit: null,
  displayOption: "displayOnce",
  hiddenFields: { enabled: false },
  isBackButtonHidden: false,
  triggers: [],
  displayPercentage: null,
  delay: 0,
  projectOverwrites: null,
};
describe("getSurveysForEnvironmentState", () => {
beforeEach(() => {
vi.mocked(cache).mockImplementation((fn) => async () => {
return fn();
});
vi.mocked(validateInputs).mockReturnValue([environmentId]); // Assume validation passes
vi.mocked(transformPrismaSurvey).mockReturnValue(mockTransformedSurvey);
});
afterEach(() => {
vi.resetAllMocks();
});
test("should return transformed surveys on successful fetch", async () => {
vi.mocked(prisma.survey.findMany).mockResolvedValue([mockPrismaSurvey]);
const result = await getSurveysForEnvironmentState(environmentId);
expect(validateInputs).toHaveBeenCalledWith([environmentId, expect.any(Object)]);
expect(prisma.survey.findMany).toHaveBeenCalledWith({
where: { environmentId },
select: expect.any(Object), // Check if select is called, specific fields are in the original code
});
expect(transformPrismaSurvey).toHaveBeenCalledWith(mockPrismaSurvey);
expect(result).toEqual([mockTransformedSurvey]);
expect(logger.error).not.toHaveBeenCalled();
});
test("should return an empty array if no surveys are found", async () => {
vi.mocked(prisma.survey.findMany).mockResolvedValue([]);
const result = await getSurveysForEnvironmentState(environmentId);
expect(prisma.survey.findMany).toHaveBeenCalledWith({
where: { environmentId },
select: expect.any(Object),
});
expect(transformPrismaSurvey).not.toHaveBeenCalled();
expect(result).toEqual([]);
expect(logger.error).not.toHaveBeenCalled();
});
test("should throw DatabaseError on Prisma known request error", async () => {
const prismaError = new Prisma.PrismaClientKnownRequestError("Test Prisma Error", {
code: "P2025",
clientVersion: "5.0.0",
});
vi.mocked(prisma.survey.findMany).mockRejectedValue(prismaError);
await expect(getSurveysForEnvironmentState(environmentId)).rejects.toThrow(DatabaseError);
expect(logger.error).toHaveBeenCalledWith(prismaError, "Error getting surveys for environment state");
});
test("should rethrow unknown errors", async () => {
const unknownError = new Error("Something went wrong");
vi.mocked(prisma.survey.findMany).mockRejectedValue(unknownError);
await expect(getSurveysForEnvironmentState(environmentId)).rejects.toThrow(unknownError);
expect(logger.error).not.toHaveBeenCalled();
});
});

View File

@@ -49,6 +49,7 @@ export const getSurveysForEnvironmentState = reactCache(
autoClose: true,
styling: true,
status: true,
recaptcha: true,
segment: {
include: {
surveys: {

View File

@@ -1,6 +1,7 @@
import { responses } from "@/app/lib/api/response";
import { transformErrorToDetails } from "@/app/lib/api/validator";
import { sendToPipeline } from "@/app/lib/pipelines";
import { validateFileUploads } from "@/lib/fileValidation";
import { updateResponse } from "@/lib/response/service";
import { getSurvey } from "@/lib/survey/service";
import { logger } from "@formbricks/logger";
@@ -11,6 +12,20 @@ export const OPTIONS = async (): Promise<Response> => {
return responses.successResponse({}, true);
};
const handleDatabaseError = (error: Error, url: string, endpoint: string, responseId: string): Response => {
if (error instanceof ResourceNotFoundError) {
return responses.notFoundResponse("Response", responseId, true);
}
if (error instanceof InvalidInputError) {
return responses.badRequestResponse(error.message, undefined, true);
}
if (error instanceof DatabaseError) {
logger.error({ error, url }, `Error in ${endpoint}`);
return responses.internalServerErrorResponse(error.message, true);
}
return responses.internalServerErrorResponse("Unknown error occurred", true);
};
export const PUT = async (
request: Request,
props: { params: Promise<{ responseId: string }> }
@@ -23,7 +38,6 @@ export const PUT = async (
}
const responseUpdate = await request.json();
const inputValidation = ZResponseUpdateInput.safeParse(responseUpdate);
if (!inputValidation.success) {
@@ -39,19 +53,8 @@ export const PUT = async (
try {
response = await updateResponse(responseId, inputValidation.data);
} catch (error) {
if (error instanceof ResourceNotFoundError) {
return responses.notFoundResponse("Response", responseId, true);
}
if (error instanceof InvalidInputError) {
return responses.badRequestResponse(error.message);
}
if (error instanceof DatabaseError) {
logger.error(
{ error, url: request.url },
"Error in PUT /api/v1/client/[environmentId]/responses/[responseId]"
);
return responses.internalServerErrorResponse(error.message);
}
const endpoint = "PUT /api/v1/client/[environmentId]/responses/[responseId]";
return handleDatabaseError(error, request.url, endpoint, responseId);
}
// get survey to get environmentId
@@ -59,16 +62,12 @@ export const PUT = async (
try {
survey = await getSurvey(response.surveyId);
} catch (error) {
if (error instanceof InvalidInputError) {
return responses.badRequestResponse(error.message);
}
if (error instanceof DatabaseError) {
logger.error(
{ error, url: request.url },
"Error in PUT /api/v1/client/[environmentId]/responses/[responseId]"
);
return responses.internalServerErrorResponse(error.message);
}
const endpoint = "PUT /api/v1/client/[environmentId]/responses/[responseId]";
return handleDatabaseError(error, request.url, endpoint, responseId);
}
if (!validateFileUploads(response.data, survey.questions)) {
return responses.badRequestResponse("Invalid file upload response", undefined, true);
}
// send response update to pipeline
@@ -87,7 +86,7 @@ export const PUT = async (
event: "responseFinished",
environmentId: survey.environmentId,
surveyId: survey.id,
response: response,
response,
});
}
return responses.successResponse({}, true);

View File

@@ -0,0 +1,160 @@
import { cache } from "@/lib/cache";
import { Prisma } from "@prisma/client";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { DatabaseError } from "@formbricks/types/errors";
import { getContact, getContactByUserId } from "./contact";
// Mock prisma: stub both lookups used by the contact lib so no real queries run.
vi.mock("@formbricks/database", () => ({
  prisma: {
    contact: {
      findUnique: vi.fn(),
      findFirst: vi.fn(),
    },
  },
}));
// Mock cache module
vi.mock("@/lib/cache");
// Mock react cache
vi.mock("react", async () => {
  const actual = await vi.importActual("react");
  return {
    ...actual,
    cache: vi.fn((fn) => fn), // Mock react's cache to just return the function
  };
});
const mockContactId = "test-contact-id";
const mockEnvironmentId = "test-env-id";
const mockUserId = "test-user-id";
describe("Contact API Lib", () => {
beforeEach(() => {
vi.mocked(cache).mockImplementation((fn) => async () => {
return fn();
});
});
afterEach(() => {
vi.resetAllMocks();
});
describe("getContact", () => {
test("should return contact if found", async () => {
const mockContactData = { id: mockContactId };
vi.mocked(prisma.contact.findUnique).mockResolvedValue(mockContactData);
const contact = await getContact(mockContactId);
expect(prisma.contact.findUnique).toHaveBeenCalledWith({
where: { id: mockContactId },
select: { id: true },
});
expect(contact).toEqual(mockContactData);
});
test("should return null if contact not found", async () => {
vi.mocked(prisma.contact.findUnique).mockResolvedValue(null);
const contact = await getContact(mockContactId);
expect(prisma.contact.findUnique).toHaveBeenCalledWith({
where: { id: mockContactId },
select: { id: true },
});
expect(contact).toBeNull();
});
test("should throw DatabaseError on Prisma error", async () => {
const prismaError = new Prisma.PrismaClientKnownRequestError("Test Prisma Error", {
code: "P2025",
clientVersion: "5.0.0",
});
vi.mocked(prisma.contact.findUnique).mockRejectedValue(prismaError);
await expect(getContact(mockContactId)).rejects.toThrow(DatabaseError);
expect(prisma.contact.findUnique).toHaveBeenCalledWith({
where: { id: mockContactId },
select: { id: true },
});
});
});
describe("getContactByUserId", () => {
test("should return contact with formatted attributes if found", async () => {
const mockContactData = {
id: mockContactId,
attributes: [
{ attributeKey: { key: "userId" }, value: mockUserId },
{ attributeKey: { key: "email" }, value: "test@example.com" },
],
};
vi.mocked(prisma.contact.findFirst).mockResolvedValue(mockContactData);
const contact = await getContactByUserId(mockEnvironmentId, mockUserId);
expect(prisma.contact.findFirst).toHaveBeenCalledWith({
where: {
attributes: {
some: {
attributeKey: {
key: "userId",
environmentId: mockEnvironmentId,
},
value: mockUserId,
},
},
},
select: {
id: true,
attributes: {
select: {
attributeKey: { select: { key: true } },
value: true,
},
},
},
});
expect(contact).toEqual({
id: mockContactId,
attributes: {
userId: mockUserId,
email: "test@example.com",
},
});
});
test("should return null if contact not found by userId", async () => {
vi.mocked(prisma.contact.findFirst).mockResolvedValue(null);
const contact = await getContactByUserId(mockEnvironmentId, mockUserId);
expect(prisma.contact.findFirst).toHaveBeenCalledWith({
where: {
attributes: {
some: {
attributeKey: {
key: "userId",
environmentId: mockEnvironmentId,
},
value: mockUserId,
},
},
},
select: {
id: true,
attributes: {
select: {
attributeKey: { select: { key: true } },
value: true,
},
},
},
});
expect(contact).toBeNull();
});
});
});

View File

@@ -0,0 +1,201 @@
import {
getMonthlyOrganizationResponseCount,
getOrganizationByEnvironmentId,
} from "@/lib/organization/service";
import { sendPlanLimitsReachedEventToPosthogWeekly } from "@/lib/posthogServer";
import { calculateTtcTotal } from "@/lib/response/utils";
import { Prisma } from "@prisma/client";
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
import { prisma } from "@formbricks/database";
import { logger } from "@formbricks/logger";
import { DatabaseError, ResourceNotFoundError } from "@formbricks/types/errors";
import { TResponseInput } from "@formbricks/types/responses";
import { createResponse } from "./response";
// Mutable flag read lazily via the getter in the constants mock below, so individual
// tests can flip cloud mode on without re-mocking the module.
let mockIsFormbricksCloud = false;
vi.mock("@/lib/constants", () => ({
  get IS_FORMBRICKS_CLOUD() {
    return mockIsFormbricksCloud;
  },
}));
vi.mock("@/lib/organization/service", () => ({
  getMonthlyOrganizationResponseCount: vi.fn(),
  getOrganizationByEnvironmentId: vi.fn(),
}));
vi.mock("@/lib/posthogServer", () => ({
  sendPlanLimitsReachedEventToPosthogWeekly: vi.fn(),
}));
vi.mock("@/lib/response/cache", () => ({
  responseCache: {
    revalidate: vi.fn(),
  },
}));
// Identity implementation by default; overridden per test where TTC matters.
vi.mock("@/lib/response/utils", () => ({
  calculateTtcTotal: vi.fn((ttc) => ttc),
}));
vi.mock("@/lib/responseNote/cache", () => ({
  responseNoteCache: {
    revalidate: vi.fn(),
  },
}));
vi.mock("@/lib/telemetry", () => ({
  captureTelemetry: vi.fn(),
}));
vi.mock("@/lib/utils/validate", () => ({
  validateInputs: vi.fn(),
}));
// Stub the database client so no real queries run; only response.create is needed here.
vi.mock("@formbricks/database", () => ({
  prisma: {
    response: {
      create: vi.fn(),
    },
  },
}));
vi.mock("@formbricks/logger", () => ({
  logger: {
    error: vi.fn(),
  },
}));
vi.mock("./contact", () => ({
  getContactByUserId: vi.fn(),
}));
const environmentId = "test-environment-id";
const surveyId = "test-survey-id";
const organizationId = "test-organization-id";
const responseId = "test-response-id";
// Free-plan organization with a 100 responses/month limit; used by the cloud-limit tests.
const mockOrganization = {
  id: organizationId,
  name: "Test Org",
  billing: {
    limits: { monthly: { responses: 100 } },
    plan: "free",
  },
};
// Input payload for createResponse: anonymous (no userId), unfinished response.
const mockResponseInput: TResponseInput = {
  environmentId,
  surveyId,
  userId: null,
  finished: false,
  data: { question1: "answer1" },
  meta: { source: "web" },
  ttc: { question1: 1000 },
};
// Row shape that the mocked prisma.response.create resolves with.
const mockResponsePrisma = {
  id: responseId,
  createdAt: new Date(),
  updatedAt: new Date(),
  surveyId,
  finished: false,
  data: { question1: "answer1" },
  meta: { source: "web" },
  ttc: { question1: 1000 },
  variables: {},
  contactAttributes: {},
  singleUseId: null,
  language: null,
  displayId: null,
  tags: [],
  notes: [],
};
describe("createResponse", () => {
beforeEach(() => {
vi.resetAllMocks();
vi.mocked(getOrganizationByEnvironmentId).mockResolvedValue(mockOrganization as any);
vi.mocked(prisma.response.create).mockResolvedValue(mockResponsePrisma as any);
vi.mocked(calculateTtcTotal).mockImplementation((ttc) => ttc);
});
afterEach(() => {
mockIsFormbricksCloud = false;
});
test("should handle finished response and calculate TTC", async () => {
const finishedInput = { ...mockResponseInput, finished: true };
await createResponse(finishedInput);
expect(calculateTtcTotal).toHaveBeenCalledWith(mockResponseInput.ttc);
expect(prisma.response.create).toHaveBeenCalledWith(
expect.objectContaining({
data: expect.objectContaining({ finished: true }),
})
);
});
test("should check response limits if IS_FORMBRICKS_CLOUD is true", async () => {
mockIsFormbricksCloud = true;
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(50);
await createResponse(mockResponseInput);
expect(getMonthlyOrganizationResponseCount).toHaveBeenCalledWith(organizationId);
expect(sendPlanLimitsReachedEventToPosthogWeekly).not.toHaveBeenCalled();
});
test("should send limit reached event if IS_FORMBRICKS_CLOUD is true and limit reached", async () => {
mockIsFormbricksCloud = true;
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(100);
await createResponse(mockResponseInput);
expect(getMonthlyOrganizationResponseCount).toHaveBeenCalledWith(organizationId);
expect(sendPlanLimitsReachedEventToPosthogWeekly).toHaveBeenCalledWith(environmentId, {
plan: "free",
limits: {
projects: null,
monthly: {
responses: 100,
miu: null,
},
},
});
});
test("should throw ResourceNotFoundError if organization not found", async () => {
vi.mocked(getOrganizationByEnvironmentId).mockResolvedValue(null);
await expect(createResponse(mockResponseInput)).rejects.toThrow(ResourceNotFoundError);
});
test("should throw DatabaseError on Prisma known request error", async () => {
const prismaError = new Prisma.PrismaClientKnownRequestError("Test Prisma Error", {
code: "P2002",
clientVersion: "test",
});
vi.mocked(prisma.response.create).mockRejectedValue(prismaError);
await expect(createResponse(mockResponseInput)).rejects.toThrow(DatabaseError);
});
test("should throw original error on other Prisma errors", async () => {
const genericError = new Error("Generic database error");
vi.mocked(prisma.response.create).mockRejectedValue(genericError);
await expect(createResponse(mockResponseInput)).rejects.toThrow(genericError);
});
test("should log error but not throw if sendPlanLimitsReachedEventToPosthogWeekly fails", async () => {
mockIsFormbricksCloud = true;
vi.mocked(getMonthlyOrganizationResponseCount).mockResolvedValue(100);
const posthogError = new Error("PostHog error");
vi.mocked(sendPlanLimitsReachedEventToPosthogWeekly).mockRejectedValue(posthogError);
await createResponse(mockResponseInput);
expect(logger.error).toHaveBeenCalledWith(
posthogError,
"Error sending plan limits reached event to Posthog"
);
});
});

Some files were not shown because too many files have changed in this diff Show More