Compare commits


326 Commits

Author SHA1 Message Date
TheodorTomas
d77facfacf chore: making updates on coderabbitai feedback 2026-02-02 16:00:47 +08:00
TheodorTomas
8ef7e47bee feat: add skill filter to reduce AI token costs
Implements opt-in filtering system for Vercel React best practices.
Reduces skill set by 50% while keeping CRITICAL/HIGH/MEDIUM priority rules.
Includes automatic GitHub fetch and Prettier formatting.
2026-02-02 15:48:45 +08:00
Matti Nannt
8f6d27c1ef fix: upgrade next.js and preact to fix high-severity vulnerabilities (#7134) 2026-01-20 11:22:01 +00:00
Dhruwang Jariwala
a37815b831 fix: breaking email embed preview for single select question (#7133) 2026-01-20 06:42:15 +00:00
Dhruwang Jariwala
2b526a87ca fix: email locale in invite accepted email (#7124) 2026-01-19 13:32:01 +00:00
Dhruwang Jariwala
047750967c fix: console warnings in survey ui package (#7130) 2026-01-19 07:19:13 +00:00
Johannes
a54356c3b0 docs: add CSAT and update Survey Cooldown (#7128)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2026-01-19 07:06:16 +00:00
Matti Nannt
38ea5ed6ae perf: remove redundant database indexes (#7104) 2026-01-16 10:17:05 +00:00
Dhruwang Jariwala
6e19de32f7 fix: org managers not able to access api keys (#7123) 2026-01-16 09:54:54 +00:00
Johannes
957a4432f4 feat: introduce language variations (#7082)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2026-01-16 08:51:20 +00:00
Matti Nannt
22a5d4bb7d chore: consolidate agent instructions and remove Cursor rules (#7096) 2026-01-16 08:20:23 +00:00
Matti Nannt
226dff0344 fix: upgrade storybook to v10.1.11 (#7120) 2026-01-16 07:19:18 +00:00
Dhruwang Jariwala
d474a94a21 fix: multi lang button label issue (#7117) 2026-01-15 17:57:50 +00:00
dependabot[bot]
c1a4cc308b chore(deps): bump the npm_and_yarn group across 2 directories with 1 update (#7081)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matti Nannt <matti@formbricks.com>
2026-01-15 15:10:33 +01:00
Dhruwang Jariwala
210da98b69 fix: scrolling in project breadcrumb dropdown (#7118) 2026-01-15 11:59:17 +00:00
Matti Nannt
2fc183d384 chore: update pre-commit hook to address husky warning (#7106) 2026-01-15 07:42:37 +00:00
Dhruwang Jariwala
78fb111610 fix: syntax issue in pr check size github action (#7116) 2026-01-15 06:43:59 +00:00
Bhagya Amarasinghe
11c0cb4b61 fix: add required WEBAPP_URL/NEXTAUTH_URL config and improve helm chart (#7107) 2026-01-14 18:26:40 +00:00
Johannes
95831f7c7f feat: add auto-save for draft surveys and Cmd+S hotkey (#7087)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2026-01-14 17:23:34 +00:00
Anshuman Pandey
a31e7bfaa5 feat: security signup ui (#7088)
Co-authored-by: Johannes <johannes@formbricks.com>
2026-01-14 16:45:21 +00:00
Matti Nannt
6e35fc1769 fix: update systeminformation to 5.27.14 (#7105) 2026-01-14 11:04:43 +00:00
Theodór Tómas
48cded1646 perf: decouple constants from zod and add bundle analyzer (#7101) 2026-01-14 09:50:05 +00:00
Dhruwang Jariwala
db752cee15 feat: add support for mp3 file extension and corresponding MIME type (#7103)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2026-01-13 12:19:22 +00:00
Dhruwang Jariwala
b33aae0a73 fix: missing Russian language in language select dropdown (#7099) 2026-01-13 10:08:50 +00:00
Matti Nannt
72126ad736 fix: required label not being translated (#7092) 2026-01-13 10:05:11 +00:00
Theodór Tómas
4a2eeac90b perf: reduce bundle size (#7094) 2026-01-12 16:57:12 +00:00
Anshuman Pandey
46be3e7d70 feat: webhook secret (#7084) 2026-01-09 12:31:29 +00:00
Dhruwang Jariwala
6d140532a7 feat: add IP address capture functionality to surveys (#7079) 2026-01-09 11:28:05 +00:00
Dhruwang Jariwala
8c4a7f1518 fix: remove subheader field from survey element presets (#7078) 2026-01-09 08:28:48 +00:00
Dhruwang Jariwala
63fe32a786 chore: parallel processing in lingo.dev (#7080) 2026-01-08 05:03:31 +00:00
Matti Nannt
84c465f974 fix: ensure deterministic instanceId via secondary sort key (#7070) 2026-01-07 14:04:56 +00:00
Johannes
6a33498737 feat: Custom HTML scripts in link surveys (#7064)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2026-01-07 10:06:41 +00:00
Matti Nannt
5130c747d4 chore: license server staging config (#7075)
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2026-01-07 09:50:18 +00:00
Dhruwang Jariwala
f5583d2652 fix: add background color to button URL input in CTA element form (#7077) 2026-01-07 09:17:38 +00:00
Fahleen Arif
e0d75914a4 fix: update placeholder text for name input field in invite members form (#7054)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2026-01-07 08:18:36 +00:00
Dhruwang Jariwala
f02ca1cfe1 chore: remove string concatenation welcome card (#7073)
Co-authored-by: Balázs Úr <balazs@urbalazs.hu>
2026-01-07 07:25:20 +00:00
Anshuman Pandey
4ade83f189 fix: contacts refresh button (#7066) 2026-01-06 12:31:20 +00:00
Jagadish Madavalkar
f1fc9fea2c fix: api-wrapper returns valid malformed response (#7053)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2026-01-06 10:24:39 +00:00
Dhruwang Jariwala
25266e4566 fix: disappearing survey preview (#7065) 2026-01-06 06:23:11 +00:00
Matti Nannt
b960cfd2a1 chore: harden CSP and X-Frame-Options headers (#7062)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2026-01-06 06:21:19 +00:00
Matti Nannt
9e1d1c1dc2 feat: implement robust database seeding strategy (#7017)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2026-01-05 15:58:58 +00:00
Matti Nannt
8c63a9f7af chore: remove debug log from next.config.mjs (#7063) 2026-01-05 15:52:04 +00:00
Anshuman Pandey
fff0a7f052 fix: fixes duplicate userId issue with the contacts UI (#7051)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2026-01-05 09:21:50 +00:00
Anshuman Pandey
0ecc8aabff fix: fixes single use multi lang surveyUrl issue (#7057) 2026-01-05 06:08:15 +00:00
Dhruwang Jariwala
01cc0ab64d fix: correct typo in recontact waiting time description and adjust da… (#7056) 2026-01-05 06:02:28 +00:00
Anshuman Pandey
1d125bdac2 fix: fixes user api attribute override error (#7050) 2026-01-05 05:55:22 +00:00
Anshuman Pandey
ca67c4d5a8 feat: rename projects to workspaces (#7041)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-12-31 07:24:04 +00:00
Dhruwang Jariwala
d167d591ce fix: make description optional for consent and CTA elements (#7047) 2025-12-30 10:05:26 +00:00
Anshuman Pandey
acc3b0179a fix: defers page view actions to allow user context to be set first (#7048) 2025-12-30 08:56:14 +00:00
Johannes
3434b5cf08 fix: tweak edit attributes for contact UI (#7046)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-12-29 14:58:15 +00:00
Dhruwang Jariwala
a618f2df95 fix(types): use z.coerce.date() for ZActionClass timestamps (#7045)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-29 14:47:09 +00:00
Dhruwang Jariwala
5b334f6623 feat: UI to change attribute value for contacts (#7040) 2025-12-29 13:09:29 +00:00
Anshuman Pandey
fa2b63d6a1 feat: custom favicon (#7044) 2025-12-29 12:44:32 +00:00
Dhruwang Jariwala
9f0fe69b6b fix: typos (Duplicate of 7042) (#7043)
Co-authored-by: Balázs Úr <balazs@urbalazs.hu>
2025-12-29 06:19:54 +00:00
Dhruwang Jariwala
98cb2de02b feat: UI to manage attribute keys (#7038) 2025-12-26 10:02:37 +00:00
Anshuman Pandey
f00d0b7e20 fix: setUserId lets users override the previous userId (#7035) 2025-12-25 07:10:56 +00:00
Johannes
65abd4ee07 feat: add pretty URL UI components for surveys (#6969)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-12-24 06:39:46 +00:00
Johannes
939f135bf4 chore: unify error state for all questions types (#7001)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-12-24 06:36:48 +00:00
Johannes
729a16854a fix: German translations (#7033)
Co-authored-by: Balázs Úr <balazs@urbalazs.hu>
2025-12-24 06:36:21 +00:00
Dhruwang Jariwala
a2d3e37d69 fix: CSS variable pollution (#7026) 2025-12-24 05:54:52 +00:00
Dhruwang Jariwala
adf12f551d fix: Swedish translations (#7032) 2025-12-23 12:02:26 +00:00
Dhruwang Jariwala
3f2bddc358 feat: Russian translations (#7027) 2025-12-23 10:31:09 +00:00
Dhruwang Jariwala
ae6d1ac133 chore: improve wording in email text (Duplicate of #7003) (#7025)
Co-authored-by: Balázs Úr <balazs@urbalazs.hu>
Co-authored-by: Johannes <johannes@formbricks.com>
2025-12-23 09:56:53 +00:00
Dhruwang Jariwala
7c4569cd50 fix: file upload validation (#7028) 2025-12-23 09:36:45 +00:00
Matti Nannt
7354122447 fix: update V2 API OpenAPI paths to include full prefixes (#6983)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-23 06:29:25 +00:00
Matti Nannt
d54dca2b27 docs: update thanks section with chromatic and sentry logos (#7031) 2025-12-22 16:40:39 +00:00
Anshuman Pandey
acd5cff534 feat: email package for client side email components (#6986) 2025-12-22 14:13:06 +00:00
Matti Nannt
834929e766 feat: configure @formbricks/survey-ui for external publishing (#6991) 2025-12-22 12:39:54 +00:00
Dhruwang Jariwala
09f40ad816 fix: required cta issue (#7022) 2025-12-22 08:35:08 +00:00
Harsh Bhat
689b6491b3 docs: Link vs In app surveys (#7006)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-12-22 08:13:45 +00:00
Johannes
b70b2eef95 fix: vimeo + loom embed (#7018)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-12-20 08:08:48 +00:00
Harsh Bhat
392a95834b docs: Best practices Panel Management (#7011) 2025-12-20 06:32:57 +00:00
Anshuman Pandey
66d9cc8eac chore: adds docs for min browser version support (#7014) 2025-12-19 10:02:01 +00:00
Johannes
befdc078f1 fix: replace isomorphic-dompurify with sanitize-html in server component (#7002) 2025-12-19 07:34:56 +00:00
Dhruwang Jariwala
13b983b3b2 fix: missing question media (#6997)
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-19 07:29:06 +00:00
Harsh Bhat
1e285ebe4e docs: Remove references of delay removal with debug mode (#7009) 2025-12-19 07:03:02 +00:00
Dhruwang Jariwala
a7c4971952 fix: replaced bg-white with survey-bg color in surveys package (#7004)
Co-authored-by: Luis Gustavo S. Barreto <gustavo@ossystems.com.br>
2025-12-19 06:50:33 +00:00
Dhruwang Jariwala
c8689d91d5 fix: empty button in cta question (#6995) 2025-12-18 21:18:48 +00:00
Dhruwang Jariwala
73a2ff7421 fix: border radius for inputs (#6996) 2025-12-18 20:56:47 +00:00
Dhruwang Jariwala
0c28e89b41 fix: missing required question warning (#6998) 2025-12-18 19:12:47 +00:00
Anshuman Pandey
a736436e29 chore: fixes typo (#6993) 2025-12-18 09:25:12 +00:00
Johannes
7dbb0300d3 fix: Pass the isExternalUrlAllowed prop to welcome card (#6992) 2025-12-18 08:51:21 +00:00
Matti Nannt
e71f3f412c feat: Add base path support for Formbricks (#6853) 2025-12-17 17:13:32 +00:00
Anshuman Pandey
07ed926225 fix: updates the patch to fix the next-auth no proxy issue (#6987)
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-12-17 17:11:40 +00:00
Dhruwang Jariwala
15dc83a4eb feat: improved survey UI (#6988)
Co-authored-by: Matti Nannt <matti@formbricks.com>
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-17 16:13:28 +01:00
Johannes
3ce07edf43 chore: replacing intercom with chatwoot (#6980)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-16 16:16:09 +00:00
Johannes
0f34d9cc5f feat: standardize URL prefilling with option ID support and MQB support (#6970)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-16 10:09:47 +00:00
Matti Nannt
e9f800f017 fix: prepare pnpm in runner stage for airgapped deployments (#6925) 2025-12-15 13:30:55 +00:00
Johannes
ba2070b638 feat: add vars & hidden fields + send to verified email to followups (#6874)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-14 09:09:43 +00:00
Johannes
75cdb25d27 fix: improve survey response queue robustness to prevent data loss (#6959)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-14 08:18:11 +00:00
Johannes
6bc7db852c feat: Save draft without validation (Duplicate of #6847) (#6966)
Co-authored-by: Mahadeva Peruka <97960828+mahadevaperuka@users.noreply.github.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-12 21:52:00 +00:00
Matti Nannt
ffb4eac1a4 chore: upgrade azure-playwright (#6949) 2025-12-12 18:14:21 +00:00
Bhagya Amarasinghe
56da3b5725 chore: remove docker compose version pinning and update Traefik image version to v2.11.31 in docker-compose and documentation (#6967)
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-12-12 11:29:26 +01:00
dependabot[bot]
c189af5482 chore(deps): bump the npm_and_yarn group across 2 directories with 1 update (#6971)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-12-12 11:25:57 +01:00
Johannes
5dbf42fd6a feat: add bulk edit for single-select and multi-select options (#6951)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-12 06:49:49 +00:00
Anshuman Pandey
42525a86a8 fix: close the survey on formbricks.logout (#6955) 2025-12-12 06:03:35 +00:00
Anshuman Pandey
b96f0e67c5 fix: preserve attribute key casing during CSV contact upload (#6958)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-12-12 05:22:48 +00:00
Johannes
2d7b99ba26 feat: allow team admins to invite members to their own teams (#6891)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-12 05:01:48 +00:00
Matti Nannt
666a79044f fix: skip instance ID in license check during E2E tests (#6968)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-12 04:05:25 +00:00
Johannes
c3d97c2932 fix: docs links (#6960) 2025-12-10 10:59:25 +00:00
Anshuman Pandey
cc5d630a05 chore: adds docs for min ios and android versions (#6956) 2025-12-09 10:11:00 +00:00
Anshuman Pandey
be38d76ccf fix: removes empty imageUrl and videoUrl keys from elements (#6950) 2025-12-09 09:52:01 +00:00
Joel Ekström Svensson
a8eea306e5 feat: Add Swedish sv-SE translation (#6913)
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-12-08 14:49:44 +00:00
Matti Nannt
4fd53ac115 refactor: centralize instance ID generation (#6952) 2025-12-08 13:42:54 +00:00
Matti Nannt
eb92392ed1 fix: add node-forge security override to resolve Dependabot #230 (#6948) 2025-12-08 12:34:36 +00:00
dependabot[bot]
7412b32526 chore(deps): bump the npm_and_yarn group across 2 directories with 1 update (#6928)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-12-04 13:40:52 +00:00
Matti Nannt
193346a70d fix: upgrade Next.js to 15.5.7 and React to 19.1.2 to fix CVE-2025-66478 and CVE-2025-55182 (#6943) 2025-12-04 10:50:04 +00:00
Johannes
a1d4754b04 feat: allow survey-level logo override in styling tab (#6887)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-04 08:51:56 +00:00
Johannes
f4b918a4b6 feat: add survey metadata to webhook payload (#6939)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-04 07:08:42 +00:00
Dhruwang Jariwala
fb9a0b197a fix: disable keyboard navigation for 'other' option in multiple-choice component (#6941) 2025-12-04 06:59:13 +00:00
Dhruwang Jariwala
95b6c16dd1 fix: truncate language switch text #6910 (#6934)
Co-authored-by: Mahadeva Peruka <97960828+mahadevaperuka@users.noreply.github.com>
2025-12-03 13:40:26 +00:00
Johannes
cfdf09650f fix: error message in rating Question (#6909)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-12-03 09:15:34 +00:00
Anshuman Pandey
4c94fc25ae fix: fixes pnpm i18n script to generate surveys package translations as well (#6930) 2025-12-02 09:56:35 +00:00
Johannes
ccf501d925 fix: keyboard nav for MQP with multiple questions (#6926)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-12-02 06:40:30 +00:00
Dhruwang Jariwala
04dfbe0777 fix: removed unused t wrapper (#6923) 2025-12-01 16:35:13 +00:00
Matti Nannt
cbf255ab0d docs: add custom subpath deployment guide (#6922) 2025-12-01 15:33:51 +01:00
Dhruwang Jariwala
942366956c fix: missing finish label on last card (#6915) 2025-12-01 13:50:49 +00:00
Dhruwang Jariwala
a6ee796cef fix: back button label validation (#6916) 2025-12-01 12:09:50 +00:00
Dhruwang Jariwala
a535529bd3 fix: border around language select dropdown (#6914) 2025-12-01 08:57:36 +00:00
Dhruwang Jariwala
018cef61a6 feat: telemetry setup (#6888)
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-11-29 11:57:14 +00:00
Matti Nannt
c53e4f54cb feat: migrate integration configs from questions to elements (#6906)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-11-28 17:07:58 +00:00
Anshuman Pandey
e2fd71abfd fix: fixes the blocks deletion issue (#6907) 2025-11-28 14:04:37 +00:00
Anshuman Pandey
f888aa8a19 feat: MQP (#6901)
Co-authored-by: Matti Nannt <matti@formbricks.com>
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: Dhruwang Jariwala <67850763+Dhruwang@users.noreply.github.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-28 12:36:17 +00:00
Dhruwang Jariwala
2698817adb fix: language select UI (#6890)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-11-27 20:10:03 +00:00
Matti Nannt
2c18912f2f fix: use correct permission check for remove branding feature (#6895) 2025-11-27 15:56:43 +00:00
Johannes
f57497d8b3 fix: improve Contacts and Segments UX and functionality (#6855)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-26 07:49:23 +00:00
Johannes
aab6798b29 chore: Remove old telemetry & usage tracking (#6844)
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-11-25 12:57:43 +00:00
Johannes
f07092595f feat: UI improvements to survey editor and summary cards (#6857) 2025-11-25 09:49:59 +00:00
Johannes
c03c7ec1ed fix: Clarify wording around custom links against phishing (#6875) 2025-11-25 08:57:10 +00:00
Johannes
628de8e6ae fix: add missing filter option (#6879) 2025-11-25 08:55:34 +00:00
Matti Nannt
be4b54a827 docs: add S3 CORS configuration to file uploads documentation (#6877) 2025-11-24 13:00:28 +00:00
Harsh Bhat
e03df83e88 docs: Add GTM docs (#6830)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-11-24 10:59:27 +00:00
Dhruwang Jariwala
ed26427302 feat: add CSP nonce support for inline styles (#6796) (#6801) 2025-11-21 15:17:39 +00:00
Matti Nannt
554809742b fix: release pipeline boolean comparison for is_latest output (#6870) 2025-11-21 09:10:55 +00:00
Johannes
28adfb905c fix: Matrix filter (#6864)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-21 07:13:21 +00:00
Johannes
05c455ed62 fix: Link metadata (#6865) 2025-11-21 06:56:43 +00:00
Matti Nannt
f7687bc0ea fix: pin Prisma CLI to version 6 in Dockerfile (#6868) 2025-11-21 06:36:12 +00:00
Dhruwang Jariwala
af34391309 fix: filters not persisting in response page (#6862) 2025-11-20 15:14:44 +00:00
Dhruwang Jariwala
70978fbbdf fix: update preview when props change (#6860) 2025-11-20 13:26:55 +00:00
Matti Nannt
f6683d1165 fix: optimize survey list performance with client-side filtering (#6812)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-19 06:36:07 +00:00
Matti Nannt
13be7a8970 perf: Optimize link survey with server/client component architecture (#6764)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-19 06:31:41 +00:00
Dhruwang Jariwala
0472d5e8f0 fix: language switch tweak and docs feedback template (#6811) 2025-11-18 17:00:23 +00:00
Dhruwang Jariwala
00a61f7abe chore: response page optimization (#6843)
Co-authored-by: igor-srdoc <igor@srdoc.si>
2025-11-18 16:50:48 +00:00
Matti Nannt
6999abba3b fix: add typeorm security override (Dependabot #223) (#6842) 2025-11-18 10:35:34 +00:00
Matti Nannt
9ae66f44ae feat: add filterDateField parameter to enable filtering by updated-at in responses endpoint (#6833)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-18 10:14:45 +00:00
dependabot[bot]
7933d0077a chore(deps): bump glob from 11.0.2 to 11.1.0 in the npm_and_yarn group across 1 directory (#6838)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-18 11:13:41 +01:00
Johannes
cc8289fa33 feat: improve rating and NPS summary UI with aggregated view (#6834)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-18 08:38:11 +00:00
Matti Nannt
c458051839 chore: upgrade playwright to fix dependabot warnings (#6840) 2025-11-18 08:33:52 +00:00
Johannes
718a199d5b feat: add Personal Link generation UI (#6819)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-18 05:37:23 +00:00
Matti Nannt
5ab9fdf1e3 feat: reduce environment cache TTL to 1 minute for CDN and Redis (#6825)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-18 05:20:38 +00:00
Johannes
5741209aa9 fix: resolve metadata in hover confusion + other UI tweaks (#6821)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-17 11:51:49 +00:00
Johannes
35d0d8ed54 feat: add AND relationship support for URL filters in No Code Actions (#6822) 2025-11-17 11:06:32 +00:00
Johannes
5bce5c0a3b perf: Duplicate of Parallelize responses page data fetching v2 (#6831)
Co-authored-by: igor-srdoc <igor@srdoc.si>
2025-11-17 09:39:40 +00:00
Igor Srdoc
c61212964c perf: Parallelize independent data fetching in responses page (#6762)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-11-17 09:39:40 +00:00
Johannes
b8d41a6e9b perf: optimize survey editor drag and drop performance (#6823) 2025-11-17 09:36:13 +00:00
Johannes
eedd5200a4 fix: allow 1 option + other in select question (#6824)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-17 08:39:40 +00:00
Matti Nannt
71a85c7126 feat: add CUID v1 validation for environment ID endpoints (#6827)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-17 07:33:52 +00:00
Dhruwang Jariwala
341e2639e1 feat: spanish translations (#6817)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-11-13 14:48:37 +00:00
Dhruwang Jariwala
056470e6f0 fix: added variable key id mapping UI (#6814) 2025-11-13 09:56:42 +00:00
Dhruwang Jariwala
e965ad4b97 fix: raw html issues (#6813) 2025-11-13 09:12:39 +00:00
Johannes
12e703c02b feat: add scroll indicator button to scrollable container (#6803)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-11 11:59:58 +00:00
Johannes
07065f2675 fix: include responseStatus filter in active filter count display (#6809)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-11 11:05:02 +00:00
Johannes
7ca45cefeb fix: copy recontact options when copying surveys between environments (#6802) 2025-11-11 10:39:37 +00:00
Dhruwang Jariwala
4df28878db fix: preview animation fix (duplicate) (#6784)
Co-authored-by: Praveen Thanikachalam <100035228+prave01@users.noreply.github.com>
2025-11-06 20:16:26 +00:00
Johannes
b355d05b25 fix: Tweak Recontact UI (#6783)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-11-06 14:53:29 +00:00
Matti Nannt
e757e9aec9 fix: serve logo from self-hosted instance instead of external S3 bucket (#6781) 2025-11-05 14:57:44 +00:00
Dhruwang Jariwala
cf4119baf6 fix: update issue in welcome card (#6779) 2025-11-05 13:42:12 +00:00
Johannes
6be2ae3071 chore: update wording & UI tweak for easier SDK setup (#6777) 2025-11-05 06:10:14 +00:00
Dhruwang Jariwala
600b793641 chore: recalibrate survey editor width to 2/3 editor and 1/3 preview (#6772) 2025-11-04 09:10:31 +00:00
Dhruwang Jariwala
cde03b6997 fix: duplicate survey issue (#6774) 2025-11-04 08:19:25 +00:00
Anshuman Pandey
00371bfb01 docs: minio instructions for docker setup (#6773)
Co-authored-by: Akhilesh Patidar <akhileshpatidar989368@gmail.com>
Co-authored-by: Akhilesh <126186908+Akhileshait@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2025-11-04 06:23:05 +00:00
Johannes
6be6782531 docs: improve API docs for better DX (#6760)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-31 11:59:40 +00:00
Pyrrian
3ae4f8aa68 fix: nindent typo in securityContext helm chart (#6753) 2025-10-31 12:35:20 +01:00
Thomas Brugman
3d3c69a92b feat: Add Dutch language support. (#6737) 2025-10-31 12:35:08 +01:00
dependabot[bot]
b1b94eaa66 chore(deps): bump next-auth from 4.24.11 to 4.24.12 in /apps/web in the npm_and_yarn group across 1 directory (#6751)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-30 13:09:31 +00:00
Marc T.
67cc96449d fix: allow access of /animated-bgs/** from public url (#6748)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-30 12:21:50 +00:00
Dhruwang Jariwala
bf41a53b86 fix: survey ui loading issue (#6755) 2025-10-30 07:32:44 +00:00
Anshuman Pandey
26292ecf39 fix: welcome card headline in survey title (#6749) 2025-10-29 07:57:27 +00:00
Johannes
056e572a31 fix: move Follow ups to Enterprise plan (#6734)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-28 09:04:22 +00:00
Johannes
d7bbd219a3 refactor: simplify Stripe integration and rename enterprise to custom (#6720) 2025-10-28 07:45:59 +00:00
Hemachandar
fe5ff9a71c feat: Show SingleUse ID data in survey responses table (#6742)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-28 08:38:44 +01:00
Johannes
4e3438683e chore: Response page data handling optimization + UI tweaks (#6716)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-28 06:56:06 +00:00
Matti Nannt
f587446079 feat: Optimize layout data fetching and reduce database queries by 50% (#6729)
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-28 06:55:44 +00:00
Dhruwang Jariwala
7a3d05eb9a fix: prevent browser confirmation dialog after successful survey save (#6744) 2025-10-28 06:03:43 +00:00
Johannes
906b4da33c fix: execute pipeline on Create Response of Management API (#6712)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-27 17:34:00 +00:00
Aashish
33b9ee3a50 fix: enter button event applying to preview on right side when enter in welcome card editor #6739 (#6740)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-27 16:53:12 +00:00
Dhruwang Jariwala
5a693a548c fix: 1135 translation updates (#6743) 2025-10-27 10:52:04 +00:00
Matti Nannt
20614c2b12 chore: update Next.js to 15.5.6 (#6727)
Co-authored-by: Johannes <72809645+jobenjada@users.noreply.github.com>
2025-10-27 08:44:36 +00:00
Johannes
0c5e079d6f fix: embed mode for relevant question types (#6705)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-27 08:03:39 +00:00
Hemachandar
b3c16c8731 fix: parse question text-content for GSheets header row (#6736)
Co-authored-by: Matti Nannt <matti@formbricks.com>
2025-10-27 09:25:15 +01:00
Johannes
a6d45a63fa fix: breadcrumb dropdown active state and loading indicators (#6714)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-27 05:54:18 +00:00
Dhruwang Jariwala
a5fa876aa3 feat: refactor translation key management (#6717)
Co-authored-by: Piyush Gupta <piyushguptaa2z123@gmail.com>
Co-authored-by: Piyush Gupta <56182734+gupta-piyush19@users.noreply.github.com>
Co-authored-by: Victor Hugo dos Santos <115753265+victorvhs017@users.noreply.github.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
Co-authored-by: Matti Nannt <matti@formbricks.com>
Co-authored-by: Matti Nannt <mail@matthiasnannt.com>
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: Johannes <72809645+jobenjada@users.noreply.github.com>
2025-10-23 14:53:11 +00:00
Matti Nannt
c9a50a6ff2 chore(deps-dev): bump the npm_and_yarn group across 9 directories wit… (#6730)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-23 10:09:57 +00:00
Matti Nannt
19389bfffc chore: exclude TSX files from unit test coverage (#6723)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-10-22 12:55:44 +00:00
Johannes
accb4f461d docs: Open API Docs for Create Attribute Class (#6713) 2025-10-22 12:39:22 +00:00
Matti Nannt
c04c351244 chore: remove Next.js Redis cache handler (#6725) 2025-10-21 12:18:44 +00:00
Johannes
f7f8f07778 chore: clean up login screen (#6710) 2025-10-21 11:06:59 +00:00
Matti Nannt
3634385c6c docs: add AGENTS guidelines (#6718)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-10-21 09:45:17 +00:00
Matti Nannt
8bdfc0686f chore: apply prettier formatting (#6719) 2025-10-20 14:28:14 +00:00
Dhruwang Jariwala
74405cc05f fix: update OpenAPI schema for action class creation endpoint (#6617)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-18 15:16:48 +00:00
Johannes
785359955a chore: prevent phishing for CTA question & on thank you page (#6694)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-18 09:58:12 +00:00
Anshuman Pandey
f6157d5109 fix: Duplicate PR for fixing invalid email validation (#6709)
Co-authored-by: Aashish-png <aashishsarwa512@gmail.com>
Co-authored-by: Aashish <59650752+Aashish-png@users.noreply.github.com>
Co-authored-by: Johannes <johannes@formbricks.com>
2025-10-17 19:10:45 +00:00
Matti Nannt
070dd9f268 chore: remove cloud infrastructure from main repository (#6686) 2025-10-17 12:58:03 +00:00
Johannes
7a40d647d8 fix: prevent navigation collapse/expand flash on page load (quick fix) (#6678)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-17 12:56:13 +00:00
Johannes
2186a1c60d revert: revert accidental merges (#6701) (#6703) 2025-10-17 05:47:17 +00:00
Victor Hugo dos Santos
2054de4a9d chore: add PR size guidelines and pre-push hook for size checks (#6679) 2025-10-17 04:57:18 +00:00
Johannes
e068955fbf fix: removes unused migration and language flag from the codebase (#6704) 2025-10-16 15:34:04 +00:00
Johannes
4f5180ea8f fix: revert accidental merges (#6701) 2025-10-16 05:42:00 -07:00
Johannes
093013e1d2 Merge branch 'main' of https://github.com/formbricks/formbricks 2025-10-16 14:33:09 +02:00
Johannes
8b5b4b4172 Merge branch 'main' of https://github.com/formbricks/formbricks 2025-10-16 14:32:41 +02:00
Anshuman Pandey
36c5fc4a65 feat: rich text in headlines (#6685)
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-16 10:29:46 +00:00
Harsh Bhat
df191de1b4 docs: Add docs for headless use of Formbricks (#6700) 2025-10-16 03:28:35 -07:00
Johannes
8bb5428548 Merge branch 'main' of https://github.com/formbricks/formbricks 2025-10-15 18:32:34 +02:00
Johannes
b78f8d0599 fix: API key docs (#6697) 2025-10-15 09:12:45 -07:00
Johannes
36535e1e50 feat: Add language as default contact attribute for case-insensitive CSV matching
- Add language as a default attribute key in environment creation
- Create data migration to add language attribute key to existing environments
- Update tests to verify language is treated like other default attributes
- Fixes issue where CSV columns with 'Language' (capital L) would create duplicate custom attributes

The existing isStringMatch() function already handles case-insensitive matching,
so this change ensures language is properly matched alongside userId, email,
firstName, and lastName without any hardcoding in the UI layer.
2025-10-15 18:07:04 +02:00
Dhruwang Jariwala
e26a188d1b fix: use /releases/latest endpoint to fetch correct latest version (#6690) 2025-10-15 07:01:00 +00:00
Victor Hugo dos Santos
aaea129d4f fix: api key hashing algorithm (#6639)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-13 14:36:37 +00:00
Johannes
18f4cd977d feat: Add "None of the above" option for Multi-Select and Single-Select questions (#6646) 2025-10-10 07:50:45 -07:00
Dhruwang Jariwala
5468510f5a feat: recall in rich text (#6630)
Co-authored-by: Johannes <johannes@formbricks.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-09 09:45:08 +00:00
Victor Hugo dos Santos
76213af5d7 chore: update dependencies and improve logging format (#6672) 2025-10-09 09:02:07 +00:00
Anshuman Pandey
cdf0926c60 fix: restricts management file uploads size to be less than 5MB (#6669) 2025-10-09 05:02:52 +00:00
devin-ai-integration[bot]
84b3c57087 docs: add setLanguage method to user identification documentation (#6670)
Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Johannes <johannes@formbricks.com>
2025-10-08 16:20:11 +00:00
Victor Hugo dos Santos
ed10069b39 chore: update esbuild to latest version (#6662) 2025-10-08 14:11:24 +00:00
Anshuman Pandey
7c1033af20 fix: bumps nodemailer version (#6667) 2025-10-08 06:03:45 +00:00
Matti Nannt
98e3ad1068 perf(web): optimize Next.js image processing to prevent timeouts (#6665) 2025-10-08 05:02:04 +00:00
Johannes
b11fbd9f95 fix: upgrade axios and tar-fs to resolve dependabot issues (#6655) 2025-10-07 05:27:24 +00:00
Matti Nannt
c5e31d14d1 feat(docker): upgrade Traefik from v2.7 to v2.11.29 for security (#6636) 2025-10-07 05:20:49 +00:00
Matti Nannt
d64d561498 feat(ci): add conditional tagging based on 'Set as latest release' option (#6628) 2025-10-06 12:25:19 +00:00
Johannes
1bddc9e960 refactor: remove hidden fields toggle from UI (#6649) 2025-10-06 12:19:45 +00:00
Matti Nannt
3f122ed9ee perf: reduce cache TTL to 1 minute for SDK environment state and segments (#6635) 2025-10-06 10:12:46 +00:00
Jakob Schott
bdad80d6d1 fix: remove capitalize functions (#6610)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-10-06 10:07:23 +00:00
Johannes
d9ea00d86e fix: allow deselecting optional single-select question responses (#6643)
Co-authored-by: Victor Santos <victor@formbricks.com>
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-06 09:32:24 +00:00
Johannes
4a3c2fccba chore: add Cursor rule for Review & Refinement (#6648) 2025-10-06 01:38:42 -07:00
Johannes
3a09af674a feat: hit ENTER for new option (#6624)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-10-06 07:23:17 +00:00
Dhruwang Jariwala
1ced76c44d chore: added expirationDays param support in personal link api (#6578)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-06 07:12:29 +00:00
Victor Hugo dos Santos
fa1663d858 docs: enhance file upload troubleshooting guidance in migration (#6645)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-10-06 06:40:06 +00:00
Victor Hugo dos Santos
ebf591a7e0 fix: improve E2E test reliability and security (#6653) 2025-10-06 05:02:51 +00:00
Dhruwang Jariwala
5c9795cd23 chore: update @boxyhq/saml-jackson and posthog-node (#6647) 2025-10-04 09:26:30 +02:00
Victor Hugo dos Santos
b67177ba55 Merge commit from fork
* fix(auth): enhance password validation and rate limiting for login attempts

- Added password length validation to prevent CPU DoS attacks, limiting to 128 characters.
- Implemented constant-time password verification to mitigate timing attacks.
- Adjusted rate limit for login attempts from 30 to 10 per 15 minutes for improved security.
- Updated login form validation to reflect new password length constraints.
- Introduced constants for authentication endpoints in the API.

* fixed sample size for timing test

* password validation messages

---------

Co-authored-by: Your Name <you@example.com>
2025-10-02 11:09:28 +02:00
Johannes
6cf1f49c8e docs: add tag docs (#6640) 2025-10-02 01:47:31 -07:00
Johannes
4afb95b92a fix: switch Manage Subscription button bg to stripe color (#6633) 2025-10-01 12:00:44 +00:00
Piyush Gupta
38089241b4 chore: adds surveys package readme (#6598) 2025-10-01 11:26:03 +00:00
Johannes
07487d4871 docs: update license pages (#6631) 2025-10-01 01:40:19 -07:00
Johannes
fa0879e3a0 chore: increase visibility of hover effect to indicate clickability (#6622)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-30 12:44:13 +00:00
Anshuman Pandey
3733c22a6f fix: file uploads and cluster setup docs (#6623) 2025-09-30 01:46:02 -07:00
Anshuman Pandey
5e5baa76ab fix: fixes the formbricks.sh redis undefined volume bug (#6604) 2025-09-25 13:55:43 +00:00
Dhruwang Jariwala
2153d2aa16 fix: replace button with div in IdBadge to prevent hydration issues (#6601) 2025-09-25 13:42:41 +00:00
Matti Nannt
7fa4862fd9 feat: make S3_REGION optional in storage client configuration (#6577) 2025-09-25 12:25:35 +00:00
Matti Nannt
411e9a26ee fix(ci): update release tag validation to accept format without v prefix (#6585) 2025-09-25 12:09:19 +00:00
Victor Hugo dos Santos
eb1349f205 fix: enhance JWT handling with improved encryption and decryption logic (#6596) 2025-09-25 11:45:08 +00:00
Johannes
5c25f25212 docs: remove beta note (#6593) 2025-09-24 02:51:58 -07:00
Victor Hugo dos Santos
6af81e46ee chore: improve Sentry API logs with correlation ID and request context (#6584) 2025-09-24 09:25:51 +00:00
Jakob Schott
7423fc9472 fix: Improve messaging for mobile users (#6579)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-23 10:13:00 +00:00
Victor Hugo dos Santos
1557ffcca1 feat: add redis migration script (#6575)
Co-authored-by: Matti Nannt <matti@formbricks.com>
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-09-22 11:18:02 +00:00
Piyush Gupta
5d53ed76ed fix: logic fallback cleanup (#6568) 2025-09-22 08:10:27 +00:00
Dhruwang Jariwala
ebd399e611 fix: block previews for completed and paused surveys (#6576) 2025-09-22 07:21:38 +00:00
Dhruwang Jariwala
843110b0d6 fix: followup toast (#6565) 2025-09-19 13:03:56 +00:00
Anshuman Pandey
51babf2f98 fix: minor csp change and removes uploads volume (#6566) 2025-09-19 10:20:38 +00:00
Victor Hugo dos Santos
6bc5f1e168 feat: add cache integration tests and update E2E workflow (#6551) 2025-09-19 08:44:31 +00:00
Piyush Gupta
c9016802e7 docs: updated screenshots in docs (#6562) 2025-09-18 19:19:14 +00:00
Anshuman Pandey
6a49fb4700 feat: adds one-click MinIO migration script for Formbricks 4.0 (#6553)
Co-authored-by: Victor Santos <victor@formbricks.com>
2025-09-18 16:23:03 +00:00
Dhruwang Jariwala
646921cd37 fix: logic issues (#6561) 2025-09-18 18:31:44 +02:00
Dhruwang Jariwala
34d3145fcd fix: broken churn survey template (#6559) 2025-09-18 11:18:39 +00:00
Dhruwang Jariwala
c3c06eb309 fix: empty container in template UI (#6556) 2025-09-18 06:45:20 +00:00
Dhruwang Jariwala
bf4c6238d5 fix: api key modal tweaks (#6552)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-09-17 15:00:42 +00:00
Dhruwang Jariwala
8972ef0fef fix: integration redirect links (#6555) 2025-09-17 14:59:35 +00:00
Matti Nannt
4e59924a5a fix: e2e tests issue due to security policy (#6558) 2025-09-17 16:54:07 +02:00
Matti Nannt
8b28353b79 fix: release tag extraction in release action (#6554) 2025-09-16 17:33:32 +00:00
Matti Nannt
abbc7a065b chore: update release pipeline for new infrastructure (#6541) 2025-09-16 10:33:24 +00:00
Harsh Bhat
00e8ee27a2 docs: Add redirect error handling (#6548) 2025-09-15 06:03:41 -07:00
Dhruwang Jariwala
379aeba71a fix: synced translations (#6547) 2025-09-15 10:19:02 +00:00
Anshuman Pandey
717adddeae feat: adds docs for s3 compatible storage (#6538)
Co-authored-by: Matthias Nannt <mail@matthiasnannt.com>
2025-09-15 07:34:46 +00:00
Dhruwang Jariwala
41798266a0 fix: quota translations (#6546) 2025-09-15 07:04:40 +00:00
Matti Nannt
a93fa8ec76 chore: use stable tag to manage releases and ensure one-click-setup c… (#6540) 2025-09-12 17:03:13 +00:00
Piyush Gupta
47c3df0466 feat: adds survey package translation files (#6539)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-12 12:35:37 +00:00
Victor Hugo dos Santos
935e24bd43 chore: clean-up new cache package (#6532) 2025-09-12 11:16:13 +00:00
dependabot[bot]
3879d86f63 chore(deps-dev): bump the npm_and_yarn group across 2 directories with 1 update (#6537)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-12 12:28:36 +02:00
Matti Nannt
839144d338 chore: remove unused fields and tables from prisma schema (#6531)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-12 09:01:03 +00:00
Anshuman Pandey
96031822a6 feat: s3 compatible storage (#6536)
Co-authored-by: Victor Santos <victor@formbricks.com>
2025-09-12 08:17:33 +00:00
Piyush Gupta
21c8b5d6e4 feat: adds multi language functionality to surveys package (#6527)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-11 13:48:08 +00:00
Matti Nannt
22d4952a40 chore: remove ios and android package from monorepo (#6533) 2025-09-11 12:57:55 +00:00
dependabot[bot]
933723f1fe chore(deps-dev): bump the npm_and_yarn group across 9 directories with 1 update (#6526)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-11 09:25:10 +02:00
Piyush Gupta
dd394f1d2c chore: remove cron jobs and survey scheduling functionality (#6505)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-11 06:57:11 +00:00
Dhruwang Jariwala
0188aad97b feat: nav cleanup pt. 2 (#6515)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-09-11 04:07:17 +00:00
Yuuenn
d46644fe0d feat: add Simplified Chinese (zh-Hans-CN) translations #6511 (#6518)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-10 12:13:48 +00:00
Victor Hugo dos Santos
c259a61f0e feat: unified cache (#6520) 2025-09-10 09:59:16 +00:00
Piyush Gupta
feee22b5c3 feat: Quota management(part 1 & part 2) (#6521)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
Co-authored-by: Dhruwang Jariwala <67850763+Dhruwang@users.noreply.github.com>
2025-09-09 13:25:05 +00:00
Dhruwang Jariwala
a5433f6748 feat: improved project and org switch (#6500)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-09-09 12:58:44 +00:00
Dhruwang Jariwala
557f14bab8 fix: required questions being skipped (#6506)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-09-09 08:33:41 +00:00
Dhruwang Jariwala
fdba260301 fix: project styling settings issues (#6488) 2025-09-09 08:33:28 +00:00
devin-ai-integration[bot]
764b8ec260 docs: update single use links documentation to reflect sharing modal location (#6513)
Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Johannes <johannes@formbricks.com>
2025-09-08 12:08:26 +00:00
Dhruwang Jariwala
ac5d1e651e fix: untranslated string (#6512) 2025-09-08 10:27:31 +00:00
Johannes
62ffcc8e68 docs: clarified Roles docs + added 2FA (#6507) 2025-09-05 04:03:44 -07:00
Dhruwang Jariwala
326872a86b fix: response data table settings modal breaking (#6501) 2025-09-05 10:41:39 +00:00
Victor Hugo dos Santos
892b55662e chore: conditionally enable Sentry plugin based on authentication token (#6502) 2025-09-05 09:44:46 +00:00
Harsh Bhat
23143c8664 docs: Integrate mintlify docs with Posthog (#6487)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-09-05 07:27:21 +00:00
dependabot[bot]
4c71caf0da chore(deps): bump the npm_and_yarn group across 2 directories with 1 update (#6491)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-05 07:50:27 +02:00
Dhruwang Jariwala
173821f846 chore: dropdown menu storybook (#6453) 2025-09-04 05:20:38 +00:00
Matti Nannt
f139830020 fix: sentry source map upload workflow authentication (#6327)
Co-authored-by: Victor Santos <victor@formbricks.com>
Co-authored-by: Victor Hugo dos Santos <115753265+victorvhs017@users.noreply.github.com>
2025-09-02 21:57:41 +00:00
Jonathan Reimer
70979a3b5b Add Linux Foundation health score badge to README (#6496) 2025-09-02 18:35:14 +02:00
Matti Nannt
061fa036be chore: add deployment options to ECR image build action (#6498) 2025-09-02 17:54:15 +02:00
Dhruwang Jariwala
b83c0a4a5d chore: update romanian translations (#6495) 2025-09-02 10:58:49 +00:00
Dhruwang Jariwala
1bc0563965 fix: update action class issue (#6484) 2025-09-02 10:44:22 +00:00
Dhruwang Jariwala
3a4e2a9f85 fix: duplicate response and contact deletion calls (#6489) 2025-09-02 05:49:20 +00:00
Dhruwang Jariwala
bd48139a4f chore: tag stories (#6468) 2025-09-01 13:46:10 +00:00
Harsh Bhat
89fe82a0d6 docs: Add docs for Link settings (#6492) 2025-09-01 13:44:06 +00:00
om pharate
65dc1fa771 fix(tooltip): wrap TooltipContent in a Portal for improved rendering (#6458)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-01 11:49:54 +00:00
Dhruwang Jariwala
438990bffc chore: slider component story (#6469) 2025-09-01 10:56:50 +00:00
Dhruwang Jariwala
7f7bc989c6 fix: data table toolbar alignment (#6486) 2025-09-01 10:14:22 +00:00
Victor Hugo dos Santos
baa2b31bc9 fix: conditional logic build groups bug (#6476) 2025-09-01 10:04:31 +00:00
Matti Nannt
77aecf3aad chore: upgrade nextjs to 15.5.0 (#6454)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-09-01 09:51:17 +02:00
Dhruwang Jariwala
7c1110239b fix: mobile preview on large screens (#6478) 2025-08-29 08:51:40 +00:00
Dhruwang Jariwala
eeb337521b fix: email verify survey question preview (#6474) 2025-08-29 05:46:14 +00:00
Dhruwang Jariwala
182f674879 fix: multiple recalls in redirect url (#6467) 2025-08-28 08:38:58 +00:00
Piyush Gupta
73c0da4b75 chore: Updates prisma to the latest version (#6457) 2025-08-28 07:44:01 +00:00
Matti Nannt
f475b2e6d5 chore: remove deprecated scale plan from stripe subscription update (#6472) 2025-08-27 14:38:38 +00:00
Dhruwang Jariwala
e5e8941016 chore: tweaked confirmation modal (#6471)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-08-27 13:11:23 +00:00
Anshuman Pandey
c39c9998f0 fix: surveys package rtl (#6379)
Co-authored-by: Johannes <johannes@formbricks.com>
2025-08-27 05:52:46 +00:00
Piyush Gupta
a8c8e6f83f feat: adds switch component stories (#6462)
Co-authored-by: Dhruwang <dhruwangjariwala18@gmail.com>
2025-08-26 06:15:03 +00:00
Dhruwang Jariwala
8a5e9f38d7 chore: delete dialog stories (#6452)
Co-authored-by: pandeymangg <anshuman.pandey9999@gmail.com>
2025-08-26 05:54:28 +00:00
Dhruwang Jariwala
a0740d20ea chore: improved version comparison (#6413) 2025-08-26 05:54:16 +00:00
Dhruwang Jariwala
71f378a494 fix: select dropdown in project create modal (#6465) 2025-08-26 04:46:23 +00:00
Dhruwang Jariwala
4bececeb56 fix: Japanese translations (#6464) 2025-08-25 12:06:49 +00:00
Satoshi
71c96f48d7 feat: japanese translations (#6461) 2025-08-25 02:27:31 -07:00
Johannes
05d88a3069 docs: add API reference to Personal Links (#6463) 2025-08-24 23:48:36 -07:00
Piyush Gupta
b6a63edc88 feat: adds line break support in open text question textarea (#6456) 2025-08-25 05:57:04 +00:00
2692 changed files with 145145 additions and 167202 deletions

71
.agent/scripts/README.md Normal file

@@ -0,0 +1,71 @@
# Skill Filter
Automatically filters Vercel React best practices to reduce AI token costs while keeping high-impact performance patterns.
## Quick Start
```bash
# Fetch and filter (first time)
pnpm filter-skills --fetch
# Re-filter after config changes
pnpm filter-skills
```
**Result:** ~50% reduction in skill files (keeps CRITICAL/HIGH/MEDIUM priorities, removes LOW priority rules)
## Configuration
Edit `.agent/skills/react-best-practices/skill-filter-config.json`:
```json
{
  "featureFlags": {
    "keepCriticalPriority": true, // async-*, bundle-*
    "keepHighPriority": true, // server-*
    "keepMediumPriority": true, // rerender-*
    "keepLowPriority": false, // js-*, rendering-*, advanced-*
    "removeJsOptimizations": true,
    "removeRenderingOptimizations": true,
    "removeAdvancedPatterns": true
  }
}
```
**Toggle LOW priority rules:** Set `keepLowPriority: true`
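For example, a minimal config change (a sketch; only the flag shown here differs from the defaults above, and rules already moved to `.archived/` still need to be restored manually, see below):
```json
{
  "featureFlags": {
    "keepLowPriority": true
  }
}
```
Then re-run `pnpm filter-skills`.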
## What It Does
1. Downloads latest skills from GitHub (with `--fetch`)
2. Filters based on priority and used technologies
3. Archives unused rules to `.archived/` (not tracked in git)
4. Formats markdown with Prettier to match project style
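For reference, the script's `saveReport` helper also writes a `filter-report.json` next to the skills summarizing these decisions. A minimal sketch of its shape (field names follow the `FilterReport` interface in `filter-skills.ts` below; the entries are illustrative):
```json
{
  "kept": [{ "file": "async-parallel.md", "reason": "Always keep (critical pattern)" }],
  "archived": [{ "file": "js-early-exit.md", "reason": "Always remove (low priority optimization)" }],
  "technologiesDetected": [],
  "summary": { "totalRules": 2, "keptRules": 1, "archivedRules": 1, "reductionPercent": 50 }
}
```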
## Why This Works
- **AI Skills = Proactive:** Guide developers to write correct code from the start
- **Linting = Reactive:** Catch mistakes after code is written
- **Together:** AI prevents issues, linting catches what slips through
Token costs are an investment in preventing technical debt rather than fixing it later.
## Restore Archived Rules
```bash
mv .agent/skills/react-best-practices/.archived/rule-name.md \
.agent/skills/react-best-practices/rules/
```
Then re-run: `pnpm filter-skills`
## Commands
```bash
pnpm filter-skills # Filter with current config
pnpm filter-skills:dry-run # Preview changes
pnpm filter-skills --fetch # Fetch latest + filter
```
---
**Source:** [vercel-labs/agent-skills](https://github.com/vercel-labs/agent-skills)

439
.agent/scripts/filter-skills.ts Executable file

@@ -0,0 +1,439 @@
#!/usr/bin/env tsx
import * as fs from 'node:fs';
import * as path from 'node:path';
import * as os from 'node:os';
import { execSync } from 'node:child_process';

interface FilterConfig {
  featureFlags: {
    keepCriticalPriority: boolean;
    keepHighPriority: boolean;
    keepMediumPriority: boolean;
    keepLowPriority: boolean;
    removeJsOptimizations: boolean;
    removeRenderingOptimizations: boolean;
    removeAdvancedPatterns: boolean;
  };
  priorities: {
    keep: string[];
    conditionalKeep: string[];
    remove: string[];
  };
  technologyDetection: Record<string, {
    packageNames: string[];
    codePatterns: string[];
    relatedRules: string[];
  }>;
  alwaysKeep: string[];
  alwaysRemove: string[];
}

interface FilterReport {
  kept: { file: string; reason: string }[];
  archived: { file: string; reason: string }[];
  technologiesDetected: string[];
  summary: {
    totalRules: number;
    keptRules: number;
    archivedRules: number;
    reductionPercent: number;
  };
}
const PROJECT_ROOT = path.resolve(__dirname, '../..');
const SKILLS_DIR = path.join(PROJECT_ROOT, '.agent/skills/react-best-practices');
const RULES_DIR = path.join(SKILLS_DIR, 'rules');
const ARCHIVE_DIR = path.join(SKILLS_DIR, '.archived');
const CONFIG_PATH = path.join(PROJECT_ROOT, '.agent/skill-filter-config.json');
const PACKAGE_JSON_PATH = path.join(PROJECT_ROOT, 'package.json');
// Parse command line arguments
const args = new Set(process.argv.slice(2));
const isDryRun = args.has('--dry-run');

function loadConfig(): FilterConfig {
  const configContent = fs.readFileSync(CONFIG_PATH, 'utf-8');
  return JSON.parse(configContent);
}

function validateConfig(config: FilterConfig): void {
  console.log('✓ Configuration is valid');
  console.log(` - ${config.alwaysKeep.length} rules marked as always keep`);
  console.log(` - ${config.alwaysRemove.length} rules marked as always remove`);
  console.log(` - ${Object.keys(config.technologyDetection).length} technologies configured for detection`);
}

function hasRipgrep(): boolean {
  try {
    execSync('rg --version', { stdio: 'ignore' });
    return true;
  } catch {
    return false;
  }
}

function detectTechnologies(config: FilterConfig): Set<string> {
  const detected = new Set<string>();
  // Check package.json dependencies
  const packageJson = JSON.parse(fs.readFileSync(PACKAGE_JSON_PATH, 'utf-8'));
  const allDeps = {
    ...packageJson.dependencies,
    ...packageJson.devDependencies,
  };
  const hasRg = hasRipgrep();
  if (!hasRg && Object.keys(config.technologyDetection).some(t => config.technologyDetection[t].codePatterns.length > 0)) {
    console.warn('⚠️ Ripgrep (rg) not found. Code pattern detection will be skipped.');
  }
  for (const [techName, techConfig] of Object.entries(config.technologyDetection)) {
    // Check for package dependencies
    const hasPackage = techConfig.packageNames.some(pkg => allDeps[pkg]);
    if (hasPackage) {
      detected.add(techName);
      continue;
    }
    // Check for code patterns using ripgrep
    if (hasRg && techConfig.codePatterns.length > 0) {
      for (const pattern of techConfig.codePatterns) {
        try {
          // Use ripgrep to search for patterns in TypeScript/JavaScript files
          // Use String.raw to avoid escaping issues, though redundant with single quotes in shell
          execSync(
            `rg -q '${pattern.replace(/'/g, "\\'")}' -g '*.ts' -g '*.tsx' -g '*.js' -g '*.jsx' "${PROJECT_ROOT}"`,
            { stdio: 'ignore' }
          );
          detected.add(techName);
          break;
        } catch {
          // Pattern not found or error, continue checking
        }
      }
    }
  }
  return detected;
}

function getRulePriority(filename: string): string | null {
  const content = fs.readFileSync(path.join(RULES_DIR, filename), 'utf-8');
  const match = content.match(/impact:\s*([A-Z-]+)/);
  return match ? match[1] : null;
}

function shouldKeepRule(
  filename: string,
  config: FilterConfig,
  detectedTechnologies: Set<string>
): { keep: boolean; reason: string } {
  const flags = config.featureFlags;
  // 1. Check feature flag naming conventions (hardcoded optimization flags)
  if (flags.removeJsOptimizations && filename.startsWith('js-')) {
    return { keep: false, reason: 'Feature flag: removeJsOptimizations' };
  }
  if (flags.removeRenderingOptimizations && filename.startsWith('rendering-')) {
    return { keep: false, reason: 'Feature flag: removeRenderingOptimizations' };
  }
  if (flags.removeAdvancedPatterns && filename.startsWith('advanced-')) {
    return { keep: false, reason: 'Feature flag: removeAdvancedPatterns' };
  }
  // 2. Check always keep/remove lists
  if (config.alwaysKeep.includes(filename)) {
    return { keep: true, reason: 'Always keep (critical pattern)' };
  }
  if (config.alwaysRemove.includes(filename)) {
    return { keep: false, reason: 'Always remove (low priority optimization)' };
  }
  // 3. Check technology detection
  for (const [techName, techConfig] of Object.entries(config.technologyDetection)) {
    if (techConfig.relatedRules.includes(filename)) {
      if (detectedTechnologies.has(techName)) {
        return { keep: true, reason: `Technology detected: ${techName}` };
      } else {
        return { keep: false, reason: `Technology not used: ${techName}` };
      }
    }
  }
  // 4. Check priority
  const priority = getRulePriority(filename);
  if (priority) {
    // Feature flag overrides for priorities
    if (priority === 'CRITICAL' && !flags.keepCriticalPriority) return { keep: false, reason: 'Feature flag: keepCriticalPriority disabled' };
    if (priority === 'HIGH' && !flags.keepHighPriority) return { keep: false, reason: 'Feature flag: keepHighPriority disabled' };
    if ((priority === 'MEDIUM' || priority === 'MEDIUM-HIGH') && !flags.keepMediumPriority) return { keep: false, reason: 'Feature flag: keepMediumPriority disabled' };
    if ((priority === 'LOW' || priority === 'LOW-MEDIUM') && flags.keepLowPriority) return { keep: true, reason: 'Feature flag: keepLowPriority enabled' };
    // Standard priority lists
    if (config.priorities.keep.includes(priority)) return { keep: true, reason: `Priority: ${priority}` };
    if (config.priorities.conditionalKeep.includes(priority)) return { keep: true, reason: `Priority: ${priority} (conditional keep)` };
    if (config.priorities.remove.includes(priority)) return { keep: false, reason: `Priority: ${priority}` };
  }
  // Default
  return { keep: true, reason: 'Default (no matching rule)' };
}

function filterRules(config: FilterConfig, detectedTechnologies: Set<string>): FilterReport {
  const report: FilterReport = {
    kept: [],
    archived: [],
    technologiesDetected: Array.from(detectedTechnologies),
    summary: {
      totalRules: 0,
      keptRules: 0,
      archivedRules: 0,
      reductionPercent: 0,
    },
  };
  const ruleFiles = fs.readdirSync(RULES_DIR).filter(f => f.endsWith('.md'));
  report.summary.totalRules = ruleFiles.length;
  for (const filename of ruleFiles) {
    const decision = shouldKeepRule(filename, config, detectedTechnologies);
    if (decision.keep) {
      report.kept.push({ file: filename, reason: decision.reason });
      report.summary.keptRules++;
    } else {
      report.archived.push({ file: filename, reason: decision.reason });
      report.summary.archivedRules++;
      if (!isDryRun) {
        // Move to archive
        const sourcePath = path.join(RULES_DIR, filename);
        const archivePath = path.join(ARCHIVE_DIR, filename);
        fs.mkdirSync(ARCHIVE_DIR, { recursive: true });
        fs.renameSync(sourcePath, archivePath);
      }
    }
  }
  report.summary.reductionPercent = Math.round(
    report.summary.totalRules > 0
      ? (report.summary.archivedRules / report.summary.totalRules) * 100
      : 0
  );
  return report;
}

function printReport(report: FilterReport): void {
  console.log('\n' + '='.repeat(80));
  console.log('SKILL FILTER REPORT');
  console.log('='.repeat(80) + '\n');
  console.log('📊 SUMMARY');
  console.log(` Total rules: ${report.summary.totalRules}`);
  console.log(` Kept: ${report.summary.keptRules} (${100 - report.summary.reductionPercent}%)`);
  console.log(` Archived: ${report.summary.archivedRules} (${report.summary.reductionPercent}%)`);
  console.log('');
  console.log('🔍 TECHNOLOGIES DETECTED');
  if (report.technologiesDetected.length > 0) {
    report.technologiesDetected.forEach(tech => console.log(`${tech}`));
  } else {
    console.log(' (none detected)');
  }
  console.log('');
  console.log('✅ KEPT RULES (' + report.kept.length + ')');
  report.kept.forEach(({ file, reason }) => {
    console.log(`${file.padEnd(45)}${reason}`);
  });
  console.log('');
  console.log('📦 ARCHIVED RULES (' + report.archived.length + ')');
  report.archived.forEach(({ file, reason }) => {
    console.log(`${file.padEnd(45)}${reason}`);
  });
  console.log('');
  if (isDryRun) {
    console.log('🔍 DRY RUN MODE - No files were modified');
  } else {
    console.log('✨ Filtering complete! Archived rules moved to .archived/');
  }
  console.log('');
}

function saveReport(report: FilterReport): void {
  const reportPath = path.join(SKILLS_DIR, 'filter-report.json');
  fs.writeFileSync(reportPath, JSON.stringify(report, null, 2));
  console.log(`📝 Report saved to: ${reportPath}`);
}
function fetchSkills(): void {
console.log('📥 Fetching Vercel React best practices from GitHub...\n');
// Use os.tmpdir() for safer temp directory
const tempBase = fs.mkdtempSync(path.join(os.tmpdir(), 'agent-skills-'));
const tarballUrl = 'https://github.com/vercel-labs/agent-skills/archive/refs/heads/main.tar.gz';
try {
console.log(' → Downloading tarball from GitHub...');
// Download and extract the entire repo first
try {
execSync(
`curl -fsSL "${tarballUrl}" | tar -xz -C "${tempBase}"`,
{ stdio: 'pipe' }
);
} catch (e) {
throw new Error('Failed to download skills. Check your internet connection or curl availability.');
}
// Find the extracted directory and move the skills subdirectory
const extractedDir = path.join(tempBase, 'agent-skills-main/skills/react-best-practices');
if (!fs.existsSync(extractedDir)) {
throw new Error(`Skills directory not found in downloaded content: ${extractedDir}`);
}
// Move to final location
if (fs.existsSync(SKILLS_DIR)) {
console.log(' → Removing old skills...');
fs.rmSync(SKILLS_DIR, { recursive: true, force: true });
}
console.log(' → Installing to .agent/skills/...');
fs.mkdirSync(path.dirname(SKILLS_DIR), { recursive: true });
fs.renameSync(extractedDir, SKILLS_DIR);
// The filter config lives outside SKILLS_DIR, so fetching skills never overwrites it.
// If the config is missing entirely, create a sensible default so filtering can still run.
if (!fs.existsSync(CONFIG_PATH)) {
console.log('⚠️ Config file missing at new location. Creating default...');
const defaultConfig = {
featureFlags: {
keepCriticalPriority: true,
keepHighPriority: true,
keepMediumPriority: true,
keepLowPriority: false,
removeJsOptimizations: true,
removeRenderingOptimizations: true,
removeAdvancedPatterns: true
},
priorities: {
keep: ["CRITICAL", "HIGH"],
conditionalKeep: ["MEDIUM", "MEDIUM-HIGH"],
remove: ["LOW", "LOW-MEDIUM"]
},
technologyDetection: {},
alwaysKeep: [
"async-defer-await.md",
"async-parallel.md",
"async-dependencies.md",
"async-api-routes.md",
"bundle-barrel-imports.md",
"bundle-dynamic-imports.md",
"bundle-defer-third-party.md",
"bundle-conditional.md",
"bundle-preload.md",
"rerender-functional-setstate.md",
"rerender-memo.md",
"rerender-dependencies.md",
"rerender-defer-reads.md"
],
alwaysRemove: [
"js-batch-dom-css.md",
"js-cache-property-access.md",
"js-combine-iterations.md",
"js-early-exit.md",
"js-hoist-regexp.md",
"js-index-maps.md",
"js-length-check-first.md",
"js-min-max-loop.md",
"js-set-map-lookups.md",
"js-tosorted-immutable.md",
"js-cache-function-results.md",
"rendering-activity.md",
"rendering-animate-svg-wrapper.md",
"rendering-conditional-render.md",
"rendering-content-visibility.md",
"rendering-hoist-jsx.md",
"rendering-hydration-no-flicker.md",
"rendering-svg-precision.md",
"advanced-event-handler-refs.md",
"advanced-use-latest.md"
]
};
fs.writeFileSync(CONFIG_PATH, JSON.stringify(defaultConfig, null, 2));
}
console.log('✓ Skills fetched successfully\n');
} finally {
// Always clean up temp directory
try {
fs.rmSync(tempBase, { recursive: true, force: true });
} catch (e) {
// Ignore cleanup errors
}
}
}
function formatSkills(): void {
console.log('🎨 Formatting skill files to match project code style...\n');
try {
execSync(
`prettier --write "${SKILLS_DIR}/**/*.md"`,
{ stdio: 'inherit', cwd: PROJECT_ROOT }
);
console.log('✓ Formatting complete\n');
} catch (error) {
console.log('⚠️ Formatting failed (non-critical):', error);
}
}
// Main execution
try {
const shouldFetch = args.has('--fetch') || !fs.existsSync(SKILLS_DIR);
// Auto-fetch if skills don't exist
if (shouldFetch) {
fetchSkills();
}
// Check if skills exist after potential fetch
if (!fs.existsSync(SKILLS_DIR)) {
console.error('❌ Skills directory not found!\n');
console.error('Please run with --fetch flag:');
console.error(' pnpm filter-skills --fetch\n');
process.exit(1);
}
const validateOnly = args.has('--validate-config');
const config = loadConfig();
if (validateOnly) {
validateConfig(config);
process.exit(0);
}
console.log('🔍 Detecting technologies used in codebase...\n');
const detectedTechnologies = detectTechnologies(config);
console.log('🎯 Filtering skills...\n');
const report = filterRules(config, detectedTechnologies);
printReport(report);
if (!isDryRun) {
saveReport(report);
formatSkills();
}
process.exit(0);
} catch (error) {
console.error('❌ Error:', error);
process.exit(1);
}

View File

@@ -0,0 +1,63 @@
{
"featureFlags": {
"keepCriticalPriority": true,
"keepHighPriority": true,
"keepMediumPriority": true,
"keepLowPriority": false,
"removeJsOptimizations": true,
"removeRenderingOptimizations": true,
"removeAdvancedPatterns": true
},
"priorities": {
"keep": [
"CRITICAL",
"HIGH"
],
"conditionalKeep": [
"MEDIUM",
"MEDIUM-HIGH"
],
"remove": [
"LOW",
"LOW-MEDIUM"
]
},
"technologyDetection": {},
"alwaysKeep": [
"async-defer-await.md",
"async-parallel.md",
"async-dependencies.md",
"async-api-routes.md",
"bundle-barrel-imports.md",
"bundle-dynamic-imports.md",
"bundle-defer-third-party.md",
"bundle-conditional.md",
"bundle-preload.md",
"rerender-functional-setstate.md",
"rerender-memo.md",
"rerender-dependencies.md",
"rerender-defer-reads.md"
],
"alwaysRemove": [
"js-batch-dom-css.md",
"js-cache-property-access.md",
"js-combine-iterations.md",
"js-early-exit.md",
"js-hoist-regexp.md",
"js-index-maps.md",
"js-length-check-first.md",
"js-min-max-loop.md",
"js-set-map-lookups.md",
"js-tosorted-immutable.md",
"js-cache-function-results.md",
"rendering-activity.md",
"rendering-animate-svg-wrapper.md",
"rendering-conditional-render.md",
"rendering-content-visibility.md",
"rendering-hoist-jsx.md",
"rendering-hydration-no-flicker.md",
"rendering-svg-precision.md",
"advanced-event-handler-refs.md",
"advanced-use-latest.md"
]
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,127 @@
# React Best Practices
A structured repository for creating and maintaining React Best Practices optimized for agents and LLMs.
## Structure
- `rules/` - Individual rule files (one per rule)
  - `_sections.md` - Section metadata (titles, impacts, descriptions)
  - `_template.md` - Template for creating new rules
  - `area-description.md` - Individual rule files
- `src/` - Build scripts and utilities
- `metadata.json` - Document metadata (version, organization, abstract)
- **`AGENTS.md`** - Compiled output (generated)
- **`test-cases.json`** - Test cases for LLM evaluation (generated)
## Getting Started
1. Install dependencies:
```bash
pnpm install
```
2. Build AGENTS.md from rules:
```bash
pnpm build
```
3. Validate rule files:
```bash
pnpm validate
```
4. Extract test cases:
```bash
pnpm extract-tests
```
## Creating a New Rule
1. Copy `rules/_template.md` to `rules/area-description.md`
2. Choose the appropriate area prefix:
- `async-` for Eliminating Waterfalls (Section 1)
- `bundle-` for Bundle Size Optimization (Section 2)
- `server-` for Server-Side Performance (Section 3)
- `client-` for Client-Side Data Fetching (Section 4)
- `rerender-` for Re-render Optimization (Section 5)
- `rendering-` for Rendering Performance (Section 6)
- `js-` for JavaScript Performance (Section 7)
- `advanced-` for Advanced Patterns (Section 8)
3. Fill in the frontmatter and content
4. Ensure you have clear examples with explanations
5. Run `pnpm build` to regenerate AGENTS.md and test-cases.json
## Rule File Structure
Each rule file should follow this structure:
````markdown
---
title: Rule Title Here
impact: MEDIUM
impactDescription: Optional description
tags: tag1, tag2, tag3
---
## Rule Title Here
Brief explanation of the rule and why it matters.
**Incorrect (description of what's wrong):**
```typescript
// Bad code example
```
**Correct (description of what's right):**
```typescript
// Good code example
```
Optional explanatory text after examples.
Reference: [Link](https://example.com)
````
## File Naming Convention
- Files starting with `_` are special (excluded from build)
- Rule files: `area-description.md` (e.g., `async-parallel.md`)
- Section is automatically inferred from filename prefix
- Rules are sorted alphabetically by title within each section
- IDs (e.g., 1.1, 1.2) are auto-generated during build
## Impact Levels
- `CRITICAL` - Highest priority, major performance gains
- `HIGH` - Significant performance improvements
- `MEDIUM-HIGH` - Moderate-high gains
- `MEDIUM` - Moderate performance improvements
- `LOW-MEDIUM` - Low-medium gains
- `LOW` - Incremental improvements
## Scripts
- `pnpm build` - Compile rules into AGENTS.md
- `pnpm validate` - Validate all rule files
- `pnpm extract-tests` - Extract test cases for LLM evaluation
- `pnpm dev` - Build and validate
## Contributing
When adding or modifying rules:
1. Use the correct filename prefix for your section
2. Follow the `_template.md` structure
3. Include clear bad/good examples with explanations
4. Add appropriate tags
5. Run `pnpm build` to regenerate AGENTS.md and test-cases.json
6. Rules are automatically sorted by title - no need to manage numbers!
## Acknowledgments
Originally created by [@shuding](https://x.com/shuding) at [Vercel](https://vercel.com).

View File

@@ -0,0 +1,138 @@
---
name: vercel-react-best-practices
description: React and Next.js performance optimization guidelines from Vercel Engineering. This skill should be used when writing, reviewing, or refactoring React/Next.js code to ensure optimal performance patterns. Triggers on tasks involving React components, Next.js pages, data fetching, bundle optimization, or performance improvements.
license: MIT
metadata:
author: vercel
version: "1.0.0"
---
# Vercel React Best Practices
Comprehensive performance optimization guide for React and Next.js applications, maintained by Vercel. Contains 57 rules across 8 categories, prioritized by impact to guide automated refactoring and code generation.
## When to Apply
Reference these guidelines when:
- Writing new React components or Next.js pages
- Implementing data fetching (client or server-side)
- Reviewing code for performance issues
- Refactoring existing React/Next.js code
- Optimizing bundle size or load times
## Rule Categories by Priority
| Priority | Category | Impact | Prefix |
| -------- | ------------------------- | ----------- | ------------ |
| 1 | Eliminating Waterfalls | CRITICAL | `async-` |
| 2 | Bundle Size Optimization | CRITICAL | `bundle-` |
| 3 | Server-Side Performance | HIGH | `server-` |
| 4 | Client-Side Data Fetching | MEDIUM-HIGH | `client-` |
| 5 | Re-render Optimization | MEDIUM | `rerender-` |
| 6 | Rendering Performance | MEDIUM | `rendering-` |
| 7 | JavaScript Performance | LOW-MEDIUM | `js-` |
| 8 | Advanced Patterns | LOW | `advanced-` |
## Quick Reference
### 1. Eliminating Waterfalls (CRITICAL)
- `async-defer-await` - Move await into branches where actually used
- `async-parallel` - Use Promise.all() for independent operations
- `async-dependencies` - Use better-all for partial dependencies
- `async-api-routes` - Start promises early, await late in API routes
- `async-suspense-boundaries` - Use Suspense to stream content
### 2. Bundle Size Optimization (CRITICAL)
- `bundle-barrel-imports` - Import directly, avoid barrel files
- `bundle-dynamic-imports` - Use next/dynamic for heavy components
- `bundle-defer-third-party` - Load analytics/logging after hydration
- `bundle-conditional` - Load modules only when feature is activated
- `bundle-preload` - Preload on hover/focus for perceived speed
### 3. Server-Side Performance (HIGH)
- `server-auth-actions` - Authenticate server actions like API routes
- `server-cache-react` - Use React.cache() for per-request deduplication
- `server-cache-lru` - Use LRU cache for cross-request caching
- `server-dedup-props` - Avoid duplicate serialization in RSC props
- `server-serialization` - Minimize data passed to client components
- `server-parallel-fetching` - Restructure components to parallelize fetches
- `server-after-nonblocking` - Use after() for non-blocking operations
### 4. Client-Side Data Fetching (MEDIUM-HIGH)
- `client-swr-dedup` - Use SWR for automatic request deduplication
- `client-event-listeners` - Deduplicate global event listeners
- `client-passive-event-listeners` - Use passive listeners for scroll
- `client-localstorage-schema` - Version and minimize localStorage data
### 5. Re-render Optimization (MEDIUM)
- `rerender-defer-reads` - Don't subscribe to state only used in callbacks
- `rerender-memo` - Extract expensive work into memoized components
- `rerender-memo-with-default-value` - Hoist default non-primitive props
- `rerender-dependencies` - Use primitive dependencies in effects
- `rerender-derived-state` - Subscribe to derived booleans, not raw values
- `rerender-derived-state-no-effect` - Derive state during render, not effects
- `rerender-functional-setstate` - Use functional setState for stable callbacks
- `rerender-lazy-state-init` - Pass function to useState for expensive values
- `rerender-simple-expression-in-memo` - Avoid memo for simple primitives
- `rerender-move-effect-to-event` - Put interaction logic in event handlers
- `rerender-transitions` - Use startTransition for non-urgent updates
- `rerender-use-ref-transient-values` - Use refs for transient frequent values
### 6. Rendering Performance (MEDIUM)
- `rendering-animate-svg-wrapper` - Animate div wrapper, not SVG element
- `rendering-content-visibility` - Use content-visibility for long lists
- `rendering-hoist-jsx` - Extract static JSX outside components
- `rendering-svg-precision` - Reduce SVG coordinate precision
- `rendering-hydration-no-flicker` - Use inline script for client-only data
- `rendering-hydration-suppress-warning` - Suppress expected mismatches
- `rendering-activity` - Use Activity component for show/hide
- `rendering-conditional-render` - Use ternary, not && for conditionals
- `rendering-usetransition-loading` - Prefer useTransition for loading state
### 7. JavaScript Performance (LOW-MEDIUM)
- `js-batch-dom-css` - Group CSS changes via classes or cssText
- `js-index-maps` - Build Map for repeated lookups
- `js-cache-property-access` - Cache object properties in loops
- `js-cache-function-results` - Cache function results in module-level Map
- `js-cache-storage` - Cache localStorage/sessionStorage reads
- `js-combine-iterations` - Combine multiple filter/map into one loop
- `js-length-check-first` - Check array length before expensive comparison
- `js-early-exit` - Return early from functions
- `js-hoist-regexp` - Hoist RegExp creation outside loops
- `js-min-max-loop` - Use loop for min/max instead of sort
- `js-set-map-lookups` - Use Set/Map for O(1) lookups
- `js-tosorted-immutable` - Use toSorted() for immutability
### 8. Advanced Patterns (LOW)
- `advanced-event-handler-refs` - Store event handlers in refs
- `advanced-init-once` - Initialize app once per app load
- `advanced-use-latest` - useLatest for stable callback refs
## How to Use
Read individual rule files for detailed explanations and code examples:
```
rules/async-parallel.md
rules/bundle-barrel-imports.md
```
Each rule file contains:
- Brief explanation of why it matters
- Incorrect code example with explanation
- Correct code example with explanation
- Additional context and references
## Full Compiled Document
For the complete guide with all rules expanded: `AGENTS.md`

View File

@@ -0,0 +1,249 @@
{
"kept": [
{
"file": "_sections.md",
"reason": "Default (no matching rule)"
},
{
"file": "_template.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "async-api-routes.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "async-defer-await.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "async-dependencies.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "async-parallel.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "async-suspense-boundaries.md",
"reason": "Priority: HIGH"
},
{
"file": "bundle-barrel-imports.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "bundle-conditional.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "bundle-defer-third-party.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "bundle-dynamic-imports.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "bundle-preload.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "client-localstorage-schema.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "client-passive-event-listeners.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "client-swr-dedup.md",
"reason": "Priority: MEDIUM-HIGH (conditional keep)"
},
{
"file": "rerender-defer-reads.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "rerender-dependencies.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "rerender-derived-state-no-effect.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "rerender-derived-state.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "rerender-functional-setstate.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "rerender-lazy-state-init.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "rerender-memo-with-default-value.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "rerender-memo.md",
"reason": "Always keep (critical pattern)"
},
{
"file": "rerender-move-effect-to-event.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "rerender-transitions.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "rerender-use-ref-transient-values.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "server-after-nonblocking.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "server-auth-actions.md",
"reason": "Priority: CRITICAL"
},
{
"file": "server-cache-lru.md",
"reason": "Priority: HIGH"
},
{
"file": "server-cache-react.md",
"reason": "Priority: MEDIUM (conditional keep)"
},
{
"file": "server-parallel-fetching.md",
"reason": "Priority: CRITICAL"
},
{
"file": "server-serialization.md",
"reason": "Priority: HIGH"
}
],
"archived": [
{
"file": "advanced-event-handler-refs.md",
"reason": "Feature flag: removeAdvancedPatterns"
},
{
"file": "advanced-init-once.md",
"reason": "Feature flag: removeAdvancedPatterns"
},
{
"file": "advanced-use-latest.md",
"reason": "Feature flag: removeAdvancedPatterns"
},
{
"file": "client-event-listeners.md",
"reason": "Priority: LOW"
},
{
"file": "js-batch-dom-css.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-cache-function-results.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-cache-property-access.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-cache-storage.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-combine-iterations.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-early-exit.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-hoist-regexp.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-index-maps.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-length-check-first.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-min-max-loop.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-set-map-lookups.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "js-tosorted-immutable.md",
"reason": "Feature flag: removeJsOptimizations"
},
{
"file": "rendering-activity.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-animate-svg-wrapper.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-conditional-render.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-content-visibility.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-hoist-jsx.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-hydration-no-flicker.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-hydration-suppress-warning.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-svg-precision.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rendering-usetransition-loading.md",
"reason": "Feature flag: removeRenderingOptimizations"
},
{
"file": "rerender-simple-expression-in-memo.md",
"reason": "Priority: LOW-MEDIUM"
},
{
"file": "server-dedup-props.md",
"reason": "Priority: LOW"
}
],
"technologiesDetected": [],
"summary": {
"totalRules": 59,
"keptRules": 32,
"archivedRules": 27,
"reductionPercent": 46
}
}

View File

@@ -0,0 +1,15 @@
{
"version": "1.0.0",
"organization": "Vercel Engineering",
"date": "January 2026",
"abstract": "Comprehensive performance optimization guide for React and Next.js applications, designed for AI agents and LLMs. Contains 40+ rules across 8 categories, prioritized by impact from critical (eliminating waterfalls, reducing bundle size) to incremental (advanced patterns). Each rule includes detailed explanations, real-world examples comparing incorrect vs. correct implementations, and specific impact metrics to guide automated refactoring and code generation.",
"references": [
"https://react.dev",
"https://nextjs.org",
"https://swr.vercel.app",
"https://github.com/shuding/better-all",
"https://github.com/isaacs/node-lru-cache",
"https://vercel.com/blog/how-we-optimized-package-imports-in-next-js",
"https://vercel.com/blog/how-we-made-the-vercel-dashboard-twice-as-fast"
]
}

View File

@@ -0,0 +1,46 @@
# Sections
This file defines all sections, their ordering, impact levels, and descriptions.
The section ID (in parentheses) is the filename prefix used to group rules.
---
## 1. Eliminating Waterfalls (async)
**Impact:** CRITICAL
**Description:** Waterfalls are the #1 performance killer. Each sequential await adds full network latency. Eliminating them yields the largest gains.
## 2. Bundle Size Optimization (bundle)
**Impact:** CRITICAL
**Description:** Reducing initial bundle size improves Time to Interactive and Largest Contentful Paint.
## 3. Server-Side Performance (server)
**Impact:** HIGH
**Description:** Optimizing server-side rendering and data fetching eliminates server-side waterfalls and reduces response times.
## 4. Client-Side Data Fetching (client)
**Impact:** MEDIUM-HIGH
**Description:** Automatic deduplication and efficient data fetching patterns reduce redundant network requests.
## 5. Re-render Optimization (rerender)
**Impact:** MEDIUM
**Description:** Reducing unnecessary re-renders minimizes wasted computation and improves UI responsiveness.
## 6. Rendering Performance (rendering)
**Impact:** MEDIUM
**Description:** Optimizing the rendering process reduces the work the browser needs to do.
## 7. JavaScript Performance (js)
**Impact:** LOW-MEDIUM
**Description:** Micro-optimizations for hot paths can add up to meaningful improvements.
## 8. Advanced Patterns (advanced)
**Impact:** LOW
**Description:** Advanced patterns for specific cases that require careful implementation.

View File

@@ -0,0 +1,28 @@
---
title: Rule Title Here
impact: MEDIUM
impactDescription: Optional description of impact (e.g., "20-50% improvement")
tags: tag1, tag2
---
## Rule Title Here
**Impact: MEDIUM (optional impact description)**
Brief explanation of the rule and why it matters. This should be clear and concise, explaining the performance implications.
**Incorrect (description of what's wrong):**
```typescript
// Bad code example here
const bad = example();
```
**Correct (description of what's right):**
```typescript
// Good code example here
const good = example();
```
Reference: [Link to documentation or resource](https://example.com)

View File

@@ -0,0 +1,35 @@
---
title: Prevent Waterfall Chains in API Routes
impact: CRITICAL
impactDescription: 2-10× improvement
tags: api-routes, server-actions, waterfalls, parallelization
---
## Prevent Waterfall Chains in API Routes
In API routes and Server Actions, start independent operations immediately, even if you don't await them yet.
**Incorrect (config waits for auth, data waits for both):**
```typescript
export async function GET(request: Request) {
const session = await auth();
const config = await fetchConfig();
const data = await fetchData(session.user.id);
return Response.json({ data, config });
}
```
**Correct (auth and config start immediately):**
```typescript
export async function GET(request: Request) {
const sessionPromise = auth();
const configPromise = fetchConfig();
const session = await sessionPromise;
const [config, data] = await Promise.all([configPromise, fetchData(session.user.id)]);
return Response.json({ data, config });
}
```
For operations with more complex dependency chains, use `better-all` to automatically maximize parallelism (see Dependency-Based Parallelization).

View File

@@ -0,0 +1,80 @@
---
title: Defer Await Until Needed
impact: HIGH
impactDescription: avoids blocking unused code paths
tags: async, await, conditional, optimization
---
## Defer Await Until Needed
Move `await` operations into the branches where they're actually used to avoid blocking code paths that don't need them.
**Incorrect (blocks both branches):**
```typescript
async function handleRequest(userId: string, skipProcessing: boolean) {
const userData = await fetchUserData(userId);
if (skipProcessing) {
// Returns immediately but still waited for userData
return { skipped: true };
}
// Only this branch uses userData
return processUserData(userData);
}
```
**Correct (only blocks when needed):**
```typescript
async function handleRequest(userId: string, skipProcessing: boolean) {
if (skipProcessing) {
// Returns immediately without waiting
return { skipped: true };
}
// Fetch only when needed
const userData = await fetchUserData(userId);
return processUserData(userData);
}
```
**Another example (early return optimization):**
```typescript
// Incorrect: always fetches permissions
async function updateResource(resourceId: string, userId: string) {
const permissions = await fetchPermissions(userId)
const resource = await getResource(resourceId)
if (!resource) {
return { error: 'Not found' }
}
if (!permissions.canEdit) {
return { error: 'Forbidden' }
}
return await updateResourceData(resource, permissions)
}
// Correct: fetches only when needed
async function updateResource(resourceId: string, userId: string) {
const resource = await getResource(resourceId)
if (!resource) {
return { error: 'Not found' }
}
const permissions = await fetchPermissions(userId)
if (!permissions.canEdit) {
return { error: 'Forbidden' }
}
return await updateResourceData(resource, permissions)
}
```
This optimization is especially valuable when the skipped branch is frequently taken, or when the deferred operation is expensive.

View File

@@ -0,0 +1,48 @@
---
title: Dependency-Based Parallelization
impact: CRITICAL
impactDescription: 2-10× improvement
tags: async, parallelization, dependencies, better-all
---
## Dependency-Based Parallelization
For operations with partial dependencies, use `better-all` to maximize parallelism. It automatically starts each task at the earliest possible moment.
**Incorrect (profile waits for config unnecessarily):**
```typescript
const [user, config] = await Promise.all([fetchUser(), fetchConfig()]);
const profile = await fetchProfile(user.id);
```
**Correct (config and profile run in parallel):**
```typescript
import { all } from "better-all";
const { user, config, profile } = await all({
async user() {
return fetchUser();
},
async config() {
return fetchConfig();
},
async profile() {
return fetchProfile((await this.$.user).id);
},
});
```
**Alternative without extra dependencies:**
We can also create all the promises first, and do `Promise.all()` at the end.
```typescript
const userPromise = fetchUser();
const profilePromise = userPromise.then((user) => fetchProfile(user.id));
const [user, config, profile] = await Promise.all([userPromise, fetchConfig(), profilePromise]);
```
Reference: [https://github.com/shuding/better-all](https://github.com/shuding/better-all)

View File

@@ -0,0 +1,24 @@
---
title: Promise.all() for Independent Operations
impact: CRITICAL
impactDescription: 2-10× improvement
tags: async, parallelization, promises, waterfalls
---
## Promise.all() for Independent Operations
When async operations have no interdependencies, execute them concurrently using `Promise.all()`.
**Incorrect (sequential execution, 3 round trips):**
```typescript
const user = await fetchUser();
const posts = await fetchPosts();
const comments = await fetchComments();
```
**Correct (parallel execution, no waterfall):**
```typescript
const [user, posts, comments] = await Promise.all([fetchUser(), fetchPosts(), fetchComments()]);
```

View File

@@ -0,0 +1,99 @@
---
title: Strategic Suspense Boundaries
impact: HIGH
impactDescription: faster initial paint
tags: async, suspense, streaming, layout-shift
---
## Strategic Suspense Boundaries
Instead of awaiting data in async components before returning JSX, use Suspense boundaries to show the wrapper UI faster while data loads.
**Incorrect (wrapper blocked by data fetching):**
```tsx
async function Page() {
const data = await fetchData(); // Blocks entire page
return (
<div>
<div>Sidebar</div>
<div>Header</div>
<div>
<DataDisplay data={data} />
</div>
<div>Footer</div>
</div>
);
}
```
The entire layout waits for data even though only the middle section needs it.
**Correct (wrapper shows immediately, data streams in):**
```tsx
function Page() {
return (
<div>
<div>Sidebar</div>
<div>Header</div>
<div>
<Suspense fallback={<Skeleton />}>
<DataDisplay />
</Suspense>
</div>
<div>Footer</div>
</div>
);
}
async function DataDisplay() {
const data = await fetchData(); // Only blocks this component
return <div>{data.content}</div>;
}
```
Sidebar, Header, and Footer render immediately. Only DataDisplay waits for data.
**Alternative (share promise across components):**
```tsx
function Page() {
// Start fetch immediately, but don't await
const dataPromise = fetchData();
return (
<div>
<div>Sidebar</div>
<div>Header</div>
<Suspense fallback={<Skeleton />}>
<DataDisplay dataPromise={dataPromise} />
<DataSummary dataPromise={dataPromise} />
</Suspense>
<div>Footer</div>
</div>
);
}
function DataDisplay({ dataPromise }: { dataPromise: Promise<Data> }) {
const data = use(dataPromise); // Unwraps the promise
return <div>{data.content}</div>;
}
function DataSummary({ dataPromise }: { dataPromise: Promise<Data> }) {
const data = use(dataPromise); // Reuses the same promise
return <div>{data.summary}</div>;
}
```
Both components share the same promise, so only one fetch occurs. Layout renders immediately while both components wait together.
**When NOT to use this pattern:**
- Critical data needed for layout decisions (affects positioning)
- SEO-critical content above the fold
- Small, fast queries where suspense overhead isn't worth it
- When you want to avoid layout shift (loading → content jump)
**Trade-off:** Faster initial paint vs potential layout shift. Choose based on your UX priorities.

View File

@@ -0,0 +1,62 @@
---
title: Avoid Barrel File Imports
impact: CRITICAL
impactDescription: 200-800ms import cost, slow builds
tags: bundle, imports, tree-shaking, barrel-files, performance
---
## Avoid Barrel File Imports
Import directly from source files instead of barrel files to avoid loading thousands of unused modules. **Barrel files** are entry points that re-export multiple modules (e.g., `index.js` that does `export * from './module'`).
Popular icon and component libraries can have **up to 10,000 re-exports** in their entry file. For many React packages, **it takes 200-800ms just to import them**, affecting both development speed and production cold starts.
**Why tree-shaking doesn't help:** When a library is marked as external (not bundled), the bundler can't optimize it. If you bundle it to enable tree-shaking, builds become substantially slower analyzing the entire module graph.
**Incorrect (imports entire library):**
```tsx
// Loads 1,583 modules, takes ~2.8s extra in dev
// Runtime cost: 200-800ms on every cold start
import { Button, TextField } from "@mui/material";
import { Check, Menu, X } from "lucide-react";
// Loads 2,225 modules, takes ~4.2s extra in dev
```
**Correct (imports only what you need):**
```tsx
// Loads only 3 modules (~2KB vs ~1MB)
import Button from "@mui/material/Button";
import TextField from "@mui/material/TextField";
import Check from "lucide-react/dist/esm/icons/check";
import Menu from "lucide-react/dist/esm/icons/menu";
import X from "lucide-react/dist/esm/icons/x";
// Loads only what you use
```
**Alternative (Next.js 13.5+):**
```js
// next.config.js - use optimizePackageImports
module.exports = {
  experimental: {
    optimizePackageImports: ["lucide-react", "@mui/material"],
  },
};
// Imports are automatically transformed to direct imports at build time,
// so you can keep the ergonomic barrel imports:
// import { Check, Menu, X } from "lucide-react";
```
Direct imports provide 15-70% faster dev boot, 28% faster builds, 40% faster cold starts, and significantly faster HMR.
Libraries commonly affected: `lucide-react`, `@mui/material`, `@mui/icons-material`, `@tabler/icons-react`, `react-icons`, `@headlessui/react`, `@radix-ui/react-*`, `lodash`, `ramda`, `date-fns`, `rxjs`, `react-use`.
Reference: [How we optimized package imports in Next.js](https://vercel.com/blog/how-we-optimized-package-imports-in-next-js)

View File

@@ -0,0 +1,35 @@
---
title: Conditional Module Loading
impact: HIGH
impactDescription: loads large data only when needed
tags: bundle, conditional-loading, lazy-loading
---
## Conditional Module Loading
Load large data or modules only when a feature is activated.
**Example (lazy-load animation frames):**
```tsx
function AnimationPlayer({
enabled,
setEnabled,
}: {
enabled: boolean;
setEnabled: React.Dispatch<React.SetStateAction<boolean>>;
}) {
const [frames, setFrames] = useState<Frame[] | null>(null);
useEffect(() => {
if (enabled && !frames && typeof window !== "undefined") {
import("./animation-frames.js").then((mod) => setFrames(mod.frames)).catch(() => setEnabled(false));
}
}, [enabled, frames, setEnabled]);
if (!frames) return <Skeleton />;
return <Canvas frames={frames} />;
}
```
The `typeof window !== 'undefined'` check prevents bundling this module for SSR, optimizing server bundle size and build speed.

View File

@@ -0,0 +1,46 @@
---
title: Defer Non-Critical Third-Party Libraries
impact: MEDIUM
impactDescription: loads after hydration
tags: bundle, third-party, analytics, defer
---
## Defer Non-Critical Third-Party Libraries
Analytics, logging, and error tracking don't block user interaction. Load them after hydration.
**Incorrect (blocks initial bundle):**
```tsx
import { Analytics } from "@vercel/analytics/react";
export default function RootLayout({ children }) {
return (
<html>
<body>
{children}
<Analytics />
</body>
</html>
);
}
```
**Correct (loads after hydration):**
```tsx
import dynamic from "next/dynamic";
const Analytics = dynamic(() => import("@vercel/analytics/react").then((m) => m.Analytics), { ssr: false });
export default function RootLayout({ children }) {
return (
<html>
<body>
{children}
<Analytics />
</body>
</html>
);
}
```

View File

@@ -0,0 +1,32 @@
---
title: Dynamic Imports for Heavy Components
impact: CRITICAL
impactDescription: directly affects TTI and LCP
tags: bundle, dynamic-import, code-splitting, next-dynamic
---
## Dynamic Imports for Heavy Components
Use `next/dynamic` to lazy-load large components not needed on initial render.
**Incorrect (Monaco bundles with main chunk ~300KB):**
```tsx
import { MonacoEditor } from "./monaco-editor";
function CodePanel({ code }: { code: string }) {
return <MonacoEditor value={code} />;
}
```
**Correct (Monaco loads on demand):**
```tsx
import dynamic from "next/dynamic";
const MonacoEditor = dynamic(() => import("./monaco-editor").then((m) => m.MonacoEditor), { ssr: false });
function CodePanel({ code }: { code: string }) {
return <MonacoEditor value={code} />;
}
```

View File

@@ -0,0 +1,44 @@
---
title: Preload Based on User Intent
impact: MEDIUM
impactDescription: reduces perceived latency
tags: bundle, preload, user-intent, hover
---
## Preload Based on User Intent
Preload heavy bundles before they're needed to reduce perceived latency.
**Example (preload on hover/focus):**
```tsx
function EditorButton({ onClick }: { onClick: () => void }) {
const preload = () => {
if (typeof window !== "undefined") {
void import("./monaco-editor");
}
};
return (
<button onMouseEnter={preload} onFocus={preload} onClick={onClick}>
Open Editor
</button>
);
}
```
**Example (preload when feature flag is enabled):**
```tsx
function FlagsProvider({ children, flags }: Props) {
useEffect(() => {
if (flags.editorEnabled && typeof window !== "undefined") {
void import("./monaco-editor").then((mod) => mod.init());
}
}, [flags.editorEnabled]);
return <FlagsContext.Provider value={flags}>{children}</FlagsContext.Provider>;
}
```
The `typeof window !== 'undefined'` check prevents bundling preloaded modules for SSR, optimizing server bundle size and build speed.

View File

@@ -0,0 +1,74 @@
---
title: Version and Minimize localStorage Data
impact: MEDIUM
impactDescription: prevents schema conflicts, reduces storage size
tags: client, localStorage, storage, versioning, data-minimization
---
## Version and Minimize localStorage Data
Add version prefix to keys and store only needed fields. Prevents schema conflicts and accidental storage of sensitive data.
**Incorrect:**
```typescript
// No version, stores everything, no error handling
localStorage.setItem("userConfig", JSON.stringify(fullUserObject));
const data = localStorage.getItem("userConfig");
```
**Correct:**
```typescript
const VERSION = "v2";
function saveConfig(config: { theme: string; language: string }) {
try {
localStorage.setItem(`userConfig:${VERSION}`, JSON.stringify(config));
} catch {
// Throws in incognito/private browsing, quota exceeded, or disabled
}
}
function loadConfig() {
try {
const data = localStorage.getItem(`userConfig:${VERSION}`);
return data ? JSON.parse(data) : null;
} catch {
return null;
}
}
// Migration from v1 to v2
function migrate() {
try {
const v1 = localStorage.getItem("userConfig:v1");
if (v1) {
const old = JSON.parse(v1);
saveConfig({ theme: old.darkMode ? "dark" : "light", language: old.lang });
localStorage.removeItem("userConfig:v1");
}
} catch {}
}
```
**Store minimal fields from server responses:**
```typescript
// User object has 20+ fields, only store what UI needs
function cachePrefs(user: FullUser) {
try {
localStorage.setItem(
"prefs:v1",
JSON.stringify({
theme: user.preferences.theme,
notifications: user.preferences.notifications,
})
);
} catch {}
}
```
**Always wrap in try-catch:** `getItem()` and `setItem()` throw in incognito/private browsing (Safari, Firefox), when quota exceeded, or when disabled.
**Benefits:** Schema evolution via versioning, reduced storage size, prevents storing tokens/PII/internal flags.

View File

@@ -0,0 +1,48 @@
---
title: Use Passive Event Listeners for Scrolling Performance
impact: MEDIUM
impactDescription: eliminates scroll delay caused by event listeners
tags: client, event-listeners, scrolling, performance, touch, wheel
---
## Use Passive Event Listeners for Scrolling Performance
Add `{ passive: true }` to touch and wheel event listeners to enable immediate scrolling. Browsers normally wait for listeners to finish to check if `preventDefault()` is called, causing scroll delay.
**Incorrect:**
```typescript
useEffect(() => {
const handleTouch = (e: TouchEvent) => console.log(e.touches[0].clientX);
const handleWheel = (e: WheelEvent) => console.log(e.deltaY);
document.addEventListener("touchstart", handleTouch);
document.addEventListener("wheel", handleWheel);
return () => {
document.removeEventListener("touchstart", handleTouch);
document.removeEventListener("wheel", handleWheel);
};
}, []);
```
**Correct:**
```typescript
useEffect(() => {
const handleTouch = (e: TouchEvent) => console.log(e.touches[0].clientX);
const handleWheel = (e: WheelEvent) => console.log(e.deltaY);
document.addEventListener("touchstart", handleTouch, { passive: true });
document.addEventListener("wheel", handleWheel, { passive: true });
return () => {
document.removeEventListener("touchstart", handleTouch);
document.removeEventListener("wheel", handleWheel);
};
}, []);
```
**Use passive when:** tracking/analytics, logging, any listener that doesn't call `preventDefault()`.
**Don't use passive when:** implementing custom swipe gestures, custom zoom controls, or any listener that needs `preventDefault()`.
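For the second case, a sketch of a custom swipe handler that must stay non-passive because it calls `preventDefault()` to stop native scrolling (`onSwipe` is a placeholder callback, not part of the original rule):
```typescript
useEffect(() => {
  let startX = 0;
  const onTouchStart = (e: TouchEvent) => {
    startX = e.touches[0].clientX;
  };
  const onTouchMove = (e: TouchEvent) => {
    const deltaX = e.touches[0].clientX - startX;
    if (Math.abs(deltaX) > 10) {
      e.preventDefault(); // only possible because this listener is not passive
      onSwipe(deltaX);
    }
  };
  // touchstart never calls preventDefault, so it can stay passive
  document.addEventListener("touchstart", onTouchStart, { passive: true });
  document.addEventListener("touchmove", onTouchMove, { passive: false });
  return () => {
    document.removeEventListener("touchstart", onTouchStart);
    document.removeEventListener("touchmove", onTouchMove);
  };
}, []);
```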

View File

@@ -0,0 +1,56 @@
---
title: Use SWR for Automatic Deduplication
impact: MEDIUM-HIGH
impactDescription: automatic deduplication
tags: client, swr, deduplication, data-fetching
---
## Use SWR for Automatic Deduplication
SWR enables request deduplication, caching, and revalidation across component instances.
**Incorrect (no deduplication, each instance fetches):**
```tsx
function UserList() {
const [users, setUsers] = useState([]);
useEffect(() => {
fetch("/api/users")
.then((r) => r.json())
.then(setUsers);
}, []);
}
```
**Correct (multiple instances share one request):**
```tsx
import useSWR from "swr";
function UserList() {
const { data: users } = useSWR("/api/users", fetcher);
}
```
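The SWR examples reference a `fetcher` that the rule never defines; a minimal JSON fetcher (an illustrative sketch, not part of the original rule) could be:
```tsx
const fetcher = (url: string) =>
  fetch(url).then((res) => {
    if (!res.ok) throw new Error(`Request failed: ${res.status}`);
    return res.json();
  });
```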
**For immutable data:**
```tsx
import { useImmutableSWR } from "@/lib/swr";
function StaticContent() {
const { data } = useImmutableSWR("/api/config", fetcher);
}
```
**For mutations:**
```tsx
import useSWRMutation from "swr/mutation";
function UpdateButton() {
const { trigger } = useSWRMutation("/api/user", updateUser);
return <button onClick={() => trigger()}>Update</button>;
}
```
Reference: [https://swr.vercel.app](https://swr.vercel.app)

View File

@@ -0,0 +1,39 @@
---
title: Defer State Reads to Usage Point
impact: MEDIUM
impactDescription: avoids unnecessary subscriptions
tags: rerender, searchParams, localStorage, optimization
---
## Defer State Reads to Usage Point
Don't subscribe to dynamic state (searchParams, localStorage) if you only read it inside callbacks.
**Incorrect (subscribes to all searchParams changes):**
```tsx
function ShareButton({ chatId }: { chatId: string }) {
const searchParams = useSearchParams();
const handleShare = () => {
const ref = searchParams.get("ref");
shareChat(chatId, { ref });
};
return <button onClick={handleShare}>Share</button>;
}
```
**Correct (reads on demand, no subscription):**
```tsx
function ShareButton({ chatId }: { chatId: string }) {
const handleShare = () => {
const params = new URLSearchParams(window.location.search);
const ref = params.get("ref");
shareChat(chatId, { ref });
};
return <button onClick={handleShare}>Share</button>;
}
```

View File

@@ -0,0 +1,45 @@
---
title: Narrow Effect Dependencies
impact: LOW
impactDescription: minimizes effect re-runs
tags: rerender, useEffect, dependencies, optimization
---
## Narrow Effect Dependencies
Specify primitive dependencies instead of objects to minimize effect re-runs.
**Incorrect (re-runs on any user field change):**
```tsx
useEffect(() => {
console.log(user.id);
}, [user]);
```
**Correct (re-runs only when id changes):**
```tsx
useEffect(() => {
console.log(user.id);
}, [user.id]);
```
**For derived state, compute outside effect:**
```tsx
// Incorrect: runs on width=767, 766, 765...
useEffect(() => {
if (width < 768) {
enableMobileMode();
}
}, [width]);
// Correct: runs only on boolean transition
const isMobile = width < 768;
useEffect(() => {
if (isMobile) {
enableMobileMode();
}
}, [isMobile]);
```

View File

@@ -0,0 +1,40 @@
---
title: Calculate Derived State During Rendering
impact: MEDIUM
impactDescription: avoids redundant renders and state drift
tags: rerender, derived-state, useEffect, state
---
## Calculate Derived State During Rendering
If a value can be computed from current props/state, do not store it in state or update it in an effect. Derive it during render to avoid extra renders and state drift. Do not set state in effects solely in response to prop changes; prefer derived values or keyed resets instead.
**Incorrect (redundant state and effect):**
```tsx
function Form() {
const [firstName, setFirstName] = useState("First");
const [lastName, setLastName] = useState("Last");
const [fullName, setFullName] = useState("");
useEffect(() => {
setFullName(firstName + " " + lastName);
}, [firstName, lastName]);
return <p>{fullName}</p>;
}
```
**Correct (derive during render):**
```tsx
function Form() {
const [firstName, setFirstName] = useState("First");
const [lastName, setLastName] = useState("Last");
const fullName = firstName + " " + lastName;
return <p>{fullName}</p>;
}
```
References: [You Might Not Need an Effect](https://react.dev/learn/you-might-not-need-an-effect)

View File

@@ -0,0 +1,29 @@
---
title: Subscribe to Derived State
impact: MEDIUM
impactDescription: reduces re-render frequency
tags: rerender, derived-state, media-query, optimization
---
## Subscribe to Derived State
Subscribe to derived boolean state instead of continuous values to reduce re-render frequency.
**Incorrect (re-renders on every pixel change):**
```tsx
function Sidebar() {
const width = useWindowWidth(); // updates continuously
const isMobile = width < 768;
return <nav className={isMobile ? "mobile" : "desktop"} />;
}
```
**Correct (re-renders only when boolean changes):**
```tsx
function Sidebar() {
const isMobile = useMediaQuery("(max-width: 767px)");
return <nav className={isMobile ? "mobile" : "desktop"} />;
}
```
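`useMediaQuery` is assumed rather than shown; one possible implementation (a sketch using `useSyncExternalStore`, React 18+) subscribes to the media query's change event so the component re-renders only when the boolean flips:
```tsx
import { useSyncExternalStore } from "react";
function useMediaQuery(query: string): boolean {
  return useSyncExternalStore(
    (onStoreChange) => {
      const mql = window.matchMedia(query);
      mql.addEventListener("change", onStoreChange);
      return () => mql.removeEventListener("change", onStoreChange);
    },
    () => window.matchMedia(query).matches,
    // Server snapshot: no window during SSR, default to non-mobile
    () => false
  );
}
```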

View File

@@ -0,0 +1,77 @@
---
title: Use Functional setState Updates
impact: MEDIUM
impactDescription: prevents stale closures and unnecessary callback recreations
tags: react, hooks, useState, useCallback, callbacks, closures
---
## Use Functional setState Updates
When updating state based on the current state value, use the functional update form of setState instead of directly referencing the state variable. This prevents stale closures, eliminates unnecessary dependencies, and creates stable callback references.
**Incorrect (requires state as dependency):**
```tsx
function TodoList() {
const [items, setItems] = useState(initialItems);
// Callback must depend on items, recreated on every items change
const addItems = useCallback(
(newItems: Item[]) => {
setItems([...items, ...newItems]);
},
[items]
); // ❌ items dependency causes recreations
// Risk of stale closure if dependency is forgotten
const removeItem = useCallback((id: string) => {
setItems(items.filter((item) => item.id !== id));
}, []); // ❌ Missing items dependency - will use stale items!
return <ItemsEditor items={items} onAdd={addItems} onRemove={removeItem} />;
}
```
The first callback is recreated every time `items` changes, which can cause child components to re-render unnecessarily. The second callback has a stale closure bug—it will always reference the initial `items` value.
**Correct (stable callbacks, no stale closures):**
```tsx
function TodoList() {
const [items, setItems] = useState(initialItems);
// Stable callback, never recreated
const addItems = useCallback((newItems: Item[]) => {
setItems((curr) => [...curr, ...newItems]);
}, []); // ✅ No dependencies needed
// Always uses latest state, no stale closure risk
const removeItem = useCallback((id: string) => {
setItems((curr) => curr.filter((item) => item.id !== id));
}, []); // ✅ Safe and stable
return <ItemsEditor items={items} onAdd={addItems} onRemove={removeItem} />;
}
```
**Benefits:**
1. **Stable callback references** - Callbacks don't need to be recreated when state changes
2. **No stale closures** - Always operates on the latest state value
3. **Fewer dependencies** - Simplifies dependency arrays and reduces memory leaks
4. **Prevents bugs** - Eliminates the most common source of React closure bugs
**When to use functional updates:**
- Any setState that depends on the current state value
- Inside useCallback/useMemo when state is needed
- Event handlers that reference state
- Async operations that update state
**When direct updates are fine:**
- Setting state to a static value: `setCount(0)`
- Setting state from props/arguments only: `setName(newName)`
- State doesn't depend on previous value
**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, the compiler can automatically optimize some cases, but functional updates are still recommended for correctness and to prevent stale closure bugs.

View File

@@ -0,0 +1,56 @@
---
title: Use Lazy State Initialization
impact: MEDIUM
impactDescription: wasted computation on every render
tags: react, hooks, useState, performance, initialization
---
## Use Lazy State Initialization
Pass a function to `useState` for expensive initial values. Without the function form, the initializer runs on every render even though the value is only used once.
**Incorrect (runs on every render):**
```tsx
function FilteredList({ items }: { items: Item[] }) {
// buildSearchIndex() runs on EVERY render, even after initialization
const [searchIndex, setSearchIndex] = useState(buildSearchIndex(items));
const [query, setQuery] = useState("");
// When query changes, buildSearchIndex runs again unnecessarily
return <SearchResults index={searchIndex} query={query} />;
}
function UserProfile() {
// JSON.parse runs on every render
const [settings, setSettings] = useState(JSON.parse(localStorage.getItem("settings") || "{}"));
return <SettingsForm settings={settings} onChange={setSettings} />;
}
```
**Correct (runs only once):**
```tsx
function FilteredList({ items }: { items: Item[] }) {
// buildSearchIndex() runs ONLY on initial render
const [searchIndex, setSearchIndex] = useState(() => buildSearchIndex(items));
const [query, setQuery] = useState("");
return <SearchResults index={searchIndex} query={query} />;
}
function UserProfile() {
// JSON.parse runs only on initial render
const [settings, setSettings] = useState(() => {
const stored = localStorage.getItem("settings");
return stored ? JSON.parse(stored) : {};
});
return <SettingsForm settings={settings} onChange={setSettings} />;
}
```
Use lazy initialization when computing initial values from localStorage/sessionStorage, building data structures (indexes, maps), reading from the DOM, or performing heavy transformations.
For simple primitives (`useState(0)`), direct references (`useState(props.value)`), or cheap literals (`useState({})`), the function form is unnecessary.

View File

@@ -0,0 +1,36 @@
---
title: Extract Default Non-primitive Parameter Value from Memoized Component to Constant
impact: MEDIUM
impactDescription: restores memoization by using a constant for default value
tags: rerender, memo, optimization
---
## Extract Default Non-primitive Parameter Value from Memoized Component to Constant
When a memoized component declares a default value for a non-primitive optional parameter, such as an array, function, or object, calling the component without that parameter results in broken memoization: a new default instance is created on every rerender, so it fails the strict equality comparison used by `memo()`.
To address this issue, extract the default value into a constant.
**Incorrect (`onClick` has different values on every rerender):**
```tsx
const UserAvatar = memo(function UserAvatar({ onClick = () => {} }: { onClick?: () => void }) {
// ...
})
// Used without optional onClick
<UserAvatar />
```
**Correct (stable default value):**
```tsx
const NOOP = () => {};
const UserAvatar = memo(function UserAvatar({ onClick = NOOP }: { onClick?: () => void }) {
// ...
})
// Used without optional onClick
<UserAvatar />
```

View File

@@ -0,0 +1,44 @@
---
title: Extract to Memoized Components
impact: MEDIUM
impactDescription: enables early returns
tags: rerender, memo, useMemo, optimization
---
## Extract to Memoized Components
Extract expensive work into memoized components to enable early returns before computation.
**Incorrect (computes avatar even when loading):**
```tsx
function Profile({ user, loading }: Props) {
const avatar = useMemo(() => {
const id = computeAvatarId(user);
return <Avatar id={id} />;
}, [user]);
if (loading) return <Skeleton />;
return <div>{avatar}</div>;
}
```
**Correct (skips computation when loading):**
```tsx
const UserAvatar = memo(function UserAvatar({ user }: { user: User }) {
const id = useMemo(() => computeAvatarId(user), [user]);
return <Avatar id={id} />;
});
function Profile({ user, loading }: Props) {
if (loading) return <Skeleton />;
return (
<div>
<UserAvatar user={user} />
</div>
);
}
```
**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, manual memoization with `memo()` and `useMemo()` is not necessary. The compiler automatically optimizes re-renders.

View File

@@ -0,0 +1,45 @@
---
title: Put Interaction Logic in Event Handlers
impact: MEDIUM
impactDescription: avoids effect re-runs and duplicate side effects
tags: rerender, useEffect, events, side-effects, dependencies
---
## Put Interaction Logic in Event Handlers
If a side effect is triggered by a specific user action (submit, click, drag), run it in that event handler. Do not model the action as state + effect; it makes effects re-run on unrelated changes and can duplicate the action.
**Incorrect (event modeled as state + effect):**
```tsx
function Form() {
const [submitted, setSubmitted] = useState(false);
const theme = useContext(ThemeContext);
useEffect(() => {
if (submitted) {
post("/api/register");
showToast("Registered", theme);
}
}, [submitted, theme]);
return <button onClick={() => setSubmitted(true)}>Submit</button>;
}
```
**Correct (do it in the handler):**
```tsx
function Form() {
const theme = useContext(ThemeContext);
function handleSubmit() {
post("/api/register");
showToast("Registered", theme);
}
return <button onClick={handleSubmit}>Submit</button>;
}
```
Reference: [Should this code move to an event handler?](https://react.dev/learn/removing-effect-dependencies#should-this-code-move-to-an-event-handler)

View File

@@ -0,0 +1,40 @@
---
title: Use Transitions for Non-Urgent Updates
impact: MEDIUM
impactDescription: maintains UI responsiveness
tags: rerender, transitions, startTransition, performance
---
## Use Transitions for Non-Urgent Updates
Mark frequent, non-urgent state updates as transitions to maintain UI responsiveness.
**Incorrect (triggers high-frequency re-renders):**
```tsx
function ScrollTracker() {
const [scrollY, setScrollY] = useState(0);
useEffect(() => {
const handler = () => setScrollY(window.scrollY);
window.addEventListener("scroll", handler, { passive: true });
return () => window.removeEventListener("scroll", handler);
}, []);
}
```
**Correct (non-blocking updates):**
```tsx
import { startTransition } from "react";
function ScrollTracker() {
const [scrollY, setScrollY] = useState(0);
useEffect(() => {
const handler = () => {
startTransition(() => setScrollY(window.scrollY));
};
window.addEventListener("scroll", handler, { passive: true });
return () => window.removeEventListener("scroll", handler);
}, []);
}
```

View File

@@ -0,0 +1,73 @@
---
title: Use useRef for Transient Values
impact: MEDIUM
impactDescription: avoids unnecessary re-renders on frequent updates
tags: rerender, useref, state, performance
---
## Use useRef for Transient Values
When a value changes frequently and you don't want a re-render on every update (e.g., mouse trackers, intervals, transient flags), store it in `useRef` instead of `useState`. Keep component state for UI; use refs for temporary DOM-adjacent values. Updating a ref does not trigger a re-render.
**Incorrect (renders every update):**
```tsx
function Tracker() {
const [lastX, setLastX] = useState(0);
useEffect(() => {
const onMove = (e: MouseEvent) => setLastX(e.clientX);
window.addEventListener("mousemove", onMove);
return () => window.removeEventListener("mousemove", onMove);
}, []);
return (
<div
style={{
position: "fixed",
top: 0,
left: lastX,
width: 8,
height: 8,
background: "black",
}}
/>
);
}
```
**Correct (no re-render for tracking):**
```tsx
function Tracker() {
const lastXRef = useRef(0);
const dotRef = useRef<HTMLDivElement>(null);
useEffect(() => {
const onMove = (e: MouseEvent) => {
lastXRef.current = e.clientX;
const node = dotRef.current;
if (node) {
node.style.transform = `translateX(${e.clientX}px)`;
}
};
window.addEventListener("mousemove", onMove);
return () => window.removeEventListener("mousemove", onMove);
}, []);
return (
<div
ref={dotRef}
style={{
position: "fixed",
top: 0,
left: 0,
width: 8,
height: 8,
background: "black",
transform: "translateX(0px)",
}}
/>
);
}
```

View File

@@ -0,0 +1,73 @@
---
title: Use after() for Non-Blocking Operations
impact: MEDIUM
impactDescription: faster response times
tags: server, async, logging, analytics, side-effects
---
## Use after() for Non-Blocking Operations
Use Next.js's `after()` to schedule work that should execute after a response is sent. This prevents logging, analytics, and other side effects from blocking the response.
**Incorrect (blocks response):**
```tsx
import { logUserAction } from "@/app/utils";
export async function POST(request: Request) {
// Perform mutation
await updateDatabase(request);
// Logging blocks the response
const userAgent = request.headers.get("user-agent") || "unknown";
await logUserAction({ userAgent });
return new Response(JSON.stringify({ status: "success" }), {
status: 200,
headers: { "Content-Type": "application/json" },
});
}
```
**Correct (non-blocking):**
```tsx
import { cookies, headers } from "next/headers";
import { after } from "next/server";
import { logUserAction } from "@/app/utils";
export async function POST(request: Request) {
// Perform mutation
await updateDatabase(request);
// Log after response is sent
after(async () => {
const userAgent = (await headers()).get("user-agent") || "unknown";
const sessionCookie = (await cookies()).get("session-id")?.value || "anonymous";
logUserAction({ sessionCookie, userAgent });
});
return new Response(JSON.stringify({ status: "success" }), {
status: 200,
headers: { "Content-Type": "application/json" },
});
}
```
The response is sent immediately while logging happens in the background.
**Common use cases:**
- Analytics tracking
- Audit logging
- Sending notifications
- Cache invalidation
- Cleanup tasks
**Important notes:**
- `after()` runs even if the response fails or redirects
- Works in Server Actions, Route Handlers, and Server Components
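Since `after()` also works in Server Actions, the same pattern applies there. A minimal sketch (the `saveFeedback` and `trackEvent` helpers are assumed):
```tsx
"use server";
import { after } from "next/server";
import { saveFeedback } from "@/app/feedback"; // assumed persistence helper
import { trackEvent } from "@/app/analytics"; // assumed analytics helper

export async function submitFeedback(formData: FormData) {
  // Perform the mutation before responding
  await saveFeedback(formData);

  // Runs after the response has finished streaming to the client
  after(() => {
    trackEvent("feedback_submitted");
  });

  return { success: true };
}
```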
Reference: [https://nextjs.org/docs/app/api-reference/functions/after](https://nextjs.org/docs/app/api-reference/functions/after)

View File

@@ -0,0 +1,96 @@
---
title: Authenticate Server Actions Like API Routes
impact: CRITICAL
impactDescription: prevents unauthorized access to server mutations
tags: server, server-actions, authentication, security, authorization
---
## Authenticate Server Actions Like API Routes
**Prevent unauthorized access to server mutations**
Server Actions (functions with `"use server"`) are exposed as public endpoints, just like API routes. Always verify authentication and authorization **inside** each Server Action—do not rely solely on middleware, layout guards, or page-level checks, as Server Actions can be invoked directly.
Next.js documentation explicitly states: "Treat Server Actions with the same security considerations as public-facing API endpoints, and verify if the user is allowed to perform a mutation."
**Incorrect (no authentication check):**
```typescript
"use server";
export async function deleteUser(userId: string) {
// Anyone can call this! No auth check
await db.user.delete({ where: { id: userId } });
return { success: true };
}
```
**Correct (authentication inside the action):**
```typescript
"use server";
import { verifySession } from "@/lib/auth";
import { unauthorized } from "@/lib/errors";
export async function deleteUser(userId: string) {
// Always check auth inside the action
const session = await verifySession();
if (!session) {
throw unauthorized("Must be logged in");
}
// Check authorization too
if (session.user.role !== "admin" && session.user.id !== userId) {
throw unauthorized("Cannot delete other users");
}
await db.user.delete({ where: { id: userId } });
return { success: true };
}
```
**With input validation:**
```typescript
"use server";
import { z } from "zod";
import { verifySession } from "@/lib/auth";
const updateProfileSchema = z.object({
userId: z.string().uuid(),
name: z.string().min(1).max(100),
email: z.string().email(),
});
export async function updateProfile(data: unknown) {
// Validate input first
const validated = updateProfileSchema.parse(data);
// Then authenticate
const session = await verifySession();
if (!session) {
throw new Error("Unauthorized");
}
// Then authorize
if (session.user.id !== validated.userId) {
throw new Error("Can only update own profile");
}
// Finally perform the mutation
await db.user.update({
where: { id: validated.userId },
data: {
name: validated.name,
email: validated.email,
},
});
return { success: true };
}
```
Reference: [https://nextjs.org/docs/app/guides/authentication](https://nextjs.org/docs/app/guides/authentication)

View File

@@ -0,0 +1,41 @@
---
title: Cross-Request LRU Caching
impact: HIGH
impactDescription: caches across requests
tags: server, cache, lru, cross-request
---
## Cross-Request LRU Caching
`React.cache()` only works within one request. For data shared across sequential requests (user clicks button A then button B), use an LRU cache.
**Implementation:**
```typescript
import { LRUCache } from "lru-cache";
const cache = new LRUCache<string, any>({
max: 1000,
ttl: 5 * 60 * 1000, // 5 minutes
});
export async function getUser(id: string) {
const cached = cache.get(id);
if (cached) return cached;
const user = await db.user.findUnique({ where: { id } });
cache.set(id, user);
return user;
}
// Request 1: DB query, result cached
// Request 2: cache hit, no DB query
```
Use when sequential user actions hit multiple endpoints needing the same data within seconds.
**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** LRU caching is especially effective because multiple concurrent requests can share the same function instance and cache. This means the cache persists across requests without needing external storage like Redis.
**In traditional serverless:** Each invocation runs in isolation, so consider Redis for cross-process caching.
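A sketch of the same read-through pattern backed by Redis, assuming the `ioredis` client and the `db` handle from the example above:
```typescript
import Redis from "ioredis";

const redis = new Redis(process.env.REDIS_URL!);

export async function getUserShared(id: string) {
  const cached = await redis.get(`user:${id}`);
  if (cached) return JSON.parse(cached);

  const user = await db.user.findUnique({ where: { id } });
  await redis.set(`user:${id}`, JSON.stringify(user), "EX", 300); // 5 minute TTL
  return user;
}
```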
Reference: [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache)

View File

@@ -0,0 +1,76 @@
---
title: Per-Request Deduplication with React.cache()
impact: MEDIUM
impactDescription: deduplicates within request
tags: server, cache, react-cache, deduplication
---
## Per-Request Deduplication with React.cache()
Use `React.cache()` for server-side request deduplication. Authentication and database queries benefit most.
**Usage:**
```typescript
import { cache } from "react";
export const getCurrentUser = cache(async () => {
const session = await auth();
if (!session?.user?.id) return null;
return await db.user.findUnique({
where: { id: session.user.id },
});
});
```
Within a single request, multiple calls to `getCurrentUser()` execute the query only once.
**Avoid inline objects as arguments:**
`React.cache()` compares arguments by reference (`Object.is`) to determine cache hits. Inline objects create a new reference on every call, so they never hit the cache.
**Incorrect (always cache miss):**
```typescript
const getUser = cache(async (params: { uid: number }) => {
return await db.user.findUnique({ where: { id: params.uid } });
});
// Each call creates new object, never hits cache
getUser({ uid: 1 });
getUser({ uid: 1 }); // Cache miss, runs query again
```
**Correct (cache hit):**
```typescript
const getUser = cache(async (uid: number) => {
return await db.user.findUnique({ where: { id: uid } });
});
// Primitive args use value equality
getUser(1);
getUser(1); // Cache hit, returns cached result
```
If you must pass objects, pass the same reference:
```typescript
const params = { uid: 1 };
getUser(params); // Query runs
getUser(params); // Cache hit (same reference)
```
**Next.js-Specific Note:**
In Next.js, the `fetch` API is automatically extended with request memoization. Requests with the same URL and options are automatically deduplicated within a single request, so you don't need `React.cache()` for `fetch` calls. However, `React.cache()` is still essential for other async tasks:
- Database queries (Prisma, Drizzle, etc.)
- Heavy computations
- Authentication checks
- File system operations
- Any non-fetch async work
Use `React.cache()` to deduplicate these operations across your component tree.
Reference: [React.cache documentation](https://react.dev/reference/react/cache)

View File

@@ -0,0 +1,83 @@
---
title: Parallel Data Fetching with Component Composition
impact: CRITICAL
impactDescription: eliminates server-side waterfalls
tags: server, rsc, parallel-fetching, composition
---
## Parallel Data Fetching with Component Composition
React Server Components execute sequentially within a tree. Restructure with composition to parallelize data fetching.
**Incorrect (Sidebar waits for Page's fetch to complete):**
```tsx
export default async function Page() {
const header = await fetchHeader();
return (
<div>
<div>{header}</div>
<Sidebar />
</div>
);
}
async function Sidebar() {
const items = await fetchSidebarItems();
return <nav>{items.map(renderItem)}</nav>;
}
```
**Correct (both fetch simultaneously):**
```tsx
async function Header() {
const data = await fetchHeader();
return <div>{data}</div>;
}
async function Sidebar() {
const items = await fetchSidebarItems();
return <nav>{items.map(renderItem)}</nav>;
}
export default function Page() {
return (
<div>
<Header />
<Sidebar />
</div>
);
}
```
**Alternative with children prop:**
```tsx
async function Header() {
const data = await fetchHeader();
return <div>{data}</div>;
}
async function Sidebar() {
const items = await fetchSidebarItems();
return <nav>{items.map(renderItem)}</nav>;
}
function Layout({ children }: { children: ReactNode }) {
return (
<div>
<Header />
{children}
</div>
);
}
export default function Page() {
return (
<Layout>
<Sidebar />
</Layout>
);
}
```

View File

@@ -0,0 +1,38 @@
---
title: Minimize Serialization at RSC Boundaries
impact: HIGH
impactDescription: reduces data transfer size
tags: server, rsc, serialization, props
---
## Minimize Serialization at RSC Boundaries
The React Server/Client boundary serializes all object properties into strings and embeds them in the HTML response and subsequent RSC requests. This serialized data directly impacts page weight and load time, so **size matters a lot**. Only pass fields that the client actually uses.
**Incorrect (serializes all 50 fields):**
```tsx
// page.tsx (Server Component)
async function Page() {
  const user = await fetchUser(); // 50 fields
  return <Profile user={user} />;
}

// profile.tsx (Client Component)
"use client";
function Profile({ user }: { user: User }) {
  return <div>{user.name}</div>; // uses 1 field
}
```
**Correct (serializes only 1 field):**
```tsx
// page.tsx (Server Component)
async function Page() {
  const user = await fetchUser();
  return <Profile name={user.name} />;
}

// profile.tsx (Client Component)
"use client";
function Profile({ name }: { name: string }) {
  return <div>{name}</div>;
}
```

View File

@@ -0,0 +1,352 @@
# Create New Question Element
Use this command to scaffold a new question element component in `packages/survey-ui/src/elements/`.
## Usage
When creating a new question type (e.g., `single-select`, `rating`, `nps`), follow these steps:
1. **Create the component file** `{question-type}.tsx` with this structure:
```typescript
import * as React from "react";
import { ElementHeader } from "../components/element-header";
import { useTextDirection } from "../hooks/use-text-direction";
import { cn } from "../lib/utils";
interface {QuestionType}Props {
/** Unique identifier for the element container */
elementId: string;
/** The main question or prompt text displayed as the headline */
headline: string;
/** Optional descriptive text displayed below the headline */
description?: string;
/** Unique identifier for the input/control group */
inputId: string;
/** Current value */
value?: {ValueType};
/** Callback function called when the value changes */
onChange: (value: {ValueType}) => void;
/** Whether the field is required (shows asterisk indicator) */
required?: boolean;
/** Error message to display */
errorMessage?: string;
/** Text direction: 'ltr' (left-to-right), 'rtl' (right-to-left), or 'auto' (auto-detect from content) */
dir?: "ltr" | "rtl" | "auto";
/** Whether the controls are disabled */
disabled?: boolean;
// Add question-specific props here
}
function {QuestionType}({
elementId,
headline,
description,
inputId,
value,
onChange,
required = false,
errorMessage,
dir = "auto",
disabled = false,
// ... question-specific props
}: {QuestionType}Props): React.JSX.Element {
// Ensure value is always the correct type (handle undefined/null)
const currentValue = value ?? {defaultValue};
// Detect text direction from content
const detectedDir = useTextDirection({
dir,
textContent: [headline, description ?? "", /* add other text content from question */],
});
return (
<div className="w-full space-y-4" id={elementId} dir={detectedDir}>
{/* Headline */}
<ElementHeader
headline={headline}
description={description}
required={required}
htmlFor={inputId}
/>
{/* Question-specific controls */}
{/* TODO: Add your question-specific UI here */}
{/* Error message */}
{errorMessage && (
<div className="text-destructive flex items-center gap-1 text-sm" dir={detectedDir}>
<span>{errorMessage}</span>
</div>
)}
</div>
);
}
export { {QuestionType} };
export type { {QuestionType}Props };
```
2. **Create the Storybook file** `{question-type}.stories.tsx`:
```typescript
import type { Decorator, Meta, StoryObj } from "@storybook/react";
import React from "react";
import { {QuestionType}, type {QuestionType}Props } from "./{question-type}";
// Styling options for the StylingPlayground story
interface StylingOptions {
// Question styling
questionHeadlineFontFamily: string;
questionHeadlineFontSize: string;
questionHeadlineFontWeight: string;
questionHeadlineColor: string;
questionDescriptionFontFamily: string;
questionDescriptionFontWeight: string;
questionDescriptionFontSize: string;
questionDescriptionColor: string;
// Add component-specific styling options here
}
type StoryProps = {QuestionType}Props & Partial<StylingOptions>;
const meta: Meta<StoryProps> = {
title: "UI-package/Elements/{QuestionType}",
component: {QuestionType},
parameters: {
layout: "centered",
docs: {
description: {
component: "A complete {question type} question element...",
},
},
},
tags: ["autodocs"],
argTypes: {
headline: {
control: "text",
description: "The main question text",
table: { category: "Content" },
},
description: {
control: "text",
description: "Optional description or subheader text",
table: { category: "Content" },
},
value: {
control: "object",
description: "Current value",
table: { category: "State" },
},
required: {
control: "boolean",
description: "Whether the field is required",
table: { category: "Validation" },
},
errorMessage: {
control: "text",
description: "Error message to display",
table: { category: "Validation" },
},
dir: {
control: { type: "select" },
options: ["ltr", "rtl", "auto"],
description: "Text direction for RTL support",
table: { category: "Layout" },
},
disabled: {
control: "boolean",
description: "Whether the controls are disabled",
table: { category: "State" },
},
onChange: {
action: "changed",
table: { category: "Events" },
},
// Add question-specific argTypes here
},
};
export default meta;
type Story = StoryObj<StoryProps>;
// Decorator to apply CSS variables from story args
const withCSSVariables: Decorator<StoryProps> = (Story, context) => {
const args = context.args as StoryProps;
const {
questionHeadlineFontFamily,
questionHeadlineFontSize,
questionHeadlineFontWeight,
questionHeadlineColor,
questionDescriptionFontFamily,
questionDescriptionFontSize,
questionDescriptionFontWeight,
questionDescriptionColor,
// Extract component-specific styling options
} = args;
const cssVarStyle: React.CSSProperties & Record<string, string | undefined> = {
"--fb-question-headline-font-family": questionHeadlineFontFamily,
"--fb-question-headline-font-size": questionHeadlineFontSize,
"--fb-question-headline-font-weight": questionHeadlineFontWeight,
"--fb-question-headline-color": questionHeadlineColor,
"--fb-question-description-font-family": questionDescriptionFontFamily,
"--fb-question-description-font-size": questionDescriptionFontSize,
"--fb-question-description-font-weight": questionDescriptionFontWeight,
"--fb-question-description-color": questionDescriptionColor,
// Add component-specific CSS variables
};
return (
<div style={cssVarStyle} className="w-[600px]">
<Story />
</div>
);
};
export const StylingPlayground: Story = {
args: {
headline: "Example question?",
description: "Example description",
// Default styling values
questionHeadlineFontFamily: "system-ui, sans-serif",
questionHeadlineFontSize: "1.125rem",
questionHeadlineFontWeight: "600",
questionHeadlineColor: "#1e293b",
questionDescriptionFontFamily: "system-ui, sans-serif",
questionDescriptionFontSize: "0.875rem",
questionDescriptionFontWeight: "400",
questionDescriptionColor: "#64748b",
// Add component-specific default values
},
argTypes: {
// Question styling argTypes
questionHeadlineFontFamily: {
control: "text",
table: { category: "Question Styling" },
},
questionHeadlineFontSize: {
control: "text",
table: { category: "Question Styling" },
},
questionHeadlineFontWeight: {
control: "text",
table: { category: "Question Styling" },
},
questionHeadlineColor: {
control: "color",
table: { category: "Question Styling" },
},
questionDescriptionFontFamily: {
control: "text",
table: { category: "Question Styling" },
},
questionDescriptionFontSize: {
control: "text",
table: { category: "Question Styling" },
},
questionDescriptionFontWeight: {
control: "text",
table: { category: "Question Styling" },
},
questionDescriptionColor: {
control: "color",
table: { category: "Question Styling" },
},
// Add component-specific argTypes
},
decorators: [withCSSVariables],
};
export const Default: Story = {
args: {
headline: "Example question?",
// Add default props
},
};
export const WithDescription: Story = {
args: {
headline: "Example question?",
description: "Example description text",
},
};
export const Required: Story = {
args: {
headline: "Example question?",
required: true,
},
};
export const WithError: Story = {
args: {
headline: "Example question?",
errorMessage: "This field is required",
required: true,
},
};
export const Disabled: Story = {
args: {
headline: "Example question?",
disabled: true,
},
};
export const RTL: Story = {
args: {
headline: "مثال على السؤال؟",
description: "مثال على الوصف",
// Add RTL-specific props
},
};
```
3. **Add CSS variables** to `packages/survey-ui/src/styles/globals.css` if needed:
```css
/* Component-specific CSS variables */
--fb-{component}-{property}: {default-value};
```
4. **Export from** `packages/survey-ui/src/index.ts`:
```typescript
export { {QuestionType}, type {QuestionType}Props } from "./elements/{question-type}";
```
## Key Requirements
- ✅ Always use `ElementHeader` component for headline/description
- ✅ Always use `useTextDirection` hook for RTL support
- ✅ Always handle undefined/null values safely (e.g., `Array.isArray(value) ? value : []`)
- ✅ Always include error message display if applicable
- ✅ Always support disabled state if applicable
- ✅ Always add JSDoc comments to props interface
- ✅ Always create Storybook stories with styling playground
- ✅ Always export types from component file
- ✅ Always add to index.ts exports
## Examples
- `open-text.tsx` - Text input/textarea question (string value)
- `multi-select.tsx` - Multiple checkbox selection (string[] value)
## Checklist
When creating a new question element, verify:
- [ ] Component file created with proper structure
- [ ] Props interface with JSDoc comments for all props
- [ ] Uses `ElementHeader` component (don't duplicate header logic)
- [ ] Uses `useTextDirection` hook for RTL support
- [ ] Handles undefined/null values safely
- [ ] Storybook file created with styling playground
- [ ] Includes common stories: Default, WithDescription, Required, WithError, Disabled, RTL
- [ ] CSS variables added to `globals.css` if component needs custom styling
- [ ] Exported from `index.ts` with types
- [ ] TypeScript types properly exported
- [ ] Error message display included if applicable
- [ ] Disabled state supported if applicable

View File

@@ -1,61 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Build & Deployment Best Practices
## Build Process
### Running Builds
- Use `pnpm build` from project root for full build
- Monitor for React hooks warnings and fix them immediately
- Ensure all TypeScript errors are resolved before deployment
### Common Build Issues & Fixes
#### React Hooks Warnings
- Capture ref values in variables within useEffect cleanup
- Avoid accessing `.current` directly in cleanup functions
- Pattern for fixing ref cleanup warnings:
```typescript
useEffect(() => {
const currentRef = myRef.current;
return () => {
if (currentRef) {
currentRef.cleanup();
}
};
}, []);
```
#### Test Failures During Build
- Ensure all test mocks include required constants like `SESSION_MAX_AGE`
- Mock Next.js navigation hooks properly: `useParams`, `useRouter`, `useSearchParams` (see the mock sketch after this list)
- Remove unused imports and constants from test files
- Use literal values instead of imported constants when the constant isn't actually needed
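A minimal sketch of the navigation-hook mocks mentioned above, assuming Vitest and the App Router hooks from `next/navigation`:
```typescript
import { vi } from "vitest";

vi.mock("next/navigation", () => ({
  useParams: () => ({ environmentId: "env_123", surveyId: "survey_456" }), // example params
  useRouter: () => ({ push: vi.fn(), replace: vi.fn(), refresh: vi.fn() }),
  useSearchParams: () => new URLSearchParams(),
}));
```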
### Test Execution
- Run `pnpm test` to execute all tests
- Use `pnpm test -- --run filename.test.tsx` for specific test files
- Fix test failures before merging code
- Ensure 100% test coverage for new components
### Performance Monitoring
- Monitor build times and optimize if necessary
- Watch for memory usage during builds
- Use proper caching strategies for faster rebuilds
### Deployment Checklist
1. All tests passing
2. Build completes without warnings
3. TypeScript compilation successful
4. No linter errors
5. Database migrations applied (if any)
6. Environment variables configured
### EKS Deployment Considerations
- Ensure latest code is deployed to all pods
- Monitor AWS RDS Performance Insights for database issues
- Verify environment-specific configurations
- Check pod health and resource usage

View File

@@ -1,414 +0,0 @@
---
description: Caching rules for performance improvements
globs:
alwaysApply: false
---
# Cache Optimization Patterns for Formbricks
## Cache Strategy Overview
Formbricks uses a **hybrid caching approach** optimized for enterprise scale:
- **Redis** for persistent cross-request caching
- **React `cache()`** for request-level deduplication
- **NO Next.js `unstable_cache()`** - avoid for reliability
## Key Files
### Core Cache Infrastructure
- [apps/web/modules/cache/lib/service.ts](mdc:apps/web/modules/cache/lib/service.ts) - Redis cache service
- [apps/web/modules/cache/lib/withCache.ts](mdc:apps/web/modules/cache/lib/withCache.ts) - Cache wrapper utilities
- [apps/web/modules/cache/lib/cacheKeys.ts](mdc:apps/web/modules/cache/lib/cacheKeys.ts) - Enterprise cache key patterns and utilities
### Environment State Caching (Critical Endpoint)
- [apps/web/app/api/v1/client/[environmentId]/environment/route.ts](mdc:apps/web/app/api/v1/client/[environmentId]/environment/route.ts) - Main endpoint serving hundreds of thousands of SDK clients
- [apps/web/app/api/v1/client/[environmentId]/environment/lib/data.ts](mdc:apps/web/app/api/v1/client/[environmentId]/environment/lib/data.ts) - Optimized data layer with caching
## Enterprise-Grade Cache Key Patterns
**Always use** the `createCacheKey` utilities from [cacheKeys.ts](mdc:apps/web/modules/cache/lib/cacheKeys.ts):
```typescript
// ✅ Correct patterns
createCacheKey.environment.state(environmentId) // "fb:env:abc123:state"
createCacheKey.organization.billing(organizationId) // "fb:org:xyz789:billing"
createCacheKey.license.status(organizationId) // "fb:license:org123:status"
createCacheKey.user.permissions(userId, orgId) // "fb:user:456:org:123:permissions"
// ❌ Never use flat keys - collision-prone
"environment_abc123"
"user_data_456"
```
## When to Use Each Cache Type
### Use React `cache()` for Request Deduplication
```typescript
// ✅ Prevents multiple calls within same request
export const getEnterpriseLicense = reactCache(async () => {
// Complex license validation logic
});
```
### Use `withCache()` for Simple Database Queries
```typescript
// ✅ Simple caching with automatic fallback (TTL in milliseconds)
export const getActionClasses = (environmentId: string) => {
return withCache(() => fetchActionClassesFromDB(environmentId), {
key: createCacheKey.environment.actionClasses(environmentId),
ttl: 60 * 30 * 1000, // 30 minutes in milliseconds
})();
};
```
### Use Explicit Redis Cache for Complex Business Logic
```typescript
// ✅ Full control for high-stakes endpoints
export const getEnvironmentState = async (environmentId: string) => {
const cached = await environmentStateCache.getEnvironmentState(environmentId);
if (cached) return cached;
const fresh = await buildComplexState(environmentId);
await environmentStateCache.setEnvironmentState(environmentId, fresh);
return fresh;
};
```
## Caching Decision Framework
### When TO Add Caching
```typescript
// ✅ Expensive operations that benefit from caching
- Database queries (>10ms typical)
- External API calls (>50ms typical)
- Complex computations (>5ms)
- File system operations
- Heavy data transformations
// Example: Database query with complex joins (TTL in milliseconds)
export const getEnvironmentWithDetails = (environmentId: string) =>
  withCache(
    async () => {
      return prisma.environment.findUnique({
        where: { id: environmentId },
        include: { /* complex joins */ }
      });
    },
    { key: createCacheKey.environment.details(environmentId), ttl: 60 * 30 * 1000 } // 30 minutes
  )();
```
### When NOT to Add Caching
```typescript
// ❌ Don't cache these operations - minimal overhead
- Simple property access (<0.1ms)
- Basic transformations (<1ms)
- Functions that just call already-cached functions
- Pure computation without I/O
// ❌ Bad example: Redundant caching
const getCachedLicenseFeatures = withCache(
async () => {
const license = await getEnterpriseLicense(); // Already cached!
return license.active ? license.features : null; // Just property access
},
{ key: "license-features", ttl: 1800 * 1000 } // 30 minutes in milliseconds
);
// ✅ Good example: Simple and efficient
const getLicenseFeatures = async () => {
const license = await getEnterpriseLicense(); // Already cached
return license.active ? license.features : null; // 0.1ms overhead
};
```
### Computational Overhead Analysis
Before adding caching, analyze the overhead:
```typescript
// ✅ High overhead - CACHE IT
- Database queries: ~10-100ms
- External APIs: ~50-500ms
- File I/O: ~5-50ms
- Complex algorithms: >5ms
// ❌ Low overhead - DON'T CACHE
- Property access: ~0.001ms
- Simple lookups: ~0.1ms
- Basic validation: ~1ms
- Type checks: ~0.01ms
// Example decision tree:
const expensiveOperation = async () => {
return prisma.query(); // 50ms - CACHE IT
};
const cheapOperation = (data: any) => {
return data.property; // 0.001ms - DON'T CACHE
};
```
### Avoid Cache Wrapper Anti-Pattern
```typescript
// ❌ Don't create wrapper functions just for caching
const getCachedUserPermissions = withCache(
async (userId: string) => getUserPermissions(userId),
{ key: createCacheKey.user.permissions(userId), ttl: 3600 * 1000 } // 1 hour in milliseconds
);
// ✅ Add caching directly to the original function
export const getUserPermissions = (userId: string) =>
  withCache(
    async () => {
      return prisma.user.findUnique({
        where: { id: userId },
        include: { permissions: true }
      });
    },
    { key: createCacheKey.user.permissions(userId), ttl: 3600 * 1000 } // 1 hour in milliseconds
  )();
```
## TTL Coordination Strategy
### Multi-Layer Cache Coordination
For endpoints serving client SDKs, coordinate TTLs across layers:
```typescript
// Client SDK cache (expiresAt) - longest TTL for fewer requests
const CLIENT_TTL = 60 * 60; // 1 hour (seconds for client)
// Server Redis cache - shorter TTL ensures fresh data for clients
const SERVER_TTL = 60 * 30 * 1000; // 30 minutes in milliseconds
// HTTP cache headers (seconds)
const BROWSER_TTL = 60 * 60; // 1 hour (max-age)
const CDN_TTL = 60 * 30; // 30 minutes (s-maxage)
const CORS_TTL = 60 * 60; // 1 hour (balanced approach)
```
### Standard TTL Guidelines (in milliseconds for cache-manager + Keyv)
```typescript
// Configuration data - rarely changes
const CONFIG_TTL = 60 * 60 * 24 * 1000; // 24 hours
// User data - moderate frequency
const USER_TTL = 60 * 60 * 2 * 1000; // 2 hours
// Survey data - changes moderately
const SURVEY_TTL = 60 * 15 * 1000; // 15 minutes
// Billing data - expensive to compute
const BILLING_TTL = 60 * 30 * 1000; // 30 minutes
// Action classes - infrequent changes
const ACTION_CLASS_TTL = 60 * 30 * 1000; // 30 minutes
```
## High-Frequency Endpoint Optimization
### Performance Patterns for High-Volume Endpoints
```typescript
// ✅ Optimized high-frequency endpoint pattern
export const GET = async (request: NextRequest, props: { params: Promise<{ id: string }> }) => {
const params = await props.params;
try {
// Simple validation (avoid Zod for high-frequency)
if (!params.id || typeof params.id !== 'string') {
return responses.badRequestResponse("ID is required", undefined, true);
}
// Single optimized query with caching
const data = await getOptimizedData(params.id);
return responses.successResponse(
{
data,
expiresAt: new Date(Date.now() + CLIENT_TTL * 1000), // SDK cache duration
},
true,
"public, s-maxage=1800, max-age=3600, stale-while-revalidate=1800, stale-if-error=3600"
);
} catch (err) {
// Simplified error handling for performance
if (err instanceof ResourceNotFoundError) {
return responses.notFoundResponse(err.resourceType, err.resourceId);
}
logger.error({ error: err, url: request.url }, "Error in high-frequency endpoint");
return responses.internalServerErrorResponse(err.message, true);
}
};
```
### Avoid These Performance Anti-Patterns
```typescript
// ❌ Avoid for high-frequency endpoints
const inputValidation = ZodSchema.safeParse(input); // Too slow
const startTime = Date.now(); logger.debug(...); // Logging overhead
const { data, revalidateEnvironment } = await get(); // Complex return types
```
### CORS Optimization
```typescript
// ✅ Balanced CORS caching (not too aggressive)
export const OPTIONS = async (): Promise<Response> => {
return responses.successResponse(
{},
true,
"public, s-maxage=3600, max-age=3600" // 1 hour balanced approach
);
};
```
## Redis Cache Migration from Next.js
### Avoid Legacy Next.js Patterns
```typescript
// ❌ Old Next.js unstable_cache pattern (avoid)
const getCachedData = unstable_cache(
async (id) => fetchData(id),
['cache-key'],
{ tags: ['environment'], revalidate: 900 }
);
// ❌ Don't use revalidateEnvironment flags with Redis
return { data, revalidateEnvironment: true }; // This gets cached incorrectly!
// ✅ New Redis pattern with withCache (TTL in milliseconds)
export const getCachedData = (id: string) =>
withCache(
() => fetchData(id),
{
key: createCacheKey.environment.data(id),
ttl: 60 * 15 * 1000, // 15 minutes in milliseconds
}
)();
```
### Remove Revalidation Logic
When migrating from Next.js `unstable_cache`:
- Remove `revalidateEnvironment` or similar flags
- Remove tag-based invalidation logic
- Use TTL-based expiration instead
- Handle one-time updates (like `appSetupCompleted`) directly in cache
## Data Layer Optimization
### Single Query Pattern
```typescript
// ✅ Optimize with single database query
export const getOptimizedEnvironmentData = async (environmentId: string) => {
return prisma.environment.findUniqueOrThrow({
where: { id: environmentId },
include: {
project: {
select: { id: true, recontactDays: true, /* ... */ }
},
organization: {
select: { id: true, billing: true }
},
surveys: {
where: { status: "inProgress" },
select: { id: true, name: true, /* ... */ }
},
actionClasses: {
select: { id: true, name: true, /* ... */ }
}
}
});
};
// ❌ Avoid multiple separate queries
const environment = await getEnvironment(id);
const organization = await getOrganization(environment.organizationId);
const surveys = await getSurveys(id);
const actionClasses = await getActionClasses(id);
```
## Invalidation Best Practices
**Always use explicit key-based invalidation:**
```typescript
// ✅ Clear and debuggable
await invalidateCache(createCacheKey.environment.state(environmentId));
await invalidateCache([
createCacheKey.environment.surveys(environmentId),
createCacheKey.environment.actionClasses(environmentId)
]);
// ❌ Avoid complex tag systems
await invalidateByTags(["environment", "survey"]); // Don't do this
```
## Critical Performance Targets
### High-Frequency Endpoint Goals
- **Cache hit ratio**: >85%
- **Response time P95**: <200ms
- **Database load reduction**: >60%
- **HTTP cache duration**: 1hr browser, 30min Cloudflare
- **SDK refresh interval**: 1 hour with 30min server cache
### Performance Monitoring
- Use **existing elastic cache analytics** for metrics
- Log cache errors and warnings (not debug info)
- Track database query reduction
- Monitor response times for cached endpoints
- **Avoid performance logging** in high-frequency endpoints
## Error Handling Pattern
Always provide fallback to fresh data on cache errors:
```typescript
try {
const cached = await cache.get(key);
if (cached) return cached;
const fresh = await fetchFresh();
await cache.set(key, fresh, ttl); // ttl in milliseconds
return fresh;
} catch (error) {
// ✅ Always fallback to fresh data
logger.warn("Cache error, fetching fresh", { key, error });
return fetchFresh();
}
```
## Common Pitfalls to Avoid
1. **Never use Next.js `unstable_cache()`** - unreliable in production
2. **Don't use revalidation flags with Redis** - they get cached incorrectly
3. **Avoid Zod validation** for simple parameters in high-frequency endpoints
4. **Don't add performance logging** to high-frequency endpoints
5. **Coordinate TTLs** between client and server caches
6. **Don't over-engineer** with complex tag systems
7. **Avoid caching rapidly changing data** (real-time metrics)
8. **Always validate cache keys** to prevent collisions
9. **Don't add redundant caching layers** - analyze computational overhead first
10. **Avoid cache wrapper functions** - add caching directly to expensive operations
11. **Don't cache property access or simple transformations** - overhead is negligible
12. **Analyze the full call chain** before adding caching to avoid double-caching
13. **Remember TTL is in milliseconds** for cache-manager + Keyv stack (not seconds)
## Monitoring Strategy
- Use **existing elastic cache analytics** for metrics
- Log cache errors and warnings
- Track database query reduction
- Monitor response times for cached endpoints
- **Don't add custom metrics** that duplicate existing monitoring
## Important Notes
### TTL Units
- **cache-manager + Keyv**: TTL in **milliseconds**
- **Direct Redis commands**: TTL in **seconds** (EXPIRE, SETEX) or **milliseconds** (PEXPIRE, PSETEX)
- **HTTP cache headers**: TTL in **seconds** (max-age, s-maxage)
- **Client SDK**: TTL in **seconds** (expiresAt calculation)

View File

@@ -1,41 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Database Performance & Prisma Best Practices
## Critical Performance Rules
### Response Count Queries
- **NEVER** use `skip`/`offset` with `prisma.response.count()` - this causes expensive subqueries with OFFSET
- Always use only `where` clauses for count operations: `prisma.response.count({ where: { ... } })`
- For pagination, separate count queries from data queries
- Reference: [apps/web/lib/response/service.ts](mdc:apps/web/lib/response/service.ts) line 654-686
### Prisma Query Optimization
- Use proper indexes defined in [packages/database/schema.prisma](mdc:packages/database/schema.prisma)
- Leverage existing indexes: `@@index([surveyId, createdAt])`, `@@index([createdAt])`
- Use cursor-based pagination for large datasets instead of offset-based (sketched after this list)
- Cache frequently accessed data using React Cache and custom cache tags
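A minimal sketch of cursor-based pagination with Prisma (field names assumed from the response model):
```typescript
const pageSize = 50;
const responses = await prisma.response.findMany({
  where: { surveyId },
  orderBy: { createdAt: "desc" },
  take: pageSize,
  ...(cursor ? { cursor: { id: cursor }, skip: 1 } : {}), // skip the cursor row itself
});
const nextCursor = responses.length === pageSize ? responses.at(-1)?.id : undefined;
```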
### Date Range Filtering
- When filtering by `createdAt`, always use indexed queries
- Combine with `surveyId` for optimal performance: `{ surveyId, createdAt: { gte: start, lt: end } }`
- Avoid complex WHERE clauses that can't utilize indexes
### Count vs Data Separation
- Always separate count queries from data fetching queries
- Use `Promise.all()` to run count and data queries in parallel
- Example pattern from [apps/web/modules/api/v2/management/responses/lib/response.ts](mdc:apps/web/modules/api/v2/management/responses/lib/response.ts):
```typescript
const [responses, totalCount] = await Promise.all([
prisma.response.findMany(query),
prisma.response.count({ where: whereClause }),
]);
```
### Monitoring & Debugging
- Monitor AWS RDS Performance Insights for problematic queries
- Look for queries with OFFSET in count operations - these indicate performance issues
- Use proper error handling with `DatabaseError` for Prisma exceptions

View File

@@ -1,105 +0,0 @@
---
description: It should be used **only when the agent explicitly requests database schema-level details** to support tasks such as: writing/debugging Prisma queries, designing/reviewing data models, investigating multi-tenancy behavior, creating API endpoints, or understanding data relationships.
alwaysApply: false
---
# Formbricks Database Schema Reference
This rule provides a reference to the Formbricks database structure. For the most up-to-date and complete schema definitions, please refer to the schema.prisma file directly.
## Database Overview
Formbricks uses PostgreSQL with Prisma ORM. The schema is designed for multi-tenancy with strong data isolation between organizations.
### Core Hierarchy
```
Organization
└── Project
└── Environment (production/development)
├── Survey
├── Contact
├── ActionClass
└── Integration
```
## Schema Reference
For the complete and up-to-date database schema, please refer to:
- Main schema: `packages/database/schema.prisma`
- JSON type definitions: `packages/database/json-types.ts`
The schema.prisma file contains all model definitions, relationships, enums, and field types. The json-types.ts file contains TypeScript type definitions for JSON fields.
## Data Access Patterns
### Multi-tenancy
- All data is scoped by Organization
- Environment-level isolation for surveys and contacts
- Project-level grouping for related surveys
### Soft Deletion
Some models use soft deletion patterns:
- Check `isActive` fields where present
- Use proper filtering in queries
### Cascading Deletes
Configured cascade relationships:
- Organization deletion cascades to all child entities
- Survey deletion removes responses, displays, triggers
- Contact deletion removes attributes and responses
## Common Query Patterns
### Survey with Responses
```typescript
// Include response count and latest responses
const survey = await prisma.survey.findUnique({
where: { id: surveyId },
include: {
responses: {
take: 10,
orderBy: { createdAt: "desc" },
},
_count: {
select: { responses: true },
},
},
});
```
### Environment Scoping
```typescript
// Always scope by environment
const surveys = await prisma.survey.findMany({
where: {
environmentId: environmentId,
// Additional filters...
},
});
```
### Contact with Attributes
```typescript
const contact = await prisma.contact.findUnique({
where: { id: contactId },
include: {
attributes: {
include: {
attributeKey: true,
},
},
},
});
```
This schema supports Formbricks' core functionality: multi-tenant survey management, user targeting, response collection, and analysis, all while maintaining strict data isolation and security.

View File

@@ -1,23 +0,0 @@
---
description: Guideline for writing end-user facing documentation in the apps/docs folder
globs:
alwaysApply: false
---
Follow these instructions and guidelines when asked to write documentation in the apps/docs folder
Follow this structure to write the title and description, pick a matching icon, and insert them at the top of the MDX file:
---
title: "FEATURE NAME"
description: "1 concise sentence to describe WHEN the feature is being used and FOR WHAT BENEFIT."
icon: "link"
---
- Description: 1 concise sentence to describe WHEN the feature is being used and FOR WHAT BENEFIT.
- Make ample use of the Mintlify components you can find here https://mintlify.com/docs/llms.txt
- In all headlines, capitalize only the current feature name (in Camel Case) and nothing else
- If a feature is part of the Enterprise Edition, use this note:
<Note>
FEATURE NAME is part of the @Enterprise Edition.
</Note>

View File

@@ -1,152 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# EKS & ALB Optimization Guide for Error Reduction
## Infrastructure Overview
This project uses AWS EKS with Application Load Balancer (ALB) for the Formbricks application. The infrastructure has been optimized to minimize ELB 502/504 errors through careful configuration of connection handling, health checks, and pod lifecycle management.
## Key Infrastructure Files
### Terraform Configuration
- **Main Infrastructure**: [infra/terraform/main.tf](mdc:infra/terraform/main.tf) - EKS cluster, VPC, Karpenter, and core AWS resources
- **Monitoring**: [infra/terraform/cloudwatch.tf](mdc:infra/terraform/cloudwatch.tf) - CloudWatch alarms for 502/504 error tracking and alerting
- **Database**: [infra/terraform/rds.tf](mdc:infra/terraform/rds.tf) - Aurora PostgreSQL configuration
### Helm Configuration
- **Production**: [infra/formbricks-cloud-helm/values.yaml.gotmpl](mdc:infra/formbricks-cloud-helm/values.yaml.gotmpl) - Optimized ALB and pod configurations
- **Staging**: [infra/formbricks-cloud-helm/values-staging.yaml.gotmpl](mdc:infra/formbricks-cloud-helm/values-staging.yaml.gotmpl) - Staging environment with spot instances
- **Deployment**: [infra/formbricks-cloud-helm/helmfile.yaml.gotmpl](mdc:infra/formbricks-cloud-helm/helmfile.yaml.gotmpl) - Multi-environment Helm releases
## ALB Optimization Patterns
### Connection Handling Optimizations
```yaml
# Key ALB annotations for reducing 502/504 errors
alb.ingress.kubernetes.io/load-balancer-attributes: |
idle_timeout.timeout_seconds=120,
connection_logs.s3.enabled=false,
access_logs.s3.enabled=false
alb.ingress.kubernetes.io/target-group-attributes: |
deregistration_delay.timeout_seconds=30,
stickiness.enabled=false,
load_balancing.algorithm.type=least_outstanding_requests,
target_group_health.dns_failover.minimum_healthy_targets.count=1
```
### Health Check Configuration
- **Interval**: 15 seconds for faster detection of unhealthy targets
- **Timeout**: 5 seconds to prevent false positives
- **Thresholds**: 2 healthy, 3 unhealthy for balanced responsiveness
- **Path**: `/health` endpoint optimized for < 100ms response time
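The values above map to AWS Load Balancer Controller annotations; a sketch (annotation names from the controller, values matching the configuration described here):
```yaml
alb.ingress.kubernetes.io/healthcheck-path: /health
alb.ingress.kubernetes.io/healthcheck-interval-seconds: "15"
alb.ingress.kubernetes.io/healthcheck-timeout-seconds: "5"
alb.ingress.kubernetes.io/healthy-threshold-count: "2"
alb.ingress.kubernetes.io/unhealthy-threshold-count: "3"
```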
## Pod Lifecycle Management
### Graceful Shutdown Pattern
```yaml
# PreStop hook to allow connection draining
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 15"]
# Termination grace period for complete cleanup
terminationGracePeriodSeconds: 45
```
### Health Probe Strategy
- **Startup Probe**: 5s initial delay, 5s interval, max 60s startup time
- **Readiness Probe**: 10s delay, 10s interval for traffic readiness
- **Liveness Probe**: 30s delay, 30s interval for container health
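A sketch of these probe timings in the deployment spec (health path and container port assumed):
```yaml
startupProbe:
  httpGet: { path: /health, port: 3000 }
  initialDelaySeconds: 5
  periodSeconds: 5
  failureThreshold: 12 # allows up to ~60s of startup time
readinessProbe:
  httpGet: { path: /health, port: 3000 }
  initialDelaySeconds: 10
  periodSeconds: 10
livenessProbe:
  httpGet: { path: /health, port: 3000 }
  initialDelaySeconds: 30
  periodSeconds: 30
```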
### Rolling Update Configuration
```yaml
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 25% # Maintain capacity during updates
maxSurge: 50% # Allow faster rollouts
```
## Karpenter Node Management
### Node Lifecycle Optimization
- **Startup Taints**: Prevent traffic during node initialization
- **Graceful Shutdown**: 30s grace period for pod eviction
- **Consolidation Delay**: 60s to reduce unnecessary churn
- **Eviction Policies**: Configured for smooth pod migrations
### Instance Selection
- **Families**: c8g, c7g, m8g, m7g, r8g, r7g (ARM64 Graviton)
- **Sizes**: 2, 4, 8 vCPUs for cost optimization
- **Bottlerocket AMI**: Enhanced security and performance
## Monitoring & Alerting
### Critical ALB Metrics
1. **ELB 502 Errors**: Threshold 20 over 5 minutes
2. **ELB 504 Errors**: Threshold 15 over 5 minutes
3. **Target Connection Errors**: Threshold 50 over 5 minutes
4. **4XX Errors**: Threshold 100 over 10 minutes (client issues)
### Expected Improvements
- **60-80% reduction** in ELB 502 errors
- **Faster recovery** during pod restarts
- **Better connection reuse** efficiency
- **Improved autoscaling** responsiveness
## Deployment Patterns
### Infrastructure Updates
1. **Terraform First**: Apply infrastructure changes via [infra/deploy-improvements.sh](mdc:infra/deploy-improvements.sh)
2. **Helm Second**: Deploy application configurations
3. **Verification**: Check pod status, endpoints, and ALB health
4. **Monitoring**: Watch CloudWatch metrics for 24-48 hours
### Environment-Specific Configurations
- **Production**: On-demand instances, stricter resource limits
- **Staging**: Spot instances, rate limiting disabled, relaxed resources
## Troubleshooting Patterns
### 502 Error Investigation
1. Check pod readiness and health probe status
2. Verify ALB target group health
3. Review deregistration timing during deployments
4. Monitor connection pool utilization
### 504 Error Analysis
1. Check application response times
2. Verify timeout configurations (ALB: 120s, App: aligned)
3. Review database query performance
4. Monitor resource utilization during traffic spikes
### Connection Error Patterns
1. Verify Karpenter node lifecycle timing
2. Check pod termination grace periods
3. Review ALB connection draining settings
4. Monitor cluster autoscaling events
## Best Practices
### When Making Changes
- **Test in staging first** with same configurations
- **Monitor metrics** for 24-48 hours after changes
- **Use gradual rollouts** with proper health checks
- **Maintain ALB timeout alignment** across all layers
### Performance Optimization
- **Health endpoint** should respond < 100ms consistently
- **Connection pooling** aligned with ALB idle timeouts
- **Resource requests/limits** tuned for consistent performance
- **Graceful shutdown** implemented in application code
### Monitoring Strategy
- **Real-time alerts** for error rate spikes
- **Trend analysis** for connection patterns
- **Capacity planning** based on LCU usage
- **4XX pattern analysis** for client behavior insights

View File

@@ -1,332 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Formbricks Architecture & Patterns
## Monorepo Structure
### Apps Directory
- `apps/web/` - Main Next.js web application
- `packages/` - Shared packages and utilities
### Key Directories in Web App
```
apps/web/
├── app/ # Next.js 13+ app directory
│ ├── (app)/ # Main application routes
│ ├── (auth)/ # Authentication routes
│ ├── api/ # API routes
├── components/ # Shared components
├── lib/ # Utility functions and services
└── modules/ # Feature-specific modules
```
## Routing Patterns
### App Router Structure
The application uses Next.js 13+ app router with route groups:
```
(app)/environments/[environmentId]/
├── surveys/[surveyId]/
│ ├── (analysis)/ # Analysis views
│ │ ├── responses/ # Response management
│ │ ├── summary/ # Survey summary
│ │ └── hooks/ # Analysis-specific hooks
│ ├── edit/ # Survey editing
│ └── settings/ # Survey settings
```
### Dynamic Routes
- `[environmentId]` - Environment-specific routes
- `[surveyId]` - Survey-specific routes
## Service Layer Pattern
### Service Organization
Services are organized by domain in `apps/web/lib/`:
```typescript
// Example: Response service
// apps/web/lib/response/service.ts
export const getResponseCountAction = async ({
surveyId,
filterCriteria,
}: {
surveyId: string;
filterCriteria: any;
}) => {
// Service implementation
};
```
### Action Pattern
Server actions follow a consistent pattern:
```typescript
// Action wrapper for service calls
export const getResponseCountAction = async (params) => {
try {
const result = await responseService.getCount(params);
return { data: result };
} catch (error) {
return { error: error.message };
}
};
```
## Context Patterns
### Provider Structure
Context providers follow a consistent pattern:
```typescript
// Provider component
export const ResponseFilterProvider = ({ children }: { children: React.ReactNode }) => {
const [selectedFilter, setSelectedFilter] = useState(defaultFilter);
const value = {
selectedFilter,
setSelectedFilter,
// ... other state and methods
};
return (
<ResponseFilterContext.Provider value={value}>
{children}
</ResponseFilterContext.Provider>
);
};
// Hook for consuming context
export const useResponseFilter = () => {
const context = useContext(ResponseFilterContext);
if (!context) {
throw new Error('useResponseFilter must be used within ResponseFilterProvider');
}
return context;
};
```
### Context Composition
Multiple contexts are often composed together:
```typescript
// Layout component with multiple providers
export default function AnalysisLayout({ children }: { children: React.ReactNode }) {
return (
<ResponseFilterProvider>
<ResponseCountProvider>
{children}
</ResponseCountProvider>
</ResponseFilterProvider>
);
}
```
## Component Patterns
### Page Components
Page components are located in the app directory and follow this pattern:
```typescript
// apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/responses/page.tsx
export default function ResponsesPage() {
return (
<div>
<ResponsesTable />
<ResponsesPagination />
</div>
);
}
```
### Component Organization
- **Pages** - Route components in app directory
- **Components** - Reusable UI components
- **Modules** - Feature-specific components and logic
### Shared Components
Common components are in `apps/web/components/`:
- UI components (buttons, inputs, modals)
- Layout components (headers, sidebars)
- Data display components (tables, charts)
## Hook Patterns
### Custom Hook Structure
Custom hooks follow consistent patterns:
```typescript
export const useResponseCount = ({
survey,
initialCount
}: {
survey: TSurvey;
initialCount?: number;
}) => {
const [responseCount, setResponseCount] = useState(initialCount ?? 0);
const [isLoading, setIsLoading] = useState(false);
// Hook logic...
return {
responseCount,
isLoading,
refetch,
};
};
```
### Hook Dependencies
- Use context hooks for shared state
- Implement proper cleanup with AbortController (see the sketch after this list)
- Optimize dependency arrays to prevent unnecessary re-renders
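A minimal sketch of the AbortController cleanup pattern inside a hook (endpoint and state names assumed):
```typescript
useEffect(() => {
  const controller = new AbortController();
  fetch(`/api/responses?surveyId=${surveyId}`, { signal: controller.signal })
    .then((res) => res.json())
    .then(setResponses)
    .catch((err) => {
      if (err.name !== "AbortError") console.error("Fetch failed:", err);
    });
  // Abort the in-flight request when the component unmounts or surveyId changes
  return () => controller.abort();
}, [surveyId]);
```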
## Data Fetching Patterns
### Server Actions
The app uses Next.js server actions for data fetching:
```typescript
// Server action
export async function getResponsesAction(params: GetResponsesParams) {
const responses = await getResponses(params);
return { data: responses };
}
// Client usage
const { data } = await getResponsesAction(params);
```
### Error Handling
Consistent error handling across the application:
```typescript
try {
const result = await apiCall();
return { data: result };
} catch (error) {
console.error("Operation failed:", error);
return { error: error.message };
}
```
## Type Safety
### Type Organization
Types are organized in packages:
- `@formbricks/types` - Shared type definitions
- Local types in component/hook files
### Common Types
```typescript
import { TSurvey } from "@formbricks/types/surveys/types";
import { TResponse } from "@formbricks/types/responses";
import { TEnvironment } from "@formbricks/types/environment";
```
## State Management
### Local State
- Use `useState` for component-specific state
- Use `useReducer` for complex state logic
- Use refs for mutable values that don't trigger re-renders
### Global State
- React Context for feature-specific shared state
- URL state for filters and pagination
- Server state through server actions
## Performance Considerations
### Code Splitting
- Dynamic imports for heavy components (see the sketch after this list)
- Route-based code splitting with app router
- Lazy loading for non-critical features
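A minimal sketch of a dynamic import for a heavy, browser-only component (component names assumed):
```tsx
"use client";

import dynamic from "next/dynamic";

// Loaded on demand, keeping the chart library out of the initial bundle
const ResponseChart = dynamic(() => import("./ResponseChart"), {
  loading: () => <p>Loading chart…</p>,
  ssr: false,
});

export function ResultsPanel() {
  return <ResponseChart />;
}
```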
### Caching Strategy
- Server-side caching for database queries
- Client-side caching with React Query (where applicable)
- Static generation for public pages
## Testing Strategy
### Test Organization
```
component/
├── Component.tsx
├── Component.test.tsx
└── hooks/
├── useHook.ts
└── useHook.test.tsx
```
### Test Patterns
- Unit tests for utilities and services
- Integration tests for components with context
- Hook tests with proper mocking
## Build & Deployment
### Build Process
- TypeScript compilation
- Next.js build optimization
- Asset optimization and bundling
### Environment Configuration
- Environment-specific configurations
- Feature flags for gradual rollouts
- Database connection management
## Security Patterns
### Authentication
- Session-based authentication
- Environment-based access control
- API route protection
### Data Validation
- Input validation on both client and server
- Type-safe API contracts
- Sanitization of user inputs
## Monitoring & Observability
### Error Tracking
- Client-side error boundaries
- Server-side error logging
- Performance monitoring
### Analytics
- User interaction tracking
- Performance metrics
- Database query monitoring
## Best Practices Summary
### Code Organization
- ✅ Follow the established directory structure
- ✅ Use consistent naming conventions
- ✅ Separate concerns (UI, logic, data)
- ✅ Keep components focused and small
### Performance
- ✅ Implement proper loading states
- ✅ Use AbortController for async operations
- ✅ Optimize database queries
- ✅ Implement proper caching strategies
### Type Safety
- ✅ Use TypeScript throughout
- ✅ Define proper interfaces for props
- ✅ Use type guards for runtime validation
- ✅ Leverage shared type packages
### Testing
- ✅ Write tests for critical functionality
- ✅ Mock external dependencies properly
- ✅ Test error scenarios and edge cases
- ✅ Maintain good test coverage

View File

@@ -1,232 +0,0 @@
---
description: Security best practices and guidelines for writing GitHub Actions and workflows
globs: .github/workflows/*.yml,.github/workflows/*.yaml,.github/actions/*/action.yml,.github/actions/*/action.yaml
---
# GitHub Actions Security Best Practices
## Required Security Measures
### 1. Set Minimum GITHUB_TOKEN Permissions
Always explicitly set the minimum required permissions for GITHUB_TOKEN:
```yaml
permissions:
contents: read
# Only add additional permissions if absolutely necessary:
# pull-requests: write # for commenting on PRs
# issues: write # for creating/updating issues
# checks: write # for publishing check results
```
### 2. Add Harden-Runner as First Step
For **every job** on `ubuntu-latest`, add Harden-Runner as the first step:
```yaml
- name: Harden the runner
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit # or 'block' for stricter security
```
### 3. Pin Actions to Full Commit SHA
**Always** pin third-party actions to their full commit SHA, not tags:
```yaml
# ❌ BAD - uses mutable tag
- uses: actions/checkout@v4
# ✅ GOOD - pinned to immutable commit SHA
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
```
### 4. Secure Variable Handling
Prevent command injection by properly quoting variables:
```yaml
# ❌ BAD - potential command injection
run: echo "Processing ${{ inputs.user_input }}"
# ✅ GOOD - properly quoted
env:
USER_INPUT: ${{ inputs.user_input }}
run: echo "Processing ${USER_INPUT}"
```
Use `${VARIABLE}` syntax in shell scripts instead of `$VARIABLE`.
### 5. Environment Variables for Secrets
Store sensitive data in environment variables, not inline:
```yaml
# ❌ BAD
run: curl -H "Authorization: Bearer ${{ secrets.TOKEN }}" api.example.com
# ✅ GOOD
env:
API_TOKEN: ${{ secrets.TOKEN }}
run: curl -H "Authorization: Bearer ${API_TOKEN}" api.example.com
```
## Workflow Structure Best Practices
### Required Workflow Elements
```yaml
name: "Descriptive Workflow Name"
on:
# Define specific triggers
push:
branches: [main]
pull_request:
branches: [main]
# Always set explicit permissions
permissions:
contents: read
jobs:
job-name:
name: "Descriptive Job Name"
runs-on: ubuntu-latest
timeout-minutes: 30 # tune per job; standardize repo-wide
# Set job-level permissions if different from workflow level
permissions:
contents: read
steps:
# Always start with Harden-Runner on ubuntu-latest
      - name: Harden the runner
        uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
        with:
          egress-policy: audit
# Pin all actions to commit SHA
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
```
### Input Validation for Actions
For composite actions, always validate inputs:
```yaml
inputs:
user_input:
description: "User provided input"
required: true
runs:
using: "composite"
steps:
- name: Validate input
shell: bash
env:
USER_INPUT: ${{ inputs.user_input }}
run: |
# Harden shell and validate input format/content before use
set -euo pipefail
if [[ ! "${USER_INPUT}" =~ ^[A-Za-z0-9._-]+$ ]]; then
echo "❌ Invalid input format"
exit 1
fi
```
## Docker Security in Actions
### Pin Docker Images to Digests
```yaml
# ❌ BAD - mutable tag
container: node:18
# ✅ GOOD - pinned to digest
container: node:18@sha256:a1ba21bf0c92931d02a8416f0a54daad66cb36a85d6a37b82dfe1604c4c09cad
```
## Common Patterns
### Secure File Operations
```yaml
- name: Process files securely
shell: bash
env:
FILE_PATH: ${{ inputs.file_path }}
run: |
set -euo pipefail # Fail on errors, undefined vars, pipe failures
# Use absolute paths and validate
SAFE_PATH=$(realpath "${FILE_PATH}")
if [[ "$SAFE_PATH" != "${GITHUB_WORKSPACE}"/* ]]; then
echo "❌ Path outside workspace"
exit 1
fi
```
### Artifact Handling
```yaml
- name: Upload artifacts securely
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: build-artifacts
path: |
dist/
!dist/**/*.log # Exclude sensitive files
retention-days: 30
```
### GHCR authentication for pulls/scans
```yaml
# Minimal permissions required for GHCR pulls/scans
permissions:
contents: read
packages: read
steps:
- name: Log in to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
```
## Security Checklist
- [ ] Minimum GITHUB_TOKEN permissions set
- [ ] Harden-Runner added to all ubuntu-latest jobs
- [ ] All third-party actions pinned to commit SHA
- [ ] Input validation implemented for custom actions
- [ ] Variables properly quoted in shell scripts
- [ ] Secrets stored in environment variables
- [ ] Docker images pinned to digests (if used)
- [ ] Error handling with `set -euo pipefail`
- [ ] File paths validated and sanitized
- [ ] No sensitive data in logs or outputs
- [ ] GHCR login performed before pulls/scans (packages: read)
- [ ] Job timeouts configured (`timeout-minutes`)
## Recommended Additional Workflows
Consider adding these security-focused workflows to your repository:
1. **CodeQL Analysis** - Static Application Security Testing (SAST)
2. **Dependency Review** - Scan for vulnerable dependencies in PRs
3. **Dependabot Configuration** - Automated dependency updates
## Resources
- [GitHub Security Hardening Guide](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions)
- [Step Security Harden-Runner](https://github.com/step-security/harden-runner)
- [Secure-Repo Best Practices](https://github.com/step-security/secure-repo)

View File

@@ -1,74 +0,0 @@
---
alwaysApply: true
---
### Formbricks Monorepo Overview
- **Project**: Formbricks — open-source survey and experience management platform. Repo: [formbricks/formbricks](https://github.com/formbricks/formbricks)
- **Monorepo**: Turborepo + pnpm workspaces. Root configs: [package.json](mdc:package.json), [turbo.json](mdc:turbo.json)
- **Core app**: Next.js app in `apps/web` with Prisma, Auth.js, TailwindCSS, Vitest, Playwright. Enterprise modules live in [apps/web/modules/ee](mdc:apps/web/modules/ee)
- **Datastores**: PostgreSQL + Redis. Local dev via [docker-compose.dev.yml](mdc:docker-compose.dev.yml); Prisma schema at [packages/database/schema.prisma](mdc:packages/database/schema.prisma)
- **Docs & Ops**: Docs in `docs/` (Mintlify), Helm in `helm-chart/`, IaC in `infra/`
### Apps
- **apps/web**: Next.js product application (API, UI, SSO, i18n, emails, uploads, integrations)
- **apps/storybook**: Storybook for UI components; a11y addon + Vite builder
### Packages
- **@formbricks/database** (`packages/database`): Prisma schema, DB scripts, migrations, data layer
- **@formbricks/js-core** (`packages/js-core`): Core runtime for web embed / async loader
- **@formbricks/surveys** (`packages/surveys`): Embeddable survey rendering and helpers
- **@formbricks/logger** (`packages/logger`): Shared logging (pino) + Zod types
- **@formbricks/types** (`packages/types`): Shared types (Zod, Prisma clients)
- **@formbricks/i18n-utils** (`packages/i18n-utils`): i18n helpers and build output
- **@formbricks/eslint-config** (`packages/config-eslint`): Central ESLint config (Next, TS, Vitest, Prettier)
- **@formbricks/config-typescript** (`packages/config-typescript`): Central TS config and types
- **@formbricks/vite-plugins** (`packages/vite-plugins`): Internal Vite plugins
- **packages/android, packages/ios**: Native SDKs (built with platform toolchains)
### Enterprise-ready by design
- **Quality & safety**: Strict TypeScript, repo-wide ESLint + Prettier, lint-staged + Husky, CI checks, typed env validation
- **Security-first**: Auth.js, SSO/SAML/OIDC, session controls, rate limiting, Sentry, structured logging
### Accessible by design
- **UI foundations**: Radix UI, TailwindCSS, Storybook with `@storybook/addon-a11y`, keyboard- and screen-reader-friendly components
### Root pnpm commands
```bash
pnpm clean:all # Clean turbo cache, node_modules, lockfile, coverage, out
pnpm clean # Clean turbo cache, node_modules, coverage, out
pnpm build # Build all packages/apps (turbo)
pnpm build:dev # Dev-optimized builds (where supported)
pnpm dev # Run all dev servers in parallel
pnpm start # Start built apps/services
pnpm go # Start DB (docker compose) and run long-running dev tasks
pnpm generate # Run generators (e.g., Prisma, API specs)
pnpm lint # Lint all
pnpm format # Prettier write across repo
pnpm test # Unit tests
pnpm test:coverage # Unit tests with coverage
pnpm test:e2e # Playwright tests
pnpm test-e2e:azure # Playwright tests with Azure config
pnpm storybook # Run Storybook
pnpm db:up # Start local Postgres/Redis via docker compose
pnpm db:down # Stop local DB stack
pnpm db:start # Project-level DB setup choreography
pnpm db:push # Prisma db push (accept data loss in package script)
pnpm db:migrate:dev # Apply dev migrations
pnpm db:migrate:deploy # Apply prod migrations
pnpm fb-migrate-dev # Create DB migration (database package) and prisma generate
pnpm tolgee-pull # Pull translation keys for current branch and format
```
### Essentials for every prompt
- **Tech stack**: Next.js, React 19, TypeScript, Prisma, Zod, TailwindCSS, Turborepo, Vitest, Playwright
- **Environments**: See `.env.example`. Many tasks require DB up and env variables set
- **Licensing**: Core under AGPLv3; Enterprise code in `apps/web/modules/ee` (included in Docker, unlocked via Enterprise License Key)
For deeper details, consult per-package `package.json` and scripts (e.g., [apps/web/package.json](mdc:apps/web/package.json)).

View File

@@ -1,5 +0,0 @@
---
description:
globs:
alwaysApply: false
---

View File

@@ -1,52 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# React Context & Provider Patterns
## Context Provider Best Practices
### Provider Implementation
- Use TypeScript interfaces for provider props with optional `initialCount` for testing
- Implement proper cleanup in `useEffect` to avoid React hooks warnings
- Reference: [apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/components/ResponseCountProvider.tsx](mdc:apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/components/ResponseCountProvider.tsx)
### Cleanup Pattern for Refs
```typescript
useEffect(() => {
const currentPendingRequests = pendingRequests.current;
const currentAbortController = abortController.current;
return () => {
if (currentAbortController) {
currentAbortController.abort();
}
currentPendingRequests.clear();
};
}, []);
```
### Testing Context Providers
- Always wrap components using context in the provider during tests
- Use `initialCount` prop for predictable test scenarios
- Mock context dependencies like `useParams`, `useResponseFilter`
- Example from [apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SurveyAnalysisCTA.test.tsx](mdc:apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SurveyAnalysisCTA.test.tsx):
```typescript
render(
<ResponseCountProvider survey={dummySurvey} initialCount={5}>
<ComponentUnderTest />
</ResponseCountProvider>
);
```
### Required Mocks for Context Testing
- Mock `next/navigation` with `useParams` returning environment and survey IDs
- Mock response filter context and actions
- Mock API actions that the provider depends on
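A minimal sketch of the `next/navigation` mock, using hypothetical fixture IDs (the response filter context and provider API actions are mocked the same way with `vi.mock`):
```typescript
import { vi } from "vitest";

// Hypothetical fixture IDs; use whatever the test data expects
vi.mock("next/navigation", () => ({
  useParams: vi.fn(() => ({ environmentId: "env-123", surveyId: "survey-123" })),
}));
```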
### Context Hook Usage
- Create custom hooks like `useResponseCountContext()` for consuming context
- Provide meaningful error messages when context is used outside provider
- Use context for shared state that multiple components need to access
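A minimal sketch of such a hook, with an abbreviated context value (the real provider exposes more state):
```typescript
import { createContext, useContext } from "react";

interface ResponseCountContextValue {
  responseCount: number;
  refetchResponseCount: () => void;
}

// Context is created next to the provider component
export const ResponseCountContext = createContext<ResponseCountContextValue | null>(null);

export const useResponseCountContext = (): ResponseCountContextValue => {
  const context = useContext(ResponseCountContext);
  if (!context) {
    // Meaningful error when the hook is used outside its provider
    throw new Error("useResponseCountContext must be used within a ResponseCountProvider");
  }
  return context;
};
```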

View File

@@ -1,5 +0,0 @@
---
description:
globs:
alwaysApply: false
---

View File

@@ -1,216 +0,0 @@
---
description: Migrate deprecated UI components to a unified component
globs:
alwaysApply: false
---
# Component Migration Automation Rule
## Overview
This rule automates the migration of deprecated components to new component systems in React/TypeScript codebases.
## Trigger
When the user requests component migration (e.g., "migrate [DeprecatedComponent] to [NewComponent]" or "component migration").
## Process
### Step 1: Discovery and Planning
1. **Identify migration parameters:**
- Ask user for deprecated component name (e.g., "Modal")
- Ask user for new component name(s) (e.g., "Dialog")
- Ask for any components to exclude (e.g., "ModalWithTabs")
- Ask for specific import paths if needed
2. **Scan codebase** for deprecated components:
- Search for `import.*[DeprecatedComponent]` patterns
- Exclude specified components that should not be migrated
- List all found components with file paths
- Present numbered list to user for confirmation
### Step 2: Component-by-Component Migration
For each component, follow this exact sequence:
#### 2.1 Component Migration
- **Import changes:**
- Ask user to provide the new import structure
- Example transformation pattern:
```typescript
// FROM:
import { [DeprecatedComponent] } from "@/components/ui/[DeprecatedComponent]"
// TO:
import {
[NewComponent],
[NewComponentPart1],
[NewComponentPart2],
// ... other parts
} from "@/components/ui/[NewComponent]"
```
- **Props transformation:**
- Ask user for prop mapping rules (e.g., `open` → `open`, `setOpen` → `onOpenChange`; a sketch follows at the end of this step)
- Ask for props to remove (e.g., `noPadding`, `closeOnOutsideClick`, `size`)
- Apply transformations based on user specifications
- **Structure transformation:**
- Ask user for the new component structure pattern
- Apply the transformation maintaining all functionality
- Preserve all existing logic, state management, and event handlers
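As an illustration of the import, prop, and structure transformations above, a hypothetical Modal-to-Dialog migration could look like this (component names, props, and import paths are illustrative, not taken from the actual codebase):
```tsx
// Before (deprecated component):
// <Modal open={open} setOpen={setOpen} noPadding closeOnOutsideClick={false}>
//   ...existing content...
// </Modal>

// After: `setOpen` remapped to `onOpenChange`, removed props dropped,
// and the content wrapped in the new component hierarchy
import { Dialog, DialogContent, DialogHeader, DialogTitle } from "@/components/ui/Dialog";

interface SettingsDialogProps {
  open: boolean;
  onOpenChange: (open: boolean) => void;
}

export const SettingsDialog = ({ open, onOpenChange }: SettingsDialogProps) => (
  <Dialog open={open} onOpenChange={onOpenChange}>
    <DialogContent>
      <DialogHeader>
        <DialogTitle>Settings</DialogTitle>
      </DialogHeader>
      {/* existing logic, state management, and event handlers stay unchanged */}
    </DialogContent>
  </Dialog>
);
```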
#### 2.2 Wait for User Approval
- Present the migration changes
- Wait for explicit user approval before proceeding
- If rejected, ask for specific feedback and iterate
#### 2.3 Re-read and Apply Additional Changes
- Re-read the component file to capture any user modifications
- Apply any additional improvements the user made
- Ensure all changes are incorporated
#### 2.4 Test File Updates
- **Find corresponding test file** (same name with `.test.tsx` or `.test.ts`)
- **Update test mocks:**
- Ask user for new component mock structure
- Replace old component mocks with new ones
- Example pattern:
```typescript
// Add to test setup:
jest.mock("@/components/ui/[NewComponent]", () => ({
[NewComponent]: ({ children, [props] }: any) => ([mock implementation]),
[NewComponentPart1]: ({ children }: any) => <div data-testid="[new-component-part1]">{children}</div>,
[NewComponentPart2]: ({ children }: any) => <div data-testid="[new-component-part2]">{children}</div>,
// ... other parts
}));
```
- **Update test expectations:**
- Change test IDs from old component to new component
- Update any component-specific assertions
- Ensure all new component parts used in the component are mocked
#### 2.5 Run Tests and Optimize
- Execute `npm run test -- ComponentName.test.tsx`
- Fix any failing tests
- Optimize code quality (imports, formatting, etc.)
- Re-run tests until all pass
- **Maximum 3 iterations** - if still failing, ask user for guidance
#### 2.6 Wait for Final Approval
- Present test results and any optimizations made
- Wait for user approval of the complete migration
- If rejected, iterate based on feedback
#### 2.7 Git Commit
- Run: `git add .`
- Run: `git commit -m "migrate [ComponentName] from [DeprecatedComponent] to [NewComponent]"`
- Confirm commit was successful
### Step 3: Final Report Generation
After all components are migrated, generate a comprehensive GitHub PR report:
#### PR Title
```
feat: migrate [DeprecatedComponent] components to [NewComponent] system
```
#### PR Description Template
```markdown
## 🔄 [DeprecatedComponent] to [NewComponent] Migration
### Overview
Migrated [X] [DeprecatedComponent] components to the new [NewComponent] component system to modernize the UI architecture and improve consistency.
### Components Migrated
[List each component with file path]
### Technical Changes
- **Imports:** Replaced `[DeprecatedComponent]` with `[NewComponent], [NewComponentParts...]`
- **Props:** [List prop transformations]
- **Structure:** Implemented proper [NewComponent] component hierarchy
- **Styling:** [Describe styling changes]
- **Tests:** Updated all test mocks and expectations
### Migration Pattern
```typescript
// Before
<[DeprecatedComponent] [oldProps]>
[oldStructure]
</[DeprecatedComponent]>
// After
<[NewComponent] [newProps]>
[newStructure]
</[NewComponent]>
```
### Testing
- ✅ All existing tests updated and passing
- ✅ Component functionality preserved
- ✅ UI/UX behavior maintained
### How to Test This PR
1. **Functional Testing:**
- Navigate to each migrated component's usage
- Verify [component] opens and closes correctly
- Test all interactive elements within [components]
- Confirm styling and layout are preserved
2. **Automated Testing:**
```bash
npm test
```
3. **Visual Testing:**
- Check that all [components] maintain proper styling
- Verify responsive behavior
- Test keyboard navigation and accessibility
### Breaking Changes
[List any breaking changes or state "None - this is a drop-in replacement maintaining all existing functionality."]
### Notes
- [Any excluded components] were preserved as they already use [NewComponent] internally
- All form validation and complex state management preserved
- Enhanced code quality with better imports and formatting
```
## Special Considerations
### Excluded Components
- **DO NOT MIGRATE** components specified by user as exclusions
- They may already use the new component internally or have other reasons
- Inform user these are skipped and why
### Complex Components
- Preserve all existing functionality (forms, validation, state management)
- Maintain prop interfaces
- Keep all event handlers and callbacks
- Preserve accessibility features
### Test Coverage
- Ensure all new component parts are mocked when used
- Mock all new component parts that appear in the component
- Update test IDs from old component to new component
- Maintain all existing test scenarios
### Error Handling
- If tests fail after 3 iterations, stop and ask user for guidance
- If component is too complex, ask user for specific guidance
- If unsure about functionality preservation, ask for clarification
### Migration Patterns
- Always ask user for specific migration patterns before starting
- Confirm import structures, prop mappings, and component hierarchies
- Adapt to different component architectures (simple replacements, complex restructuring, etc.)
## Success Criteria
- All deprecated components successfully migrated to new components
- All tests passing
- No functionality lost
- Code quality maintained or improved
- User approval on each component
- Successful git commits for each migration
- Comprehensive PR report generated
## Usage Examples
- "migrate Modal to Dialog"
- "migrate Button to NewButton"
- "migrate Card to ModernCard"
- "component migration" (will prompt for details)

View File

@@ -1,177 +0,0 @@
---
description: Create a story in Storybook for a given component
globs:
alwaysApply: false
---
# Formbricks Storybook Stories
## When generating Storybook stories for Formbricks components:
### 1. **File Structure**
- Create `stories.tsx` (not `.stories.tsx`) in component directory
- Use exact import: `import { Meta, StoryObj } from "@storybook/react-vite";`
- Import component from `"./index"`
### 2. **Story Structure Template**
```tsx
import { Meta, StoryObj } from "@storybook/react-vite";
import { ComponentName } from "./index";
// For complex components with configurable options
// treat this as an example; the options need to reflect the component's prop types
interface StoryOptions {
showIcon: boolean;
numberOfElements: number;
customLabels: string[];
}
type StoryProps = React.ComponentProps<typeof ComponentName> & StoryOptions;
const meta: Meta<StoryProps> = {
title: "UI/ComponentName",
component: ComponentName,
tags: ["autodocs"],
parameters: {
layout: "centered",
controls: { sort: "alpha", exclude: [] },
docs: {
description: {
component: "The **ComponentName** component provides [description].",
},
},
},
argTypes: {
// Organize in exactly these categories: Behavior, Appearance, Content
},
};
export default meta;
type Story = StoryObj<typeof ComponentName> & { args: StoryOptions };
```
### 3. **ArgTypes Organization**
Organize ALL argTypes into exactly three categories:
- **Behavior**: disabled, variant, onChange, etc.
- **Appearance**: size, color, layout, styling, etc.
- **Content**: text, icons, numberOfElements, etc.
Format:
```tsx
argTypes: {
propName: {
control: "select" | "boolean" | "text" | "number",
options: ["option1", "option2"], // for select
description: "Clear description",
table: {
category: "Behavior" | "Appearance" | "Content",
type: { summary: "string" },
defaultValue: { summary: "default" },
},
order: 1,
},
}
```
### 4. **Required Stories**
Every component must include:
- `Default`: Most common use case
- `Disabled`: If component supports disabled state
- `WithIcon`: If component supports icons
- Variant stories for each variant (Primary, Secondary, Error, etc.)
- Edge case stories (ManyElements, LongText, CustomStyling)
### 5. **Story Format**
```tsx
export const Default: Story = {
args: {
// Props with realistic values
},
};
export const EdgeCase: Story = {
args: { /* ... */ },
parameters: {
docs: {
description: {
story: "Use this when [specific scenario].",
},
},
},
};
```
### 6. **Dynamic Content Pattern**
For components with dynamic content, create render function:
```tsx
const renderComponent = (args: StoryProps) => {
const { numberOfElements, showIcon, customLabels } = args;
// Generate dynamic content
const elements = Array.from({ length: numberOfElements }, (_, i) => ({
id: `element-${i}`,
label: customLabels[i] || `Element ${i + 1}`,
icon: showIcon ? <IconComponent /> : undefined,
}));
return <ComponentName {...args} elements={elements} />;
};
export const Dynamic: Story = {
render: renderComponent,
args: {
numberOfElements: 3,
showIcon: true,
customLabels: ["First", "Second", "Third"],
},
};
```
### 7. **State Management**
For interactive components:
```tsx
import { useState } from "react";
const ComponentWithState = (args: any) => {
const [value, setValue] = useState(args.defaultValue);
return (
<ComponentName
{...args}
value={value}
onChange={(newValue) => {
setValue(newValue);
args.onChange?.(newValue);
}}
/>
);
};
export const Interactive: Story = {
render: ComponentWithState,
args: { defaultValue: "initial" },
};
```
### 8. **Quality Requirements**
- Include component description in parameters.docs
- Add story documentation for non-obvious use cases
- Test edge cases (overflow, empty states, many elements)
- Ensure no TypeScript errors
- Use realistic prop values
- Include at least 3-5 story variants
- Example values need to be in the context of a survey application
### 9. **Naming Conventions**
- **Story titles**: "UI/ComponentName"
- **Story exports**: PascalCase (Default, WithIcon, ManyElements)
- **Categories**: "Behavior", "Appearance", "Content" (exact spelling)
- **Props**: camelCase matching component props
### 10. **Special Cases**
- **Generic components**: Remove `component` from meta if type conflicts
- **Form components**: Include Invalid, WithValue stories (a sketch follows below)
- **Navigation**: Include ManyItems stories
- **Modals, Dropdowns and Popups**: Include trigger and content structure
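A rough sketch of the form-component case, assuming a hypothetical `Input` component with `value` and `invalid` props:
```tsx
import { Meta, StoryObj } from "@storybook/react-vite";
import { Input } from "./index";

const meta: Meta<typeof Input> = {
  title: "UI/Input",
  component: Input,
  tags: ["autodocs"],
  parameters: { layout: "centered" },
};

export default meta;
type Story = StoryObj<typeof Input>;

// Pre-filled state, e.g. editing an existing survey question label
export const WithValue: Story = {
  args: { value: "How satisfied are you with our product?" },
};

// Invalid state, e.g. a required question label left empty
export const Invalid: Story = {
  args: { value: "", invalid: true },
};
```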
## Generate stories that are comprehensive, well-documented, and reflect all component states and edge cases.

View File

@@ -1,322 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Testing Patterns & Best Practices
## Running Tests
### Test Commands
From the **root directory** (formbricks/):
- `npm test` - Run all tests across all packages (recommended for CI/full testing)
- `npm run test:coverage` - Run all tests with coverage reports
- `npm run test:e2e` - Run end-to-end tests with Playwright
From the **apps/web directory** (apps/web/):
- `npm run test` - Run only web app tests (fastest for development)
- `npm run test:coverage` - Run web app tests with coverage
- `npm run test -- <file-pattern>` - Run specific test files
### Examples
```bash
# Run all tests from root (takes ~3 minutes, runs 790 test files with 5334+ tests)
npm test
# Run specific test file from apps/web (fastest for development)
npm run test -- modules/cache/lib/service.test.ts
# Run tests matching pattern from apps/web
npm run test -- modules/ee/license-check/lib/license.test.ts
# Run with coverage from root
npm run test:coverage
# Run specific test with watch mode from apps/web (for development)
npm run test -- --watch modules/cache/lib/service.test.ts
# Run tests for a specific directory from apps/web
npm run test -- modules/cache/
```
### Performance Tips
- **For development**: Use `apps/web` directory commands to run only web app tests
- **For CI/validation**: Use root directory commands to run all packages
- **For specific features**: Use file patterns to target specific test files
- **For debugging**: Use `--watch` mode for continuous testing during development
### Test File Organization
- Place test files in the **same directory** as the source file
- Use `.test.ts` for utility/service tests (Node environment)
- Use `.test.tsx` for React component tests (jsdom environment)
## Test File Naming & Environment
### File Extensions
- Use `.test.tsx` for React component/hook tests (runs in jsdom environment)
- Use `.test.ts` for utility/service tests (runs in Node environment)
- The vitest config uses `environmentMatchGlobs` to automatically set jsdom for `.tsx` files
### Test Structure
```typescript
// Import the mocked functions first
import { useHook } from "@/path/to/hook";
import { serviceFunction } from "@/path/to/service";
import { renderHook, waitFor } from "@testing-library/react";
import { beforeEach, describe, expect, test, vi } from "vitest";
// Mock dependencies
vi.mock("@/path/to/hook", () => ({
useHook: vi.fn(),
}));
describe("ComponentName", () => {
beforeEach(() => {
vi.clearAllMocks();
// Setup default mocks
});
test("descriptive test name", async () => {
// Test implementation
});
});
```
## React Hook Testing
### Context Mocking
When testing hooks that use React Context:
```typescript
vi.mocked(useResponseFilter).mockReturnValue({
selectedFilter: {
filter: [],
responseStatus: "all",
},
setSelectedFilter: vi.fn(),
selectedOptions: {
questionOptions: [],
questionFilterOptions: [],
},
setSelectedOptions: vi.fn(),
dateRange: { from: new Date(), to: new Date() },
setDateRange: vi.fn(),
resetState: vi.fn(),
});
```
### Testing Async Hooks
- Always use `waitFor` for async operations
- Test both loading and completed states
- Verify API calls with correct parameters
```typescript
test("fetches data on mount", async () => {
const { result } = renderHook(() => useHook());
expect(result.current.isLoading).toBe(true);
await waitFor(() => {
expect(result.current.isLoading).toBe(false);
});
expect(result.current.data).toBe(expectedData);
expect(vi.mocked(apiCall)).toHaveBeenCalledWith(expectedParams);
});
```
### Testing Hook Dependencies
To test useEffect dependencies, ensure mocks return different values:
```typescript
// First render
mockGetFormattedFilters.mockReturnValue(mockFilters);
// Change dependency and trigger re-render
const newMockFilters = { ...mockFilters, finished: true };
mockGetFormattedFilters.mockReturnValue(newMockFilters);
rerender();
```
## Performance Testing
### Race Condition Testing
Test AbortController implementation:
```typescript
test("cancels previous request when new request is made", async () => {
let resolveFirst: (value: any) => void;
let resolveSecond: (value: any) => void;
const firstPromise = new Promise((resolve) => {
resolveFirst = resolve;
});
const secondPromise = new Promise((resolve) => {
resolveSecond = resolve;
});
vi.mocked(apiCall)
.mockReturnValueOnce(firstPromise as any)
.mockReturnValueOnce(secondPromise as any);
const { result } = renderHook(() => useHook());
// Trigger second request
result.current.refetch();
// Resolve in order - first should be cancelled
resolveFirst!({ data: 100 });
resolveSecond!({ data: 200 });
await waitFor(() => {
expect(result.current.isLoading).toBe(false);
});
// Should have result from second request
expect(result.current.data).toBe(200);
});
```
### Cleanup Testing
```typescript
test("cleans up on unmount", () => {
const abortSpy = vi.spyOn(AbortController.prototype, "abort");
const { unmount } = renderHook(() => useHook());
unmount();
expect(abortSpy).toHaveBeenCalled();
abortSpy.mockRestore();
});
```
## Error Handling Testing
### API Error Testing
```typescript
test("handles API errors gracefully", async () => {
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
vi.mocked(apiCall).mockRejectedValue(new Error("API Error"));
const { result } = renderHook(() => useHook());
await waitFor(() => {
expect(result.current.isLoading).toBe(false);
});
expect(consoleSpy).toHaveBeenCalledWith("Error message:", expect.any(Error));
expect(result.current.data).toBe(fallbackValue);
consoleSpy.mockRestore();
});
```
### Cancelled Request Testing
```typescript
test("does not update state for cancelled requests", async () => {
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
let rejectFirst: (error: any) => void;
const firstPromise = new Promise((_, reject) => {
rejectFirst = reject;
});
vi.mocked(apiCall)
.mockReturnValueOnce(firstPromise as any)
.mockResolvedValueOnce({ data: 42 });
const { result } = renderHook(() => useHook());
result.current.refetch();
const abortError = new Error("Request cancelled");
rejectFirst!(abortError);
await waitFor(() => {
expect(result.current.isLoading).toBe(false);
});
// Should not log error for cancelled request
expect(consoleSpy).not.toHaveBeenCalled();
consoleSpy.mockRestore();
});
```
## Type Safety in Tests
### Mock Type Assertions
Use type assertions for edge cases:
```typescript
vi.mocked(apiCall).mockResolvedValue({
data: null as any, // For testing null handling
});
vi.mocked(apiCall).mockResolvedValue({
data: undefined as any, // For testing undefined handling
});
```
### Proper Mock Typing
Ensure mocks match the actual interface:
```typescript
const mockSurvey: TSurvey = {
id: "survey-123",
name: "Test Survey",
// ... other required properties
} as unknown as TSurvey; // Use when partial mocking is needed
```
## Common Test Patterns
### Testing State Changes
```typescript
test("updates state correctly", async () => {
const { result } = renderHook(() => useHook());
// Initial state
expect(result.current.value).toBe(initialValue);
// Trigger change
result.current.updateValue(newValue);
// Verify change
expect(result.current.value).toBe(newValue);
});
```
### Testing Multiple Scenarios
```typescript
test("handles different modes", async () => {
// Test regular mode
vi.mocked(useParams).mockReturnValue({ surveyId: "123" });
const { rerender } = renderHook(() => useHook());
await waitFor(() => {
expect(vi.mocked(regularApi)).toHaveBeenCalled();
});
rerender();
await waitFor(() => {
expect(vi.mocked(sharingApi)).toHaveBeenCalled();
});
});
```
## Test Organization
### Comprehensive Test Coverage
For hooks, ensure you test:
- ✅ Initialization (with/without initial values)
- ✅ Data fetching (success/error cases)
- ✅ State updates and refetching
- ✅ Dependency changes triggering effects
- ✅ Manual actions (refetch, reset)
- ✅ Race condition prevention
- ✅ Cleanup on unmount
- ✅ Mode switching (if applicable)
- ✅ Edge cases (null/undefined data)
### Test Naming
Use descriptive test names that explain the scenario:
- ✅ "initializes with initial count"
- ✅ "fetches response count on mount for regular survey"
- ✅ "cancels previous request when new request is made"
- ❌ "test hook"
- ❌ "it works"

View File

@@ -1,7 +0,0 @@
---
description: Whenever the user asks to write or update a test file for .tsx or .ts files.
globs:
alwaysApply: false
---
When writing tests, use the rules in [copilot-instructions.md](mdc:.github/copilot-instructions.md).
After writing the tests, run them, fix any failures, and rerun until all of them pass.

View File

@@ -9,8 +9,12 @@
WEBAPP_URL=http://localhost:3000
# Required for next-auth. Should be the same as WEBAPP_URL
# If your application uses a custom base path, specify the route to the API endpoint in full, e.g. NEXTAUTH_URL=https://example.com/custom-route/api/auth
NEXTAUTH_URL=http://localhost:3000
# Can be used to deploy the application under a sub-path of a domain. This can only be set at build time
# BASE_PATH=
# Encryption keys
# Please set both for now, we will change this in the future
@@ -62,9 +66,6 @@ SMTP_PASSWORD=smtpPassword
# Uncomment the variables you would like to use and customize the values.
# Custom local storage path for file uploads
#UPLOADS_DIR=
##############
# S3 STORAGE #
##############
@@ -99,8 +100,6 @@ PASSWORD_RESET_DISABLED=1
# Organization Invite. Disable the ability for invited users to create an account.
# INVITE_DISABLED=1
# Docker cron jobs. Disable the supercronic cron jobs in the Docker image (useful for cluster setups).
# DOCKER_CRON_ENABLED=1
##########
# Other #
@@ -169,6 +168,9 @@ SLACK_CLIENT_SECRET=
# Enterprise License Key
ENTERPRISE_LICENSE_KEY=
# Internal Environment (production, staging) - used for internal staging environment
# ENVIRONMENT=production
# Automatically assign new users to a specific organization and role within that organization
# Insert an existing organization id or generate a valid CUID for a new one at https://www.getuniqueid.com/cuid (e.g. cjld2cjxh0000qzrmn831i7rn)
# (Role Management is an Enterprise feature)
@@ -194,8 +196,9 @@ REDIS_URL=redis://localhost:6379
# The below is used for Rate Limiting (uses In-Memory LRU Cache if not provided) (You can use a service like Webdis for this)
# REDIS_HTTP_URL:
# INTERCOM_APP_ID=
# INTERCOM_SECRET_KEY=
# Chatwoot
# CHATWOOT_BASE_URL=
# CHATWOOT_WEBSITE_TOKEN=
# Enable Prometheus metrics
# PROMETHEUS_ENABLED=
@@ -219,3 +222,7 @@ REDIS_URL=redis://localhost:6379
# AUDIT_LOG_ENABLED=0
# If the ip should be added in the log or not. Default 0
# AUDIT_LOG_GET_USER_IP=0
# Lingo.dev API key for translation generation
LINGODOTDEV_API_KEY=your_api_key_here

13
.eslintrc.cjs Normal file
View File

@@ -0,0 +1,13 @@
module.exports = {
root: true,
ignorePatterns: ["node_modules/", "dist/", "coverage/"],
overrides: [
{
files: ["packages/cache/**/*.{ts,js}"],
extends: ["@formbricks/eslint-config/library.js"],
parserOptions: {
project: "./packages/cache/tsconfig.json",
},
},
],
};

View File

@@ -0,0 +1,319 @@
name: Build and Push Docker Image
description: |
Unified Docker build and push action for both ECR and GHCR registries.
Supports:
- ECR builds for Formbricks Cloud deployment
- GHCR builds for community self-hosting
- Automatic version resolution and tagging
- Conditional signing and deployment tags
inputs:
registry_type:
description: "Registry type: 'ecr' or 'ghcr'"
required: true
# Version input
version:
description: "Explicit version (SemVer only, e.g., 1.2.3). If provided, this version is used directly. If empty, version is auto-generated from branch name."
required: false
experimental_mode:
description: "Enable experimental timestamped versions"
required: false
default: "false"
# ECR specific inputs
ecr_registry:
description: "ECR registry URL (required for ECR builds)"
required: false
ecr_repository:
description: "ECR repository name (required for ECR builds)"
required: false
ecr_region:
description: "ECR AWS region (required for ECR builds)"
required: false
aws_role_arn:
description: "AWS role ARN for ECR authentication (required for ECR builds)"
required: false
# GHCR specific inputs
ghcr_image_name:
description: "GHCR image name (required for GHCR builds)"
required: false
# Deployment options
deploy_production:
description: "Tag image for production deployment"
required: false
default: "false"
deploy_staging:
description: "Tag image for staging deployment"
required: false
default: "false"
is_prerelease:
description: "Whether this is a prerelease (auto-tags for staging/production)"
required: false
default: "false"
make_latest:
description: "Whether to tag as latest/production (from GitHub release 'Set as the latest release' option)"
required: false
default: "false"
# Build options
dockerfile:
description: "Path to Dockerfile"
required: false
default: "apps/web/Dockerfile"
context:
description: "Build context"
required: false
default: "."
outputs:
image_tag:
description: "Resolved image tag used for the build"
value: ${{ steps.version.outputs.version }}
registry_tags:
description: "Complete registry tags that were pushed"
value: ${{ steps.build.outputs.tags }}
image_digest:
description: "Image digest from the build"
value: ${{ steps.build.outputs.digest }}
runs:
using: "composite"
steps:
- name: Validate inputs
shell: bash
env:
REGISTRY_TYPE: ${{ inputs.registry_type }}
ECR_REGISTRY: ${{ inputs.ecr_registry }}
ECR_REPOSITORY: ${{ inputs.ecr_repository }}
ECR_REGION: ${{ inputs.ecr_region }}
AWS_ROLE_ARN: ${{ inputs.aws_role_arn }}
GHCR_IMAGE_NAME: ${{ inputs.ghcr_image_name }}
run: |
set -euo pipefail
if [[ "$REGISTRY_TYPE" != "ecr" && "$REGISTRY_TYPE" != "ghcr" ]]; then
echo "ERROR: registry_type must be 'ecr' or 'ghcr', got: $REGISTRY_TYPE"
exit 1
fi
if [[ "$REGISTRY_TYPE" == "ecr" ]]; then
if [[ -z "$ECR_REGISTRY" || -z "$ECR_REPOSITORY" || -z "$ECR_REGION" || -z "$AWS_ROLE_ARN" ]]; then
echo "ERROR: ECR builds require ecr_registry, ecr_repository, ecr_region, and aws_role_arn"
exit 1
fi
fi
if [[ "$REGISTRY_TYPE" == "ghcr" ]]; then
if [[ -z "$GHCR_IMAGE_NAME" ]]; then
echo "ERROR: GHCR builds require ghcr_image_name"
exit 1
fi
fi
echo "SUCCESS: Input validation passed for $REGISTRY_TYPE build"
- name: Resolve Docker version
id: version
uses: ./.github/actions/resolve-docker-version
with:
version: ${{ inputs.version }}
current_branch: ${{ github.ref_name }}
experimental_mode: ${{ inputs.experimental_mode }}
- name: Update package.json version
uses: ./.github/actions/update-package-version
with:
version: ${{ steps.version.outputs.version }}
- name: Configure AWS credentials (ECR only)
if: ${{ inputs.registry_type == 'ecr' }}
uses: aws-actions/configure-aws-credentials@7474bc4690e29a8392af63c5b98e7449536d5c3a # v4.2.0
with:
role-to-assume: ${{ inputs.aws_role_arn }}
aws-region: ${{ inputs.ecr_region }}
- name: Log in to Amazon ECR (ECR only)
if: ${{ inputs.registry_type == 'ecr' }}
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1
- name: Set up Docker build tools
uses: ./.github/actions/docker-build-setup
with:
registry: ${{ inputs.registry_type == 'ghcr' && 'ghcr.io' || '' }}
setup_cosign: ${{ inputs.registry_type == 'ghcr' && 'true' || 'false' }}
skip_login_on_pr: ${{ inputs.registry_type == 'ghcr' && 'true' || 'false' }}
- name: Build ECR tag list
if: ${{ inputs.registry_type == 'ecr' }}
id: ecr-tags
shell: bash
env:
IMAGE_TAG: ${{ steps.version.outputs.version }}
ECR_REGISTRY: ${{ inputs.ecr_registry }}
ECR_REPOSITORY: ${{ inputs.ecr_repository }}
DEPLOY_PRODUCTION: ${{ inputs.deploy_production }}
DEPLOY_STAGING: ${{ inputs.deploy_staging }}
IS_PRERELEASE: ${{ inputs.is_prerelease }}
MAKE_LATEST: ${{ inputs.make_latest }}
run: |
set -euo pipefail
# Start with the base image tag
TAGS="${ECR_REGISTRY}/${ECR_REPOSITORY}:${IMAGE_TAG}"
# Handle automatic tagging based on release type
if [[ "${IS_PRERELEASE}" == "true" ]]; then
TAGS="${TAGS}\n${ECR_REGISTRY}/${ECR_REPOSITORY}:staging"
echo "Adding staging tag for prerelease"
elif [[ "${IS_PRERELEASE}" == "false" && "${MAKE_LATEST}" == "true" ]]; then
TAGS="${TAGS}\n${ECR_REGISTRY}/${ECR_REPOSITORY}:production"
echo "Adding production tag for stable release marked as latest"
fi
# Handle manual deployment overrides
if [[ "${DEPLOY_PRODUCTION}" == "true" ]]; then
TAGS="${TAGS}\n${ECR_REGISTRY}/${ECR_REPOSITORY}:production"
echo "Adding production tag (manual override)"
fi
if [[ "${DEPLOY_STAGING}" == "true" ]]; then
TAGS="${TAGS}\n${ECR_REGISTRY}/${ECR_REPOSITORY}:staging"
echo "Adding staging tag (manual override)"
fi
echo "ECR tags generated:"
echo -e "${TAGS}"
{
echo "tags<<EOF"
echo -e "${TAGS}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
- name: Generate additional GHCR tags for releases
if: ${{ inputs.registry_type == 'ghcr' && inputs.experimental_mode == 'false' && (github.event_name == 'workflow_call' || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }}
id: ghcr-extra-tags
shell: bash
env:
VERSION: ${{ steps.version.outputs.version }}
IMAGE_NAME: ${{ inputs.ghcr_image_name }}
IS_PRERELEASE: ${{ inputs.is_prerelease }}
MAKE_LATEST: ${{ inputs.make_latest }}
run: |
set -euo pipefail
# Start with base version tag
TAGS="ghcr.io/${IMAGE_NAME}:${VERSION}"
# For proper SemVer releases, add major.minor and major tags
if [[ "${VERSION}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# Extract major and minor versions
MAJOR=$(echo "${VERSION}" | cut -d. -f1)
MINOR=$(echo "${VERSION}" | cut -d. -f2)
TAGS="${TAGS}\nghcr.io/${IMAGE_NAME}:${MAJOR}.${MINOR}"
TAGS="${TAGS}\nghcr.io/${IMAGE_NAME}:${MAJOR}"
echo "Added SemVer tags: ${MAJOR}.${MINOR}, ${MAJOR}"
fi
# Add latest tag for stable releases marked as latest
if [[ "${IS_PRERELEASE}" == "false" && "${MAKE_LATEST}" == "true" ]]; then
TAGS="${TAGS}\nghcr.io/${IMAGE_NAME}:latest"
echo "Added latest tag for stable release marked as latest"
fi
echo "Generated GHCR tags:"
echo -e "${TAGS}"
# Debug: Show what will be passed to Docker build
echo "DEBUG: Tags for Docker build step:"
echo -e "${TAGS}"
{
echo "tags<<EOF"
echo -e "${TAGS}"
echo "EOF"
} >> "${GITHUB_OUTPUT}"
- name: Build GHCR metadata (experimental)
if: ${{ inputs.registry_type == 'ghcr' && inputs.experimental_mode == 'true' }}
id: ghcr-meta-experimental
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: ghcr.io/${{ inputs.ghcr_image_name }}
tags: |
type=ref,event=branch
type=raw,value=${{ steps.version.outputs.version }}
- name: Debug Docker build tags
shell: bash
run: |
echo "=== DEBUG: Docker Build Configuration ==="
echo "Registry Type: ${{ inputs.registry_type }}"
echo "Experimental Mode: ${{ inputs.experimental_mode }}"
echo "Event Name: ${{ github.event_name }}"
echo "Is Prerelease: ${{ inputs.is_prerelease }}"
echo "Make Latest: ${{ inputs.make_latest }}"
echo "Version: ${{ steps.version.outputs.version }}"
if [[ "${{ inputs.registry_type }}" == "ecr" ]]; then
echo "ECR Tags: ${{ steps.ecr-tags.outputs.tags }}"
elif [[ "${{ inputs.experimental_mode }}" == "true" ]]; then
echo "GHCR Experimental Tags: ${{ steps.ghcr-meta-experimental.outputs.tags }}"
else
echo "GHCR Extra Tags: ${{ steps.ghcr-extra-tags.outputs.tags }}"
fi
- name: Build and push Docker image
id: build
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
with:
project: tw0fqmsx3c
token: ${{ env.DEPOT_PROJECT_TOKEN }}
context: ${{ inputs.context }}
file: ${{ inputs.dockerfile }}
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ inputs.registry_type == 'ecr' && steps.ecr-tags.outputs.tags || (inputs.registry_type == 'ghcr' && inputs.experimental_mode == 'true' && steps.ghcr-meta-experimental.outputs.tags) || (inputs.registry_type == 'ghcr' && inputs.experimental_mode == 'false' && steps.ghcr-extra-tags.outputs.tags) || (inputs.registry_type == 'ghcr' && format('ghcr.io/{0}:{1}', inputs.ghcr_image_name, steps.version.outputs.version)) || (inputs.registry_type == 'ecr' && format('{0}/{1}:{2}', inputs.ecr_registry, inputs.ecr_repository, steps.version.outputs.version)) }}
labels: ${{ inputs.registry_type == 'ghcr' && inputs.experimental_mode == 'true' && steps.ghcr-meta-experimental.outputs.labels || '' }}
secrets: |
database_url=${{ env.DUMMY_DATABASE_URL }}
encryption_key=${{ env.DUMMY_ENCRYPTION_KEY }}
redis_url=${{ env.DUMMY_REDIS_URL }}
sentry_auth_token=${{ env.SENTRY_AUTH_TOKEN }}
env:
DEPOT_PROJECT_TOKEN: ${{ env.DEPOT_PROJECT_TOKEN }}
DUMMY_DATABASE_URL: ${{ env.DUMMY_DATABASE_URL }}
DUMMY_ENCRYPTION_KEY: ${{ env.DUMMY_ENCRYPTION_KEY }}
DUMMY_REDIS_URL: ${{ env.DUMMY_REDIS_URL }}
SENTRY_AUTH_TOKEN: ${{ env.SENTRY_AUTH_TOKEN }}
- name: Sign GHCR image (GHCR only)
if: ${{ inputs.registry_type == 'ghcr' && (github.event_name == 'workflow_call' || github.event_name == 'release' || github.event_name == 'workflow_dispatch') }}
shell: bash
env:
TAGS: ${{ inputs.experimental_mode == 'true' && steps.ghcr-meta-experimental.outputs.tags || steps.ghcr-extra-tags.outputs.tags }}
DIGEST: ${{ steps.build.outputs.digest }}
run: |
set -euo pipefail
echo "${TAGS}" | xargs -I {} cosign sign --yes "{}@${DIGEST}"
- name: Output build summary
shell: bash
env:
REGISTRY_TYPE: ${{ inputs.registry_type }}
IMAGE_TAG: ${{ steps.version.outputs.version }}
VERSION_SOURCE: ${{ steps.version.outputs.source }}
run: |
echo "SUCCESS: Built and pushed Docker image to $REGISTRY_TYPE"
echo "Image Tag: $IMAGE_TAG (source: $VERSION_SOURCE)"
if [[ "$REGISTRY_TYPE" == "ecr" ]]; then
echo "ECR Registry: ${{ inputs.ecr_registry }}"
echo "ECR Repository: ${{ inputs.ecr_repository }}"
else
echo "GHCR Image: ghcr.io/${{ inputs.ghcr_image_name }}"
fi

View File

@@ -0,0 +1,106 @@
name: Docker Build Setup
description: |
Sets up common Docker build tools and authentication with security validation.
Security Features:
- Registry URL validation
- Input sanitization
- Conditional setup based on event type
- Post-setup verification
Supports Depot CLI, Cosign signing, and Docker registry authentication.
inputs:
registry:
description: "Docker registry hostname to login to (e.g., ghcr.io, registry.example.com:5000). No paths allowed."
required: false
default: "ghcr.io"
setup_cosign:
description: "Whether to install cosign for image signing"
required: false
default: "true"
skip_login_on_pr:
description: "Whether to skip registry login on pull requests"
required: false
default: "true"
runs:
using: "composite"
steps:
- name: Validate inputs
shell: bash
env:
REGISTRY: ${{ inputs.registry }}
SETUP_COSIGN: ${{ inputs.setup_cosign }}
SKIP_LOGIN_ON_PR: ${{ inputs.skip_login_on_pr }}
run: |
set -euo pipefail
# Security: Validate registry input - must be hostname[:port] only, no paths
# Allow empty registry for cases where login is handled externally (e.g., ECR)
if [[ -n "$REGISTRY" ]]; then
if [[ "$REGISTRY" =~ / ]]; then
echo "ERROR: Invalid registry format: $REGISTRY"
echo "Registry must be host[:port] with no path (e.g., 'ghcr.io' or 'registry.example.com:5000')"
echo "Path components like 'ghcr.io/org' are not allowed as they break docker login"
exit 1
fi
# Validate hostname with optional port format
if [[ ! "$REGISTRY" =~ ^[a-zA-Z0-9.-]+(\:[0-9]+)?$ ]]; then
echo "ERROR: Invalid registry hostname format: $REGISTRY"
echo "Registry must be a valid hostname optionally with port (e.g., 'ghcr.io' or 'registry.example.com:5000')"
exit 1
fi
fi
# Validate boolean inputs
if [[ "$SETUP_COSIGN" != "true" && "$SETUP_COSIGN" != "false" ]]; then
echo "ERROR: setup_cosign must be 'true' or 'false', got: $SETUP_COSIGN"
exit 1
fi
if [[ "$SKIP_LOGIN_ON_PR" != "true" && "$SKIP_LOGIN_ON_PR" != "false" ]]; then
echo "ERROR: skip_login_on_pr must be 'true' or 'false', got: $SKIP_LOGIN_ON_PR"
exit 1
fi
echo "SUCCESS: Input validation passed"
- name: Set up Depot CLI
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
- name: Install cosign
# Install cosign when requested AND when we might actually sign images
# (i.e., non-PR contexts or when we login on PRs)
if: ${{ inputs.setup_cosign == 'true' && (inputs.skip_login_on_pr == 'false' || github.event_name != 'pull_request') }}
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- name: Log into registry
if: ${{ inputs.registry != '' && (inputs.skip_login_on_pr == 'false' || github.event_name != 'pull_request') }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ${{ inputs.registry }}
username: ${{ github.actor }}
password: ${{ github.token }}
- name: Verify setup completion
shell: bash
run: |
set -euo pipefail
# Verify Depot CLI is available
if ! command -v depot >/dev/null 2>&1; then
echo "ERROR: Depot CLI not found in PATH"
exit 1
fi
# Verify cosign if it should be installed (same conditions as install step)
if [[ "${{ inputs.setup_cosign }}" == "true" ]] && [[ "${{ inputs.skip_login_on_pr }}" == "false" || "${{ github.event_name }}" != "pull_request" ]]; then
if ! command -v cosign >/dev/null 2>&1; then
echo "ERROR: Cosign not found in PATH despite being requested"
exit 1
fi
fi
echo "SUCCESS: Docker build setup completed successfully"

View File

@@ -0,0 +1,192 @@
name: Resolve Docker Version
description: |
Resolves and validates Docker-compatible SemVer versions for container builds with comprehensive security.
Security Features:
- Command injection protection
- Input sanitization and validation
- Docker tag character restrictions
- Length limits and boundary checks
- Safe branch name handling
Supports multiple modes: release, manual override, branch auto-detection, and experimental timestamped versions.
inputs:
version:
description: "Explicit version (SemVer only, e.g., 1.2.3-beta). If provided, this version is used directly. If empty, version is auto-generated from branch name."
required: false
current_branch:
description: "Current branch name for auto-detection"
required: true
experimental_mode:
description: "Enable experimental mode with timestamp-based versions"
required: false
default: "false"
outputs:
version:
description: "Resolved Docker-compatible SemVer version"
value: ${{ steps.resolve.outputs.version }}
source:
description: "Source of version (release|override|branch)"
value: ${{ steps.resolve.outputs.source }}
normalized:
description: "Whether the version was normalized (true/false)"
value: ${{ steps.resolve.outputs.normalized }}
runs:
using: "composite"
steps:
- name: Resolve and validate Docker version
id: resolve
shell: bash
env:
EXPLICIT_VERSION: ${{ inputs.version }}
CURRENT_BRANCH: ${{ inputs.current_branch }}
EXPERIMENTAL_MODE: ${{ inputs.experimental_mode }}
run: |
set -euo pipefail
# Function to validate SemVer format (Docker-compatible, no '+' build metadata)
validate_semver() {
local version="$1"
local context="$2"
if [[ ! "$version" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
echo "ERROR: Invalid $context format. Must be semver without build metadata (e.g., 1.2.3, 1.2.3-alpha)"
echo "Provided: $version"
echo "Note: Docker tags cannot contain '+' characters. Use prerelease identifiers instead."
exit 1
fi
}
# Function to generate branch-based version
generate_branch_version() {
local branch="$1"
local use_timestamp="${2:-true}"
local timestamp
if [[ "$use_timestamp" == "true" ]]; then
timestamp=$(date +%s)
else
timestamp=""
fi
# Sanitize branch name for Docker compatibility
local sanitized_branch=$(echo "$branch" | sed 's/[^a-zA-Z0-9.-]/-/g' | sed 's/--*/-/g' | sed 's/^-\|-$//g')
# Additional safety: truncate if too long (reserve space for prefix and timestamp)
if (( ${#sanitized_branch} > 80 )); then
sanitized_branch="${sanitized_branch:0:80}"
echo "INFO: Branch name truncated for Docker compatibility" >&2
fi
local version
# Generate version based on branch name (unified approach)
# All branches get alpha versions with sanitized branch name
if [[ -n "$timestamp" ]]; then
version="0.0.0-alpha-$sanitized_branch-$timestamp"
echo "INFO: Branch '$branch' detected - alpha version: $version" >&2
else
version="0.0.0-alpha-$sanitized_branch"
echo "INFO: Branch '$branch' detected - alpha version: $version" >&2
fi
echo "$version"
}
# Input validation and sanitization
if [[ -z "$CURRENT_BRANCH" ]]; then
echo "ERROR: current_branch input is required"
exit 1
fi
# Security: Validate inputs to prevent command injection
# Use grep to check for dangerous characters (more reliable than bash regex)
validate_input() {
local input="$1"
local name="$2"
# Check for dangerous characters using grep
if echo "$input" | grep -q '[;|&`$(){}\\[:space:]]'; then
echo "ERROR: $name contains potentially dangerous characters: $input"
echo "Input should only contain letters, numbers, hyphens, underscores, dots, and forward slashes"
return 1
fi
return 0
}
# Validate current branch
if ! validate_input "$CURRENT_BRANCH" "Branch name"; then
exit 1
fi
# Validate explicit version if provided
if [[ -n "$EXPLICIT_VERSION" ]] && ! validate_input "$EXPLICIT_VERSION" "Explicit version"; then
exit 1
fi
# Main resolution logic (ultra-simplified)
NORMALIZED="false"
if [[ -n "$EXPLICIT_VERSION" ]]; then
# Use provided explicit version (from either workflow_call or manual input)
validate_semver "$EXPLICIT_VERSION" "explicit version"
# Normalize to lowercase for Docker/ECR compatibility
RESOLVED_VERSION="${EXPLICIT_VERSION,,}"
if [[ "$EXPLICIT_VERSION" != "$RESOLVED_VERSION" ]]; then
NORMALIZED="true"
echo "INFO: Original version contained uppercase characters, normalized: $EXPLICIT_VERSION -> $RESOLVED_VERSION"
fi
SOURCE="explicit"
echo "INFO: Using explicit version: $RESOLVED_VERSION"
else
# Auto-generate version from branch name
if [[ "$EXPERIMENTAL_MODE" == "true" ]]; then
# Use timestamped version generation
echo "INFO: Experimental mode: generating timestamped version from branch: $CURRENT_BRANCH"
RESOLVED_VERSION=$(generate_branch_version "$CURRENT_BRANCH" "true")
SOURCE="experimental"
else
# Standard branch version (no timestamp)
echo "INFO: Auto-detecting version from branch: $CURRENT_BRANCH"
RESOLVED_VERSION=$(generate_branch_version "$CURRENT_BRANCH" "false")
SOURCE="branch"
fi
echo "Generated version: $RESOLVED_VERSION"
fi
# Final validation - ensure result is valid Docker tag
if [[ -z "$RESOLVED_VERSION" ]]; then
echo "ERROR: Failed to resolve version"
exit 1
fi
if (( ${#RESOLVED_VERSION} > 128 )); then
echo "ERROR: Version must be at most 128 characters (Docker limitation)"
echo "Generated version: $RESOLVED_VERSION (${#RESOLVED_VERSION} chars)"
exit 1
fi
if [[ ! "$RESOLVED_VERSION" =~ ^[a-z0-9._-]+$ ]]; then
echo "ERROR: Version contains invalid characters for Docker tags"
echo "Version: $RESOLVED_VERSION"
exit 1
fi
if [[ "$RESOLVED_VERSION" =~ ^[.-] || "$RESOLVED_VERSION" =~ [.-]$ ]]; then
echo "ERROR: Version must not start or end with '.' or '-'"
echo "Version: $RESOLVED_VERSION"
exit 1
fi
# Output results
echo "SUCCESS: Resolved Docker version: $RESOLVED_VERSION (source: $SOURCE)"
echo "version=$RESOLVED_VERSION" >> $GITHUB_OUTPUT
echo "source=$SOURCE" >> $GITHUB_OUTPUT
echo "normalized=$NORMALIZED" >> $GITHUB_OUTPUT

View File

@@ -0,0 +1,160 @@
name: Update Package Version
description: |
Safely updates package.json version with comprehensive validation and atomic operations.
Security Features:
- Path traversal protection
- SemVer validation with length limits
- Atomic file operations with backup/recovery
- JSON validation before applying changes
This action is designed to be secure by default and prevent common attack vectors.
inputs:
version:
description: "Version to set in package.json (must be valid SemVer)"
required: true
package_path:
description: "Path to package.json file"
required: false
default: "./apps/web/package.json"
outputs:
updated_version:
description: "The version that was actually set in package.json"
value: ${{ steps.update.outputs.updated_version }}
runs:
using: "composite"
steps:
- name: Update and verify package.json version
id: update
shell: bash
env:
VERSION: ${{ inputs.version }}
PACKAGE_PATH: ${{ inputs.package_path }}
run: |
set -euo pipefail
# Validate inputs
if [[ -z "$VERSION" ]]; then
echo "ERROR: version input is required"
exit 1
fi
# Security: Validate package_path to prevent path traversal attacks
# Only allow paths within the workspace and must end with package.json
if [[ "$PACKAGE_PATH" =~ \.\./|^/|^~ ]]; then
echo "ERROR: Invalid package path - path traversal detected: $PACKAGE_PATH"
echo "Package path must be relative to workspace root and cannot contain '../', start with '/', or '~'"
exit 1
fi
if [[ ! "$PACKAGE_PATH" =~ package\.json$ ]]; then
echo "ERROR: Package path must end with 'package.json': $PACKAGE_PATH"
exit 1
fi
# Resolve to absolute path within workspace for additional security
WORKSPACE_ROOT="${GITHUB_WORKSPACE:-$(pwd)}"
# Use realpath to resolve both paths and handle symlinks properly
WORKSPACE_ROOT=$(realpath "$WORKSPACE_ROOT")
RESOLVED_PATH=$(realpath "${WORKSPACE_ROOT}/${PACKAGE_PATH}")
# Ensure WORKSPACE_ROOT has a trailing slash for proper prefix matching
WORKSPACE_ROOT="${WORKSPACE_ROOT}/"
# Use shell string matching to ensure RESOLVED_PATH is within workspace
# This is more secure than regex and handles edge cases properly
if [[ "$RESOLVED_PATH" != "$WORKSPACE_ROOT"* ]]; then
echo "ERROR: Resolved path is outside workspace: $RESOLVED_PATH"
echo "Workspace root: $WORKSPACE_ROOT"
exit 1
fi
if [[ ! -f "$RESOLVED_PATH" ]]; then
echo "ERROR: package.json not found at: $RESOLVED_PATH"
exit 1
fi
# Use resolved path for operations
PACKAGE_PATH="$RESOLVED_PATH"
# Validate SemVer format with additional security checks
if [[ ${#VERSION} -gt 128 ]]; then
echo "ERROR: Version string too long (${#VERSION} chars, max 128): $VERSION"
exit 1
fi
if [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
echo "ERROR: Invalid SemVer format: $VERSION"
echo "Expected format: MAJOR.MINOR.PATCH[-PRERELEASE]"
echo "Only alphanumeric characters, dots, and hyphens allowed in prerelease"
exit 1
fi
# Additional validation: Check for reasonable version component sizes
# Extract base version (MAJOR.MINOR.PATCH) without prerelease/build metadata
if [[ "$VERSION" =~ ^([0-9]+\.[0-9]+\.[0-9]+) ]]; then
BASE_VERSION="${BASH_REMATCH[1]}"
else
echo "ERROR: Could not extract base version from: $VERSION"
exit 1
fi
# Split version components safely
IFS='.' read -ra VERSION_PARTS <<< "$BASE_VERSION"
# Validate component sizes (should have exactly 3 parts due to regex above)
if (( ${VERSION_PARTS[0]} > 999 || ${VERSION_PARTS[1]} > 999 || ${VERSION_PARTS[2]} > 999 )); then
echo "ERROR: Version components too large (max 999 each): $VERSION"
echo "Components: ${VERSION_PARTS[0]}.${VERSION_PARTS[1]}.${VERSION_PARTS[2]}"
exit 1
fi
echo "Updating package.json version to: $VERSION"
# Create backup for atomic operations
BACKUP_PATH="${PACKAGE_PATH}.backup.$$"
cp "$PACKAGE_PATH" "$BACKUP_PATH"
# Use jq to safely update the version field with error handling
if ! jq --arg version "$VERSION" '.version = $version' "$PACKAGE_PATH" > "${PACKAGE_PATH}.tmp"; then
echo "ERROR: jq failed to process package.json"
rm -f "${PACKAGE_PATH}.tmp" "$BACKUP_PATH"
exit 1
fi
# Validate the generated JSON before applying changes
if ! jq empty "${PACKAGE_PATH}.tmp" 2>/dev/null; then
echo "ERROR: Generated invalid JSON"
rm -f "${PACKAGE_PATH}.tmp" "$BACKUP_PATH"
exit 1
fi
# Atomic move operation
if ! mv "${PACKAGE_PATH}.tmp" "$PACKAGE_PATH"; then
echo "ERROR: Failed to update package.json"
# Restore backup
mv "$BACKUP_PATH" "$PACKAGE_PATH"
exit 1
fi
# Verify the update was successful
UPDATED_VERSION=$(jq -r '.version' "$PACKAGE_PATH" 2>/dev/null)
if [[ "$UPDATED_VERSION" != "$VERSION" ]]; then
echo "ERROR: Version update failed!"
echo "Expected: $VERSION"
echo "Actual: $UPDATED_VERSION"
# Restore backup
mv "$BACKUP_PATH" "$PACKAGE_PATH"
exit 1
fi
# Clean up backup on success
rm -f "$BACKUP_PATH"
echo "SUCCESS: Updated package.json version to: $UPDATED_VERSION"
echo "updated_version=$UPDATED_VERSION" >> $GITHUB_OUTPUT

View File

@@ -1,104 +0,0 @@
name: "Upload Sentry Sourcemaps"
description: "Extract sourcemaps from Docker image and upload to Sentry"
inputs:
docker_image:
description: "Docker image to extract sourcemaps from"
required: true
release_version:
description: "Sentry release version (e.g., v1.2.3)"
required: true
sentry_auth_token:
description: "Sentry authentication token"
required: true
environment:
description: "Sentry environment (e.g., production, staging)"
required: false
default: "staging"
runs:
using: "composite"
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Extract sourcemaps from Docker image
shell: bash
env:
DOCKER_IMAGE: ${{ inputs.docker_image }}
run: |
set -euo pipefail
# Validate docker image format (basic validation)
if [[ ! "$DOCKER_IMAGE" =~ ^[a-zA-Z0-9._/-]+:[a-zA-Z0-9._-]+$ ]] && [[ ! "$DOCKER_IMAGE" =~ ^[a-zA-Z0-9._/-]+@sha256:[A-Fa-f0-9]{64}$ ]]; then
echo "❌ Error: Invalid docker image format. Must be in format 'image:tag' or 'image@sha256:hash'"
echo "Provided: ${DOCKER_IMAGE}"
exit 1
fi
echo "📦 Extracting sourcemaps from Docker image: ${DOCKER_IMAGE}"
# Create temporary container from the image and capture its ID
echo "Creating temporary container..."
CONTAINER_ID=$(docker create "$DOCKER_IMAGE")
echo "Container created with ID: ${CONTAINER_ID}"
# Set up cleanup function to ensure container is removed on script exit
cleanup_container() {
# Capture the current exit code to preserve it
local original_exit_code=$?
echo "🧹 Cleaning up Docker container..."
# Remove the container if it exists (ignore errors if already removed)
if [ -n "$CONTAINER_ID" ]; then
docker rm -f "$CONTAINER_ID" 2>/dev/null || true
echo "Container ${CONTAINER_ID} removed"
fi
# Exit with the original exit code to preserve script success/failure status
exit $original_exit_code
}
# Register cleanup function to run on script exit (success or failure)
trap cleanup_container EXIT
# Extract .next directory containing sourcemaps
docker cp "$CONTAINER_ID:/home/nextjs/apps/web/.next" ./extracted-next
# Verify sourcemaps exist
if [ ! -d "./extracted-next/static/chunks" ]; then
echo "❌ Error: .next/static/chunks directory not found in Docker image"
echo "Expected structure: /home/nextjs/apps/web/.next/static/chunks/"
exit 1
fi
sourcemap_count=$(find ./extracted-next/static/chunks -name "*.map" | wc -l)
echo "✅ Found ${sourcemap_count} sourcemap files"
if [ "$sourcemap_count" -eq 0 ]; then
echo "❌ Error: No sourcemap files found. Check that productionBrowserSourceMaps is enabled."
exit 1
fi
- name: Create Sentry release and upload sourcemaps
uses: getsentry/action-release@v3
env:
SENTRY_AUTH_TOKEN: ${{ inputs.sentry_auth_token }}
SENTRY_ORG: formbricks
SENTRY_PROJECT: formbricks-cloud
with:
environment: ${{ inputs.environment }}
version: ${{ inputs.release_version }}
sourcemaps: "./extracted-next/"
- name: Clean up extracted files
shell: bash
if: always()
run: |
set -euo pipefail
# Clean up extracted files
rm -rf ./extracted-next
echo "🧹 Cleaned up extracted files"

View File

@@ -1,32 +0,0 @@
# Testing Instructions
When generating test files inside the "/app/web" path, follow these rules:
- You are an experienced senior software engineer
- Use vitest
- Ensure 100% code coverage
- Add as few comments as possible
- The test file should be located in the same folder as the original file
- Use the `test` function instead of `it`
- Follow the same test pattern used for other files in the package where the file is located
- All imports should be at the top of the file, not inside individual tests
- For mocking inside "test" blocks use "vi.mocked"
- If the file is located in the "packages/survey" path, use "@testing-library/preact" instead of "@testing-library/react"
- Don't mock functions that are already mocked in the "apps/web/vitestSetup.ts" file
- When using "screen.getByText" check for the tolgee string if it is being used in the file.
- The types for mocked variables can be found in the "packages/types" path. Be sure that every imported type exists before using it. Don't create types that are not already in the codebase.
- When mocking data check if the properties added are part of the type of the object being mocked. Only specify known properties, don't use properties that are not part of the type.
If it's a test for a ".tsx" file, follow these extra instructions:
- Add this code inside the "describe" block and before any test:
afterEach(() => {
cleanup();
});
- The "afterEach" function should only have the "cleanup()" line inside it and should be adde to the "vitest" imports.
- For click events, import userEvent from "@testing-library/user-event"
- Mock other components that would make the test more complex, as long as mocking them doesn't make the test flaky. It's ok to leave basic and simple components unmocked.
- You don't need to mock @tolgee/react
- Use "import "@testing-library/jest-dom/vitest";"

View File

@@ -1,12 +1,49 @@
name: Build & Push Docker to ECR
name: Build Cloud Deployment Images
# This workflow builds Formbricks Docker images for ECR deployment:
# - workflow_call: Used by releases with explicit SemVer versions
# - workflow_dispatch: Auto-detects version from current branch or uses override
on:
workflow_dispatch:
inputs:
version_override:
description: "Override version (SemVer only, e.g., 1.2.3). Leave empty to auto-detect from branch."
required: false
type: string
deploy_production:
description: "Tag image for production deployment"
required: false
default: false
type: boolean
deploy_staging:
description: "Tag image for staging deployment"
required: false
default: false
type: boolean
workflow_call:
inputs:
image_tag:
description: "Image tag to push (e.g., v3.16.1)"
description: "Image tag to push (required for workflow_call)"
required: true
default: "v3.16.1"
type: string
IS_PRERELEASE:
description: "Whether this is a prerelease (auto-tags for staging/production)"
required: false
type: boolean
default: false
MAKE_LATEST:
description: "Whether to tag for production (from GitHub release 'Set as the latest release' option)"
required: false
type: boolean
default: false
outputs:
IMAGE_TAG:
description: "Normalized image tag used for the build"
value: ${{ jobs.build-and-push.outputs.IMAGE_TAG }}
TAGS:
description: "Newline-separated list of ECR tags pushed"
value: ${{ jobs.build-and-push.outputs.TAGS }}
permissions:
contents: read
@@ -17,14 +54,15 @@ env:
# ECR settings are sourced from repository/environment variables for portability across envs/forks
ECR_REGISTRY: ${{ vars.ECR_REGISTRY }}
ECR_REPOSITORY: ${{ vars.ECR_REPOSITORY }}
DOCKERFILE: apps/web/Dockerfile
CONTEXT: .
jobs:
build-and-push:
name: Build and Push
runs-on: ubuntu-latest
timeout-minutes: 45
outputs:
IMAGE_TAG: ${{ steps.build.outputs.image_tag }}
TAGS: ${{ steps.build.outputs.registry_tags }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
@@ -34,66 +72,23 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Validate image tag input
shell: bash
env:
IMAGE_TAG: ${{ inputs.image_tag }}
run: |
set -euo pipefail
if [[ -z "${IMAGE_TAG}" ]]; then
echo "❌ Image tag is required (non-empty)."
exit 1
fi
if (( ${#IMAGE_TAG} > 128 )); then
echo "❌ Image tag must be at most 128 characters."
exit 1
fi
if [[ ! "${IMAGE_TAG}" =~ ^[a-z0-9._-]+$ ]]; then
echo "❌ Image tag may only contain lowercase letters, digits, '.', '_' and '-'."
exit 1
fi
if [[ "${IMAGE_TAG}" =~ ^[.-] || "${IMAGE_TAG}" =~ [.-]$ ]]; then
echo "❌ Image tag must not start or end with '.' or '-'."
exit 1
fi
- name: Validate required variables
shell: bash
env:
ECR_REGISTRY: ${{ env.ECR_REGISTRY }}
ECR_REPOSITORY: ${{ env.ECR_REPOSITORY }}
ECR_REGION: ${{ env.ECR_REGION }}
run: |
set -euo pipefail
if [[ -z "${ECR_REGISTRY}" || -z "${ECR_REPOSITORY}" || -z "${ECR_REGION}" ]]; then
echo "ECR_REGION, ECR_REGISTRY and ECR_REPOSITORY must be set via repository or environment variables (Settings → Variables)."
exit 1
fi
- name: Configure AWS credentials (OIDC)
uses: aws-actions/configure-aws-credentials@7474bc4690e29a8392af63c5b98e7449536d5c3a
- name: Build and push cloud deployment image
id: build
uses: ./.github/actions/build-and-push-docker
with:
role-to-assume: ${{ secrets.AWS_ECR_PUSH_ROLE_ARN }}
aws-region: ${{ env.ECR_REGION }}
- name: Log in to Amazon ECR
uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076
- name: Set up Depot CLI
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
- name: Build and push image (Depot remote builder)
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
with:
project: tw0fqmsx3c
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: ${{ env.CONTEXT }}
file: ${{ env.DOCKERFILE }}
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.ECR_REGISTRY }}/${{ env.ECR_REPOSITORY }}:${{ inputs.image_tag }}
${{ env.ECR_REGISTRY }}/${{ env.ECR_REPOSITORY }}:latest
secrets: |
database_url=${{ secrets.DUMMY_DATABASE_URL }}
encryption_key=${{ secrets.DUMMY_ENCRYPTION_KEY }}
registry_type: "ecr"
ecr_registry: ${{ env.ECR_REGISTRY }}
ecr_repository: ${{ env.ECR_REPOSITORY }}
ecr_region: ${{ env.ECR_REGION }}
aws_role_arn: ${{ secrets.AWS_ECR_PUSH_ROLE_ARN }}
version: ${{ inputs.version_override || inputs.image_tag }}
deploy_production: ${{ inputs.deploy_production }}
deploy_staging: ${{ inputs.deploy_staging }}
is_prerelease: ${{ inputs.IS_PRERELEASE }}
make_latest: ${{ inputs.MAKE_LATEST }}
env:
DEPOT_PROJECT_TOKEN: ${{ secrets.DEPOT_PROJECT_TOKEN }}
DUMMY_DATABASE_URL: ${{ secrets.DUMMY_DATABASE_URL }}
DUMMY_ENCRYPTION_KEY: ${{ secrets.DUMMY_ENCRYPTION_KEY }}
DUMMY_REDIS_URL: ${{ secrets.DUMMY_REDIS_URL }}
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}

View File

@@ -13,13 +13,12 @@ jobs:
chromatic:
name: Run Chromatic
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
packages: write
id-token: write
actions: read
contents: read
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
@@ -27,16 +26,34 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version: 20
- name: Install pnpm
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install dependencies
run: pnpm install --config.platform=linux --config.architecture=x64
- name: Run Chromatic
uses: chromaui/action@c93e0bc3a63aa176e14a75b61a31847cbfdd341c # latest
uses: chromaui/action@4c20b95e9d3209ecfdf9cd6aace6bbde71ba1694 # v13.3.4
with:
# ⚠️ Make sure to configure a `CHROMATIC_PROJECT_TOKEN` repository secret
projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }}
workingDir: apps/storybook
zip: true

View File

@@ -4,7 +4,7 @@ on:
workflow_dispatch:
inputs:
VERSION:
description: "The version of the Docker image to release, full image tag if image tag is v0.0.0 enter v0.0.0."
description: "The version of the Docker image to release (clean SemVer, e.g., 1.2.3)"
required: true
type: string
REPOSITORY:

View File

@@ -21,10 +21,10 @@ jobs:
name: Validate Docker Build
runs-on: ubuntu-latest
# Add PostgreSQL service container
# Add PostgreSQL and Redis service containers
services:
postgres:
image: pgvector/pgvector:pg17
image: pgvector/pgvector@sha256:9ae02a756ba16a2d69dd78058e25915e36e189bb36ddf01ceae86390d7ed786a
env:
POSTGRES_USER: test
POSTGRES_PASSWORD: test
@@ -38,6 +38,11 @@ jobs:
--health-timeout 5s
--health-retries 5
redis:
image: valkey/valkey@sha256:12ba4f45a7c3e1d0f076acd616cb230834e75a77e8516dde382720af32832d6d
ports:
- 6379:6379
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
@@ -67,6 +72,7 @@ jobs:
secrets: |
database_url=${{ secrets.DUMMY_DATABASE_URL }}
encryption_key=${{ secrets.DUMMY_ENCRYPTION_KEY }}
redis_url=redis://localhost:6379
- name: Verify and Initialize PostgreSQL
run: |
@@ -96,6 +102,29 @@ jobs:
echo "Network configuration:"
netstat -tulpn | grep 5432 || echo "No process listening on port 5432"
- name: Verify Redis/Valkey Connection
run: |
echo "Verifying Redis/Valkey connection..."
# Install Redis client to test connection
sudo apt-get update && sudo apt-get install -y redis-tools
# Test connection using redis-cli with timeout and proper error handling
echo "Testing Redis connection with 30 second timeout..."
if timeout 30 bash -c 'until redis-cli -h localhost -p 6379 ping >/dev/null 2>&1; do
echo "Waiting for Redis to be ready..."
sleep 2
done'; then
echo "✅ Redis connection successful"
redis-cli -h localhost -p 6379 info server | head -5
else
echo "❌ Redis connection failed after 30 seconds"
exit 1
fi
# Show network configuration for Redis
echo "Redis network configuration:"
netstat -tulpn | grep 6379 || echo "No process listening on port 6379"
- name: Test Docker Image with Health Check
shell: bash
env:
@@ -113,6 +142,7 @@ jobs:
-p 3000:3000 \
-e DATABASE_URL="postgresql://test:test@host.docker.internal:5432/formbricks" \
-e ENCRYPTION_KEY="$DUMMY_ENCRYPTION_KEY" \
-e REDIS_URL="redis://host.docker.internal:6379" \
-d "formbricks-test:$GITHUB_SHA"
# Start health check polling immediately (every 5 seconds for up to 5 minutes)

View File

@@ -3,26 +3,20 @@ name: E2E Tests
on:
workflow_call:
secrets:
AZURE_CLIENT_ID:
required: false
AZURE_TENANT_ID:
required: false
AZURE_SUBSCRIPTION_ID:
required: false
PLAYWRIGHT_SERVICE_URL:
required: false
PLAYWRIGHT_SERVICE_ACCESS_TOKEN:
required: false
ENTERPRISE_LICENSE_KEY:
required: true
# Add other secrets if necessary
workflow_dispatch:
env:
TELEMETRY_DISABLED: 1
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
TURBO_TEAM: ${{ vars.TURBO_TEAM }}
permissions:
id-token: write
contents: read
actions: read
@@ -33,7 +27,7 @@ jobs:
timeout-minutes: 60
services:
postgres:
image: pgvector/pgvector:pg17
image: pgvector/pgvector@sha256:9ae02a756ba16a2d69dd78058e25915e36e189bb36ddf01ceae86390d7ed786a
env:
POSTGRES_DB: postgres
POSTGRES_USER: postgres
@@ -41,27 +35,23 @@ jobs:
ports:
- 5432:5432
options: >-
--health-cmd="pg_isready -U testuser"
--health-cmd="pg_isready -U postgres"
--health-interval=10s
--health-timeout=5s
--health-retries=5
valkey:
image: valkey/valkey:8.1.1
image: valkey/valkey@sha256:12ba4f45a7c3e1d0f076acd616cb230834e75a77e8516dde382720af32832d6d
ports:
- 6379:6379
options: >-
--entrypoint "valkey-server"
--health-cmd="valkey-cli ping"
--health-interval=10s
--health-timeout=5s
--health-retries=5
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: allow
egress-policy: audit
allowed-endpoints: |
ee.formbricks.com:443
registry-1.docker.io:443
docker.io:443
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: ./.github/actions/dangerous-git-checkout
@@ -92,8 +82,69 @@ jobs:
sed -i "s|REDIS_URL=.*|REDIS_URL=redis://localhost:6379|" .env
echo "" >> .env
echo "E2E_TESTING=1" >> .env
echo "S3_REGION=us-east-1" >> .env
echo "S3_BUCKET_NAME=formbricks-e2e" >> .env
echo "S3_ENDPOINT_URL=http://localhost:9000" >> .env
echo "S3_ACCESS_KEY=devminio" >> .env
echo "S3_SECRET_KEY=devminio123" >> .env
echo "S3_FORCE_PATH_STYLE=1" >> .env
shell: bash
- name: Install MinIO client (mc)
run: |
set -euo pipefail
MC_VERSION="RELEASE.2025-08-13T08-35-41Z"
MC_BASE="https://dl.min.io/client/mc/release/linux-amd64/archive"
MC_BIN="mc.${MC_VERSION}"
MC_SUM="${MC_BIN}.sha256sum"
curl -fsSL "${MC_BASE}/${MC_BIN}" -o "${MC_BIN}"
curl -fsSL "${MC_BASE}/${MC_SUM}" -o "${MC_SUM}"
sha256sum -c "${MC_SUM}"
chmod +x "${MC_BIN}"
sudo mv "${MC_BIN}" /usr/local/bin/mc
- name: Start MinIO Server
run: |
set -euo pipefail
# Start MinIO server in background
docker run -d \
--name minio-server \
-p 9000:9000 \
-p 9001:9001 \
-e MINIO_ROOT_USER=devminio \
-e MINIO_ROOT_PASSWORD=devminio123 \
minio/minio:RELEASE.2025-09-07T16-13-09Z \
server /data --console-address :9001
echo "MinIO server started"
- name: Wait for MinIO and create S3 bucket
run: |
set -euo pipefail
echo "Waiting for MinIO to be ready..."
ready=0
for i in {1..60}; do
if curl -fsS http://localhost:9000/minio/health/live >/dev/null; then
echo "MinIO is up after ${i} seconds"
ready=1
break
fi
sleep 1
done
if [ "$ready" -ne 1 ]; then
echo "::error::MinIO did not become ready within 60 seconds"
exit 1
fi
mc alias set local http://localhost:9000 devminio devminio123
mc mb --ignore-existing local/formbricks-e2e
- name: Build App
run: |
pnpm build --filter=@formbricks/web...
@@ -109,6 +160,12 @@ jobs:
cd apps/web && pnpm vitest run modules/core/rate-limit/rate-limit-load.test.ts
shell: bash
- name: Run Cache Integration Tests
run: |
echo "Running cache integration tests with Redis/Valkey..."
cd packages/cache && pnpm vitest run src/cache-integration.test.ts
shell: bash
- name: Check for Enterprise License
run: |
LICENSE_KEY=$(grep '^ENTERPRISE_LICENSE_KEY=' .env | cut -d'=' -f2-)
@@ -118,6 +175,12 @@ jobs:
fi
echo "License key length: ${#LICENSE_KEY}"
- name: Disable rate limiting for E2E tests
run: |
echo "RATE_LIMITING_DISABLED=1" >> .env
echo "Rate limiting disabled for E2E tests"
shell: bash
- name: Run App
run: |
echo "Starting app with enterprise license..."
@@ -139,31 +202,32 @@ jobs:
- name: Install Playwright
run: pnpm exec playwright install --with-deps
- name: Set Azure Secret Variables
run: |
if [[ -n "${{ secrets.AZURE_CLIENT_ID }}" && -n "${{ secrets.AZURE_TENANT_ID }}" && -n "${{ secrets.AZURE_SUBSCRIPTION_ID }}" ]]; then
echo "AZURE_ENABLED=true" >> $GITHUB_ENV
else
echo "AZURE_ENABLED=false" >> $GITHUB_ENV
fi
- name: Azure login
if: env.AZURE_ENABLED == 'true'
uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0
with:
client-id: ${{ secrets.AZURE_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Run E2E Tests (Azure)
if: env.AZURE_ENABLED == 'true'
- name: Determine Playwright execution mode
shell: bash
env:
PLAYWRIGHT_SERVICE_URL: ${{ secrets.PLAYWRIGHT_SERVICE_URL }}
PLAYWRIGHT_SERVICE_ACCESS_TOKEN: ${{ secrets.PLAYWRIGHT_SERVICE_ACCESS_TOKEN }}
run: |
pnpm test-e2e:azure
set -euo pipefail
if [[ -n "${PLAYWRIGHT_SERVICE_URL}" && -n "${PLAYWRIGHT_SERVICE_ACCESS_TOKEN}" ]]; then
echo "PW_MODE=service" >> "$GITHUB_ENV"
else
echo "PW_MODE=local" >> "$GITHUB_ENV"
fi
- name: Run E2E Tests (Playwright Service)
if: env.PW_MODE == 'service'
env:
PLAYWRIGHT_SERVICE_URL: ${{ secrets.PLAYWRIGHT_SERVICE_URL }}
PLAYWRIGHT_SERVICE_ACCESS_TOKEN: ${{ secrets.PLAYWRIGHT_SERVICE_ACCESS_TOKEN }}
CI: true
run: pnpm test-e2e:azure
- name: Run E2E Tests (Local)
if: env.AZURE_ENABLED == 'false'
if: env.PW_MODE == 'local'
env:
CI: true
run: |
pnpm test:e2e

View File

@@ -8,16 +8,103 @@ permissions:
contents: read
jobs:
docker-build:
name: Build & release docker image
check-latest-release:
name: Check if this is the latest release
runs-on: ubuntu-latest
timeout-minutes: 5
permissions:
contents: read
outputs:
is_latest: ${{ steps.compare_tags.outputs.is_latest }}
# This job determines if the current release was marked as "Set as the latest release"
# by comparing it with the latest release from GitHub API
steps:
- name: Harden the runner
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
- name: Get latest release tag from API
id: get_latest_release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
run: |
set -euo pipefail
# Get the latest release tag from GitHub API with error handling
echo "Fetching latest release from GitHub API..."
# Use curl with error handling - API returns 404 if no releases exist
http_code=$(curl -s -w "%{http_code}" -H "Authorization: token ${GITHUB_TOKEN}" \
"https://api.github.com/repos/${REPO}/releases/latest" -o /tmp/latest_release.json)
if [[ "$http_code" == "404" ]]; then
echo "⚠️ No previous releases found (404). This appears to be the first release."
echo "latest_release=" >> $GITHUB_OUTPUT
elif [[ "$http_code" == "200" ]]; then
latest_release=$(jq -r .tag_name /tmp/latest_release.json)
if [[ "$latest_release" == "null" || -z "$latest_release" ]]; then
echo "⚠️ API returned null/empty tag_name. Treating as first release."
echo "latest_release=" >> $GITHUB_OUTPUT
else
echo "Latest release from API: ${latest_release}"
echo "latest_release=${latest_release}" >> $GITHUB_OUTPUT
fi
else
echo "❌ GitHub API error (HTTP ${http_code}). Treating as first release."
echo "latest_release=" >> $GITHUB_OUTPUT
fi
echo "Current release tag: ${{ github.event.release.tag_name }}"
- name: Compare release tags
id: compare_tags
env:
CURRENT_TAG: ${{ github.event.release.tag_name }}
LATEST_TAG: ${{ steps.get_latest_release.outputs.latest_release }}
run: |
set -euo pipefail
# Handle first release case (no previous releases)
if [[ -z "${LATEST_TAG}" ]]; then
echo "🎉 This is the first release (${CURRENT_TAG}) - treating as latest"
echo "is_latest=true" >> $GITHUB_OUTPUT
elif [[ "${CURRENT_TAG}" == "${LATEST_TAG}" ]]; then
echo "✅ This release (${CURRENT_TAG}) is marked as the latest release"
echo "is_latest=true" >> $GITHUB_OUTPUT
else
echo " This release (${CURRENT_TAG}) is not the latest release (latest: ${LATEST_TAG})"
echo "is_latest=false" >> $GITHUB_OUTPUT
fi
docker-build-community:
name: Build & release community docker image
permissions:
contents: read
packages: write
id-token: write
uses: ./.github/workflows/release-docker-github.yml
secrets: inherit
needs:
- check-latest-release
with:
IS_PRERELEASE: ${{ github.event.release.prerelease }}
MAKE_LATEST: ${{ needs.check-latest-release.outputs.is_latest == 'true' }}
docker-build-cloud:
name: Build & push Formbricks Cloud to ECR
permissions:
contents: read
id-token: write
uses: ./.github/workflows/build-and-push-ecr.yml
secrets: inherit
with:
image_tag: ${{ needs.docker-build-community.outputs.VERSION }}
IS_PRERELEASE: ${{ github.event.release.prerelease }}
MAKE_LATEST: ${{ needs.check-latest-release.outputs.is_latest == 'true' }}
needs:
- check-latest-release
- docker-build-community
helm-chart-release:
name: Release Helm Chart
@@ -27,48 +114,44 @@ jobs:
uses: ./.github/workflows/release-helm-chart.yml
secrets: inherit
needs:
- docker-build
- docker-build-community
with:
VERSION: ${{ needs.docker-build.outputs.VERSION }}
VERSION: ${{ needs.docker-build-community.outputs.VERSION }}
deploy-formbricks-cloud:
name: Deploy Helm Chart to Formbricks Cloud
permissions:
contents: read
id-token: write
secrets: inherit
uses: ./.github/workflows/deploy-formbricks-cloud.yml
needs:
- docker-build
- helm-chart-release
with:
VERSION: v${{ needs.docker-build.outputs.VERSION }}
ENVIRONMENT: ${{ github.event.release.prerelease && 'staging' || 'production' }}
upload-sentry-sourcemaps:
name: Upload Sentry Sourcemaps
verify-cloud-build:
name: Verify Cloud Build Outputs
runs-on: ubuntu-latest
permissions:
contents: read
timeout-minutes: 5 # Simple verification should be quick
needs:
- docker-build
- deploy-formbricks-cloud
- docker-build-cloud
steps:
- name: Harden the runner (Audit all outbound calls)
- name: Harden the runner
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Display ECR build outputs
env:
IMAGE_TAG: ${{ needs.docker-build-cloud.outputs.IMAGE_TAG }}
TAGS: ${{ needs.docker-build-cloud.outputs.TAGS }}
run: |
set -euo pipefail
- name: Upload Sentry Sourcemaps
uses: ./.github/actions/upload-sentry-sourcemaps
continue-on-error: true
with:
docker_image: ghcr.io/formbricks/formbricks:v${{ needs.docker-build.outputs.VERSION }}
release_version: v${{ needs.docker-build.outputs.VERSION }}
sentry_auth_token: ${{ secrets.SENTRY_AUTH_TOKEN }}
environment: ${{ github.event.release.prerelease && 'staging' || 'production' }}
echo "✅ ECR Build Completed Successfully"
echo "Image Tag: ${IMAGE_TAG}"
echo "ECR Tags:"
printf '%s\n' "${TAGS}"
move-stable-tag:
name: Move stable tag to release
permissions:
contents: write # Required for tag push operations in called workflow
uses: ./.github/workflows/move-stable-tag.yml
needs:
- check-latest-release
- docker-build-community # Ensure release is successful first
with:
release_tag: ${{ github.event.release.tag_name }}
commit_sha: ${{ github.sha }}
is_prerelease: ${{ github.event.release.prerelease }}
make_latest: ${{ needs.check-latest-release.outputs.is_latest == 'true' }}
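A minimal sketch of the same latest-release comparison for local use, assuming curl and jq are installed; REPO and CURRENT_TAG are placeholders, and unauthenticated API calls are rate-limited (the workflow passes GITHUB_TOKEN instead):
set -euo pipefail
REPO="owner/repo"           # placeholder repository
CURRENT_TAG="v1.2.3"        # placeholder release tag
LATEST_TAG=$(curl -fsS "https://api.github.com/repos/${REPO}/releases/latest" | jq -r '.tag_name // empty')
if [[ -z "$LATEST_TAG" || "$CURRENT_TAG" == "$LATEST_TAG" ]]; then
  echo "is_latest=true"     # first release, or this release is the latest one
else
  echo "is_latest=false"    # an older or back-ported release
fi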

101
.github/workflows/move-stable-tag.yml vendored Normal file
View File

@@ -0,0 +1,101 @@
name: Move Stable Tag
on:
workflow_call:
inputs:
release_tag:
description: "The release tag name (e.g., 1.2.3)"
required: true
type: string
commit_sha:
description: "The commit SHA to point the stable tag to"
required: true
type: string
is_prerelease:
description: "Whether this is a prerelease (stable tag won't be moved for prereleases)"
required: false
type: boolean
default: false
make_latest:
description: "Whether to move stable tag (from GitHub release 'Set as the latest release' option)"
required: false
type: boolean
default: false
permissions:
contents: read
# Prevent concurrent stable tag operations to avoid race conditions
concurrency:
group: move-stable-tag-${{ github.repository }}
cancel-in-progress: true
jobs:
move-stable-tag:
name: Move stable tag to release
runs-on: ubuntu-latest
timeout-minutes: 10 # Prevent hung git operations
permissions:
contents: write # Required to push tags
# Only move stable tag for non-prerelease versions AND when make_latest is true
if: ${{ !inputs.is_prerelease && inputs.make_latest }}
steps:
- name: Harden the runner
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0 # Full history needed for tag operations
- name: Validate inputs
env:
RELEASE_TAG: ${{ inputs.release_tag }}
COMMIT_SHA: ${{ inputs.commit_sha }}
run: |
set -euo pipefail
# Validate release tag format
if [[ ! "$RELEASE_TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
echo "❌ Error: Invalid release tag format. Expected format: 1.2.3, 1.2.3-alpha"
echo "Provided: $RELEASE_TAG"
exit 1
fi
# Validate commit SHA format (40 character hex)
if [[ ! "$COMMIT_SHA" =~ ^[a-f0-9]{40}$ ]]; then
echo "❌ Error: Invalid commit SHA format. Expected 40 character hex string"
echo "Provided: $COMMIT_SHA"
exit 1
fi
echo "✅ Input validation passed"
echo "Release tag: $RELEASE_TAG"
echo "Commit SHA: $COMMIT_SHA"
- name: Move stable tag
env:
RELEASE_TAG: ${{ inputs.release_tag }}
COMMIT_SHA: ${{ inputs.commit_sha }}
run: |
set -euo pipefail
# Configure git
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
# Verify the commit exists
if ! git cat-file -e "$COMMIT_SHA"; then
echo "❌ Error: Commit $COMMIT_SHA does not exist in this repository"
exit 1
fi
# Move stable tag to the release commit
echo "📌 Moving stable tag to commit: $COMMIT_SHA (release: $RELEASE_TAG)"
git tag -f stable "$COMMIT_SHA"
git push origin stable --force
echo "✅ Successfully moved stable tag to release $RELEASE_TAG"
echo "🔗 Stable tag now points to: https://github.com/${{ github.repository }}/commit/$COMMIT_SHA"

159
.github/workflows/pr-size-check.yml vendored Normal file
View File

@@ -0,0 +1,159 @@
name: PR Size Check
on:
pull_request:
types: [opened, synchronize, reopened]
permissions:
contents: read
pull-requests: write
jobs:
check-pr-size:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden the runner
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Check PR size
id: check-size
run: |
set -euo pipefail
# Fetch the base branch
git fetch origin "${{ github.base_ref }}"
# Get diff stats
diff_output=$(git diff --numstat "origin/${{ github.base_ref }}"...HEAD)
# Count lines, excluding:
# - Test files (*.test.ts, *.spec.tsx, etc.)
# - Locale files (locales/*.json, i18n/*.json)
# - Lock files (pnpm-lock.yaml, package-lock.json, yarn.lock)
# - Generated files (dist/, coverage/, build/, .next/)
# - Storybook stories (*.stories.tsx)
total_additions=0
total_deletions=0
counted_files=0
excluded_files=0
while IFS=$'\t' read -r additions deletions file; do
# Skip if additions or deletions are "-" (binary files)
if [ "$additions" = "-" ] || [ "$deletions" = "-" ]; then
continue
fi
# Check if file should be excluded
case "$file" in
*.test.ts|*.test.tsx|*.spec.ts|*.spec.tsx|*.test.js|*.test.jsx|*.spec.js|*.spec.jsx)
excluded_files=$((excluded_files + 1))
continue
;;
*/locales/*.json|*/i18n/*.json)
excluded_files=$((excluded_files + 1))
continue
;;
pnpm-lock.yaml|package-lock.json|yarn.lock)
excluded_files=$((excluded_files + 1))
continue
;;
dist/*|coverage/*|build/*|node_modules/*|test-results/*|playwright-report/*|.next/*|*.tsbuildinfo)
excluded_files=$((excluded_files + 1))
continue
;;
*.stories.ts|*.stories.tsx|*.stories.js|*.stories.jsx)
excluded_files=$((excluded_files + 1))
continue
;;
esac
total_additions=$((total_additions + additions))
total_deletions=$((total_deletions + deletions))
counted_files=$((counted_files + 1))
done <<EOF
${diff_output}
EOF
total_changes=$((total_additions + total_deletions))
echo "counted_files=${counted_files}" >> "${GITHUB_OUTPUT}"
echo "excluded_files=${excluded_files}" >> "${GITHUB_OUTPUT}"
echo "total_additions=${total_additions}" >> "${GITHUB_OUTPUT}"
echo "total_deletions=${total_deletions}" >> "${GITHUB_OUTPUT}"
echo "total_changes=${total_changes}" >> "${GITHUB_OUTPUT}"
# Set flag if PR is too large (> 800 lines)
if [ ${total_changes} -gt 800 ]; then
echo "is_too_large=true" >> "${GITHUB_OUTPUT}"
else
echo "is_too_large=false" >> "${GITHUB_OUTPUT}"
fi
- name: Comment on PR if too large
if: steps.check-size.outputs.is_too_large == 'true'
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const totalChanges = ${{ steps.check-size.outputs.total_changes }};
const countedFiles = ${{ steps.check-size.outputs.counted_files }};
const excludedFiles = ${{ steps.check-size.outputs.excluded_files }};
const additions = ${{ steps.check-size.outputs.total_additions }};
const deletions = ${{ steps.check-size.outputs.total_deletions }};
const body = '## 🚨 PR Size Warning\n\n' +
'This PR has approximately **' + totalChanges + ' lines** of changes (' + additions + ' additions, ' + deletions + ' deletions across ' + countedFiles + ' files).\n\n' +
'Large PRs (>800 lines) are significantly harder to review and increase the chance of merge conflicts. Consider splitting this into smaller, self-contained PRs.\n\n' +
'### 💡 Suggestions:\n' +
'- **Split by feature or module** - Break down into logical, independent pieces\n' +
'- **Create a sequence of PRs** - Each building on the previous one\n' +
'- **Branch off PR branches** - Don\'t wait for reviews to continue dependent work\n\n' +
'### 📊 What was counted:\n' +
'- ✅ Source files, stylesheets, configuration files\n' +
'- ❌ Excluded ' + excludedFiles + ' files (tests, locales, locks, generated files)\n\n' +
'### 📚 Guidelines:\n' +
'- **Ideal:** 300-500 lines per PR\n' +
'- **Warning:** 500-800 lines\n' +
'- **Critical:** 800+ lines ⚠️\n\n' +
'If this large PR is unavoidable (e.g., migration, dependency update, major refactor), please explain in the PR description why it couldn\'t be split.';
// Check if we already commented
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.find(comment =>
comment.user.type === 'Bot' &&
comment.body.includes('🚨 PR Size Warning')
);
if (botComment) {
// Update existing comment
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: body
});
} else {
// Create new comment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: body
});
}
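To estimate the counted size locally before pushing, a rough sketch of the same numstat-based count (the base branch name is an assumption, and the exclusion list is abbreviated compared to the workflow's case patterns):
set -euo pipefail
BASE="origin/main"   # assumed base branch
git diff --numstat "$BASE"...HEAD \
  | awk '$1 != "-" && $3 !~ /(\.test\.|\.spec\.|\.stories\.|lock\.yaml$|package-lock\.json$)/ {a+=$1; d+=$2} END {print a+d, "lines counted"}'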

View File

@@ -1,41 +1,31 @@
name: Docker Release to Github Experimental
name: Build Community Testing Images
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow builds experimental/testing versions of Formbricks for self-hosting customers
# to test fixes and features before official releases. Images are pushed to GHCR with
# timestamped experimental versions for easy identification and testing.
on:
workflow_dispatch:
env:
# Use docker.io for Docker Hub if empty
REGISTRY: ghcr.io
# github.repository as <account>/<repo>
IMAGE_NAME: ${{ github.repository }}-experimental
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
TURBO_TEAM: ${{ secrets.TURBO_TEAM }}
inputs:
version_override:
description: "Override version (SemVer only, e.g., 1.2.3-beta). Leave empty for auto-generated experimental version."
required: false
type: string
permissions:
contents: read
packages: write
id-token: write
jobs:
build:
build-community-testing:
name: Build Community Testing Image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# This is used to complete the identity challenge
# with sigstore/fulcio when running outside of PRs.
id-token: write
outputs:
DOCKER_IMAGE: ${{ steps.extract_image_info.outputs.DOCKER_IMAGE }}
RELEASE_VERSION: ${{ steps.extract_image_info.outputs.RELEASE_VERSION }}
timeout-minutes: 45
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
@@ -44,151 +34,17 @@ jobs:
with:
fetch-depth: 0
- name: Generate SemVer version from branch or tag
id: generate_version
- name: Build and push community testing image
uses: ./.github/actions/build-and-push-docker
with:
registry_type: "ghcr"
ghcr_image_name: "${{ github.repository }}-experimental"
experimental_mode: "true"
version: ${{ inputs.version_override }}
env:
REF_NAME: ${{ github.ref_name }}
REF_TYPE: ${{ github.ref_type }}
run: |
# Get reference name and type from environment variables
echo "Reference type: $REF_TYPE"
echo "Reference name: $REF_NAME"
if [[ "$REF_TYPE" == "tag" ]]; then
# If running from a tag, use the tag name
if [[ "$REF_NAME" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+.*$ ]]; then
# Tag looks like a SemVer, use it directly (remove 'v' prefix if present)
VERSION=$(echo "$REF_NAME" | sed 's/^v//')
echo "Using SemVer tag: $VERSION"
else
# Tag is not SemVer, treat as prerelease
SANITIZED_TAG=$(echo "$REF_NAME" | sed 's/[^a-zA-Z0-9.-]/-/g' | sed 's/--*/-/g' | sed 's/^-\|-$//g')
VERSION="0.0.0-$SANITIZED_TAG"
echo "Using tag as prerelease: $VERSION"
fi
else
# Running from branch, use branch name as prerelease
SANITIZED_BRANCH=$(echo "$REF_NAME" | sed 's/[^a-zA-Z0-9.-]/-/g' | sed 's/--*/-/g' | sed 's/^-\|-$//g')
VERSION="0.0.0-$SANITIZED_BRANCH"
echo "Using branch as prerelease: $VERSION"
fi
echo "VERSION=$VERSION" >> $GITHUB_ENV
echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
echo "Generated SemVer version: $VERSION"
- name: Update package.json version
run: |
sed -i "s/\"version\": \"0.0.0\"/\"version\": \"${{ env.VERSION }}\"/" ./apps/web/package.json
cat ./apps/web/package.json | grep version
- name: Set Sentry environment in .env
run: |
if ! grep -q "^SENTRY_ENVIRONMENT=staging$" .env 2>/dev/null; then
echo "SENTRY_ENVIRONMENT=staging" >> .env
echo "Added SENTRY_ENVIRONMENT=staging to .env file"
else
echo "SENTRY_ENVIRONMENT=staging already exists in .env file"
fi
- name: Set up Depot CLI
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
# Install the cosign tool except on PR
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Extract metadata (tags, labels) for Docker
# https://github.com/docker/metadata-action
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# Build and push Docker image with Buildx (don't push on PR)
# https://github.com/docker/build-push-action
- name: Build and push Docker image
id: build-and-push
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
with:
project: tw0fqmsx3c
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: .
file: ./apps/web/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
secrets: |
database_url=${{ secrets.DUMMY_DATABASE_URL }}
encryption_key=${{ secrets.DUMMY_ENCRYPTION_KEY }}
- name: Extract image info for sourcemap upload
id: extract_image_info
run: |
# Use the first readable tag from metadata action output
DOCKER_IMAGE=$(echo "${{ steps.meta.outputs.tags }}" | head -n1 | xargs)
echo "DOCKER_IMAGE=$DOCKER_IMAGE" >> $GITHUB_OUTPUT
# Use the generated version for Sentry release
RELEASE_VERSION="$VERSION"
echo "RELEASE_VERSION=$RELEASE_VERSION" >> $GITHUB_OUTPUT
echo "Docker image: $DOCKER_IMAGE"
echo "Release version: $RELEASE_VERSION"
echo "Available tags: ${{ steps.meta.outputs.tags }}"
# Sign the resulting Docker image digest except on PRs.
# This will only write to the public Rekor transparency log when the Docker
# repository is public to avoid leaking data. If you would like to publish
# transparency data even for private images, pass --force to cosign below.
# https://github.com/sigstore/cosign
- name: Sign the published Docker image
if: ${{ github.event_name != 'pull_request' }}
env:
# https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
TAGS: ${{ steps.meta.outputs.tags }}
DIGEST: ${{ steps.build-and-push.outputs.digest }}
# This step uses the identity token to provision an ephemeral certificate
# against the sigstore community Fulcio instance.
run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}
upload-sentry-sourcemaps:
name: Upload Sentry Sourcemaps
runs-on: ubuntu-latest
permissions:
contents: read
needs:
- build
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Upload Sentry Sourcemaps
uses: ./.github/actions/upload-sentry-sourcemaps
continue-on-error: true
with:
docker_image: ${{ needs.build.outputs.DOCKER_IMAGE }}
release_version: ${{ needs.build.outputs.RELEASE_VERSION }}
sentry_auth_token: ${{ secrets.SENTRY_AUTH_TOKEN }}
environment: staging
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEPOT_PROJECT_TOKEN: ${{ secrets.DEPOT_PROJECT_TOKEN }}
DUMMY_DATABASE_URL: ${{ secrets.DUMMY_DATABASE_URL }}
DUMMY_ENCRYPTION_KEY: ${{ secrets.DUMMY_ENCRYPTION_KEY }}
DUMMY_REDIS_URL: ${{ secrets.DUMMY_REDIS_URL }}
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}

View File

@@ -1,4 +1,4 @@
name: Docker Release to Github
name: Release Community Docker Images
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
@@ -13,6 +13,11 @@ on:
required: false
type: boolean
default: false
MAKE_LATEST:
description: "Whether to tag as latest (from GitHub release 'Set as the latest release' option)"
required: false
type: boolean
default: false
outputs:
VERSION:
description: release version
@@ -23,8 +28,6 @@ env:
REGISTRY: ghcr.io
# github.repository as <account>/<repo>
IMAGE_NAME: ${{ github.repository }}
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
TURBO_TEAM: ${{ secrets.TURBO_TEAM }}
permissions:
contents: read
@@ -32,6 +35,7 @@ permissions:
jobs:
build:
runs-on: ubuntu-latest
timeout-minutes: 45
permissions:
contents: read
packages: write
@@ -44,102 +48,61 @@ jobs:
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Get Release Tag
- name: Extract release version from tag
id: extract_release_tag
run: |
# Extract version from tag (e.g., refs/tags/v1.2.3 -> 1.2.3)
TAG="$GITHUB_REF"
TAG=${TAG#refs/tags/v}
set -euo pipefail
# Validate the extracted tag format
if [[ ! "$TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?(\+[a-zA-Z0-9.-]+)?$ ]]; then
echo "❌ Error: Invalid release tag format after extraction. Must be semver (e.g., 1.2.3, 1.2.3-alpha)"
echo "Original ref: $GITHUB_REF"
echo "Extracted tag: $TAG"
# Extract tag name with fallback logic for different trigger contexts
if [[ -n "${RELEASE_TAG:-}" ]]; then
TAG="$RELEASE_TAG"
echo "Using RELEASE_TAG override: $TAG"
elif [[ "$GITHUB_REF_NAME" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]] || [[ "$GITHUB_REF_NAME" =~ ^v[0-9] ]]; then
TAG="$GITHUB_REF_NAME"
echo "Using GITHUB_REF_NAME (looks like tag): $TAG"
else
# Fallback: extract from GITHUB_REF for direct tag triggers
TAG="${GITHUB_REF#refs/tags/}"
if [[ -z "$TAG" || "$TAG" == "$GITHUB_REF" ]]; then
TAG="$GITHUB_REF_NAME"
echo "Using GITHUB_REF_NAME as final fallback: $TAG"
else
echo "Extracted from GITHUB_REF: $TAG"
fi
fi
# Strip v-prefix if present (normalize to clean SemVer)
TAG=${TAG#[vV]}
# Validate SemVer format (supports prereleases like 4.0.0-rc.1)
if [[ ! "$TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
echo "ERROR: Invalid tag format '$TAG'. Expected SemVer (e.g., 1.2.3, 4.0.0-rc.1)"
exit 1
fi
# Safely add to environment variables
echo "RELEASE_TAG=$TAG" >> $GITHUB_ENV
echo "VERSION=$TAG" >> $GITHUB_OUTPUT
echo "Using tag-based version: $TAG"
echo "Using version: $TAG"
- name: Update package.json version
run: |
sed -i "s/\"version\": \"0.0.0\"/\"version\": \"${{ env.RELEASE_TAG }}\"/" ./apps/web/package.json
cat ./apps/web/package.json | grep version
- name: Set up Depot CLI
uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1.6.0
# Install the cosign tool except on PR
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
- name: Build and push community release image
id: build
uses: ./.github/actions/build-and-push-docker
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Extract metadata (tags, labels) for Docker
# https://github.com/docker/metadata-action
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
# Default semver tags (version, major.minor, major)
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
# Only tag as 'latest' for stable releases (not prereleases)
type=raw,value=latest,enable=${{ !inputs.IS_PRERELEASE }}
# Build and push Docker image with Buildx (don't push on PR)
# https://github.com/docker/build-push-action
- name: Build and push Docker image
id: build-and-push
uses: depot/build-push-action@636daae76684e38c301daa0c5eca1c095b24e780 # v1.14.0
with:
project: tw0fqmsx3c
token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
context: .
file: ./apps/web/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
secrets: |
database_url=${{ secrets.DUMMY_DATABASE_URL }}
encryption_key=${{ secrets.DUMMY_ENCRYPTION_KEY }}
# Sign the resulting Docker image digest except on PRs.
# This will only write to the public Rekor transparency log when the Docker
# repository is public to avoid leaking data. If you would like to publish
# transparency data even for private images, pass --force to cosign below.
# https://github.com/sigstore/cosign
- name: Sign the published Docker image
if: ${{ github.event_name != 'pull_request' }}
registry_type: "ghcr"
ghcr_image_name: ${{ env.IMAGE_NAME }}
version: ${{ steps.extract_release_tag.outputs.VERSION }}
is_prerelease: ${{ inputs.IS_PRERELEASE }}
make_latest: ${{ inputs.MAKE_LATEST }}
env:
# https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
TAGS: ${{ steps.meta.outputs.tags }}
DIGEST: ${{ steps.build-and-push.outputs.digest }}
# This step uses the identity token to provision an ephemeral certificate
# against the sigstore community Fulcio instance.
run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DEPOT_PROJECT_TOKEN: ${{ secrets.DEPOT_PROJECT_TOKEN }}
DUMMY_DATABASE_URL: ${{ secrets.DUMMY_DATABASE_URL }}
DUMMY_ENCRYPTION_KEY: ${{ secrets.DUMMY_ENCRYPTION_KEY }}
DUMMY_REDIS_URL: ${{ secrets.DUMMY_REDIS_URL }}
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
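The tag normalization used above (strip an optional v prefix, then require clean SemVer) can be checked in isolation; a small sketch with an example tag value:
set -euo pipefail
TAG="v4.0.0-rc.1"        # example input
TAG=${TAG#[vV]}          # normalize: drop a leading v/V
if [[ "$TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.-]+)?$ ]]; then
  echo "clean SemVer: $TAG"
else
  echo "rejected: $TAG" >&2
  exit 1
fi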

View File

@@ -19,7 +19,7 @@ jobs:
contents: read
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
@@ -59,14 +59,35 @@ jobs:
uses: dcarbone/install-yq-action@4075b4dca348d74bd83f2bf82d30f25d7c54539b # v1.3.1
- name: Update Chart.yaml with new version
env:
VERSION: ${{ env.VERSION }}
run: |
yq -i ".version = \"$VERSION\"" helm-chart/Chart.yaml
yq -i ".appVersion = \"v$VERSION\"" helm-chart/Chart.yaml
set -euo pipefail
echo "Updating Chart.yaml with version: ${VERSION}"
yq -i ".version = \"${VERSION}\"" helm-chart/Chart.yaml
yq -i ".appVersion = \"${VERSION}\"" helm-chart/Chart.yaml
echo "✅ Successfully updated Chart.yaml"
- name: Package Helm chart
env:
VERSION: ${{ env.VERSION }}
run: |
set -euo pipefail
echo "Packaging Helm chart version: ${VERSION}"
helm package ./helm-chart
echo "✅ Successfully packaged formbricks-${VERSION}.tgz"
- name: Push Helm chart to GitHub Container Registry
env:
VERSION: ${{ env.VERSION }}
run: |
helm push "formbricks-$VERSION.tgz" oci://ghcr.io/formbricks/helm-charts
set -euo pipefail
echo "Pushing Helm chart to registry: formbricks-${VERSION}.tgz"
helm push "formbricks-${VERSION}.tgz" oci://ghcr.io/formbricks/helm-charts
echo "✅ Successfully pushed Helm chart to registry"

View File

@@ -1,86 +0,0 @@
name: "Terraform"
on:
workflow_dispatch:
# TODO: re-enable this when the migration is completed.
push:
branches:
- main
paths:
- "infra/terraform/**"
pull_request:
branches:
- main
paths:
- "infra/terraform/**"
permissions:
contents: read
jobs:
terraform:
runs-on: ubuntu-latest
permissions:
id-token: write
pull-requests: write
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Tailscale
uses: tailscale/github-action@84a3f23bb4d843bcf4da6cf824ec1be473daf4de # v3.2.3
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
tags: tag:github
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@f24d7193d98baebaeacc7e2227925dd47cc267f5 # v4.2.0
with:
role-to-assume: ${{ secrets.AWS_ASSUME_ROLE_ARN }}
aws-region: "eu-central-1"
- name: Setup Terraform
uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3.1.2
- name: Terraform Format
id: fmt
run: terraform fmt -check -recursive
continue-on-error: true
working-directory: infra/terraform
- name: Terraform Init
id: init
run: terraform init
working-directory: infra/terraform
- name: Terraform Validate
id: validate
run: terraform validate
working-directory: infra/terraform
- name: Terraform Plan
id: plan
run: terraform plan -out .planfile
working-directory: infra/terraform
- name: Post PR comment
uses: borchero/terraform-plan-comment@434458316f8f24dd073cd2561c436cce41dc8f34 # v2.4.1
if: always() && github.ref != 'refs/heads/main' && (steps.plan.outcome == 'success' || steps.plan.outcome == 'failure')
with:
token: ${{ github.token }}
planfile: .planfile
working-directory: "infra/terraform"
- name: Terraform Apply
id: apply
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
run: terraform apply .planfile
working-directory: "infra/terraform"

View File

@@ -1,51 +0,0 @@
name: Check Missing Translations
permissions:
contents: read
on:
workflow_dispatch:
pull_request_target:
types: [opened, synchronize, reopened]
jobs:
check-missing-translations:
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ref: ${{ github.event.pull_request.base.ref }}
- name: Checkout PR
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Setup Node.js
uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0
with:
node-version: 18
- name: Install Tolgee CLI
run: npm install -g @tolgee/cli
- name: Compare Tolgee Keys
id: compare
run: |
tolgee compare --api-key ${{ secrets.TOLGEE_API_KEY }} > compare_output.txt
cat compare_output.txt
- name: Check for Missing Translations
run: |
if grep -q "new key found" compare_output.txt; then
echo "New keys found that may require translations:"
exit 1
else
echo "No new keys found."
fi

View File

@@ -1,95 +0,0 @@
name: Tolgee Tagging on PR Merge
permissions:
contents: read
on:
pull_request_target:
types: [closed]
branches:
- main
jobs:
tag-production-keys:
name: Tag Production Keys
runs-on: ubuntu-latest
if: github.event.pull_request.merged == true
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0 # This ensures we get the full git history
- name: Get source branch name
id: branch-name
env:
RAW_BRANCH: ${{ github.head_ref }}
run: |
# Validate and sanitize branch name - only allow alphanumeric, dots, underscores, hyphens, and forward slashes
SOURCE_BRANCH=$(echo "$RAW_BRANCH" | sed 's/[^a-zA-Z0-9._\/-]//g')
# Additional validation - ensure branch name is not empty after sanitization
if [[ -z "$SOURCE_BRANCH" ]]; then
echo "❌ Error: Branch name is empty after sanitization"
echo "Original branch: $RAW_BRANCH"
exit 1
fi
# Safely add to environment variables using GitHub's recommended method
# This prevents environment variable injection attacks
echo "SOURCE_BRANCH<<EOF" >> $GITHUB_ENV
echo "$SOURCE_BRANCH" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
echo "Detected source branch: $SOURCE_BRANCH"
- name: Setup Node.js
uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0
with:
node-version: 18 # Ensure compatibility with your project
- name: Install Tolgee CLI
run: npm install -g @tolgee/cli
- name: Tag Production Keys
run: |
npx tolgee tag \
--api-key ${{ secrets.TOLGEE_API_KEY }} \
--filter-extracted \
--filter-tag "draft:${SOURCE_BRANCH}" \
--tag production \
--untag "draft:${SOURCE_BRANCH}"
- name: Tag unused production keys as Deprecated
run: |
npx tolgee tag \
--api-key ${{ secrets.TOLGEE_API_KEY }} \
--filter-not-extracted --filter-tag production \
--tag deprecated --untag production
- name: Tag unused draft:current-branch keys as Deprecated
run: |
npx tolgee tag \
--api-key ${{ secrets.TOLGEE_API_KEY }} \
--filter-not-extracted --filter-tag "draft:${SOURCE_BRANCH}" \
--tag deprecated --untag "draft:${SOURCE_BRANCH}"
- name: Sync with backup
run: |
npx tolgee sync \
--api-key ${{ secrets.TOLGEE_API_KEY }} \
--backup ./tolgee-backup \
--continue-on-warning \
--yes
- name: Upload backup as artifact
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: tolgee-backup-${{ github.sha }}
path: ./tolgee-backup
retention-days: 90

63
.github/workflows/translation-check.yml vendored Normal file
View File

@@ -0,0 +1,63 @@
name: Translation Validation
permissions:
contents: read
on:
pull_request:
types: [opened, synchronize, reopened]
paths:
- "apps/web/**/*.ts"
- "apps/web/**/*.tsx"
- "apps/web/locales/**/*.json"
- "scan-translations.ts"
push:
branches:
- main
paths:
- "apps/web/**/*.ts"
- "apps/web/**/*.tsx"
- "apps/web/locales/**/*.json"
- "scan-translations.ts"
jobs:
validate-translations:
name: Validate Translation Keys
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Node.js
uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0
with:
node-version: 18
- name: Setup pnpm
uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0
with:
version: 9.15.9
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Validate translation keys
run: |
echo ""
echo "🔍 Validating translation keys..."
echo ""
pnpm run scan-translations
- name: Summary
if: success()
run: |
echo ""
echo "✅ Translation validation completed successfully!"
echo ""

View File

@@ -1,48 +0,0 @@
name: Upload Sentry Sourcemaps (Manual)
on:
workflow_dispatch:
inputs:
docker_image:
description: "Docker image to extract sourcemaps from"
required: true
type: string
release_version:
description: "Release version (e.g., v1.2.3)"
required: true
type: string
tag_version:
description: "Docker image tag (leave empty to use release_version)"
required: false
type: string
permissions:
contents: read
jobs:
upload-sourcemaps:
name: Upload Sourcemaps to Sentry
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
with:
egress-policy: audit
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- name: Set Docker Image
run: echo "DOCKER_IMAGE=${DOCKER_IMAGE}" >> $GITHUB_ENV
env:
DOCKER_IMAGE: ${{ inputs.docker_image }}:${{ inputs.tag_version != '' && inputs.tag_version || inputs.release_version }}
- name: Upload Sourcemaps to Sentry
uses: ./.github/actions/upload-sentry-sourcemaps
with:
docker_image: ${{ env.DOCKER_IMAGE }}
release_version: ${{ inputs.release_version }}
sentry_auth_token: ${{ secrets.SENTRY_AUTH_TOKEN }}

20
.gitignore vendored
View File

@@ -56,21 +56,15 @@ packages/database/migrations
branch.json
.vercel
# Terraform
infra/terraform/.terraform/
**/.terraform.lock.hcl
**/terraform.tfstate
**/terraform.tfstate.*
**/crash.log
**/override.tf
**/override.tf.json
**/*.tfvars
**/*.tfvars.json
**/.terraformrc
**/terraform.rc
# IntelliJ IDEA
/.idea/
/*.iml
packages/ios/FormbricksSDK/FormbricksSDK.xcodeproj/project.xcworkspace/xcuserdata
.cursorrules
i18n.cache
stats.html
# Agent skill archives
.agent/skills/**/.archived/
.agent/.temp-skills/


@@ -1,6 +1,3 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
# Load environment variables from .env files
if [ -f .env ]; then
set -a
@@ -10,12 +7,34 @@ fi
pnpm lint-staged
# Run tolgee-pull if branch.json exists and NEXT_PUBLIC_TOLGEE_API_KEY is not set
if [ -f branch.json ]; then
if [ -z "$NEXT_PUBLIC_TOLGEE_API_KEY" ]; then
echo "Skipping tolgee-pull: NEXT_PUBLIC_TOLGEE_API_KEY is not set"
# Run Lingo.dev i18n workflow if LINGODOTDEV_API_KEY is set
if [ -n "$LINGODOTDEV_API_KEY" ]; then
echo ""
echo "🌍 Running Lingo.dev translation workflow..."
echo ""
# Run translation generation and validation
if pnpm run i18n; then
echo ""
echo "✅ Translation validation passed"
echo ""
# Add updated locale files to git
git add apps/web/locales/*.json
else
pnpm run tolgee-pull
git add apps/web/locales
echo ""
echo "❌ Translation validation failed!"
echo ""
echo "Please fix the translation issues above before committing:"
echo " • Add missing translation keys to your locale files"
echo " • Remove unused translation keys"
echo ""
echo "Or run 'pnpm i18n' to see the detailed report"
echo ""
exit 1
fi
else
echo ""
echo "⚠️ Skipping translation validation: LINGODOTDEV_API_KEY is not set"
echo " (This is expected for community contributors)"
echo ""
fi


@@ -1,43 +0,0 @@
{
"$schema": "https://docs.tolgee.io/cli-schema.json",
"format": "JSON_TOLGEE",
"patterns": ["./apps/web/**/*.ts?(x)"],
"projectId": 10304,
"pull": {
"path": "./apps/web/locales"
},
"push": {
"files": [
{
"language": "en-US",
"path": "./apps/web/locales/en-US.json"
},
{
"language": "de-DE",
"path": "./apps/web/locales/de-DE.json"
},
{
"language": "fr-FR",
"path": "./apps/web/locales/fr-FR.json"
},
{
"language": "pt-BR",
"path": "./apps/web/locales/pt-BR.json"
},
{
"language": "zh-Hant-TW",
"path": "./apps/web/locales/zh-Hant-TW.json"
},
{
"language": "pt-PT",
"path": "./apps/web/locales/pt-PT.json"
},
{
"language": "ro-RO",
"path": "./apps/web/locales/ro-RO.json"
}
],
"forceMode": "OVERRIDE"
},
"strictNamespace": false
}


@@ -1,4 +1,10 @@
{
"eslint.validate": ["javascript", "javascriptreact", "typescript", "typescriptreact"],
"eslint.workingDirectories": [
{
"mode": "auto"
}
],
"javascript.updateImportsOnFileMove.enabled": "always",
"sonarlint.connectedMode.project": {
"connectionId": "formbricks",

AGENTS.md

@@ -0,0 +1,82 @@
# Repository Guidelines
## Project Structure & Module Organization
Formbricks runs as a pnpm/turbo monorepo. `apps/web` is the Next.js product surface, with feature modules under `app/` and `modules/`, assets in `public/` and `images/`, and Playwright specs in `apps/web/playwright/`. `apps/storybook` renders reusable UI pieces for review. Shared logic lives in `packages/*`: `database` (Prisma schemas/migrations), `surveys`, `js-core`, `types`, plus linting and TypeScript presets (`config-*`). Deployment collateral is kept in `docs/`, `docker/`, and `helm-chart/`. Unit tests sit next to their source as `*.test.ts` or inside `__tests__`.
## Build, Test & Development Commands
- `pnpm install` — install workspace dependencies pinned by `pnpm-lock.yaml`.
- `pnpm db:up` / `pnpm db:down` — start/stop the Docker services backing the app.
- `pnpm dev` — run all app and worker dev servers in parallel via Turborepo.
- `pnpm build` — generate production builds for every package and app.
- `pnpm lint` — apply the shared ESLint rules across the workspace.
- `pnpm test` / `pnpm test:coverage` — execute Vitest suites with optional coverage.
- `pnpm test:e2e` — launch the Playwright browser regression suite.
- `pnpm db:migrate:dev` — apply Prisma migrations against the dev database.
## Coding Style & Naming Conventions
TypeScript, React, and Prisma are the primary languages. Use the shared ESLint presets (`@formbricks/eslint-config`) and Prettier preset (110-char width, semicolons, double quotes, sorted import groups). Two-space indentation is standard; prefer `PascalCase` for React components and folders under `modules/`, `camelCase` for functions/variables, and `SCREAMING_SNAKE_CASE` only for constants. When adding mocks, place them inside `__mocks__` so import ordering stays stable.
We are using SonarQube to identify code smells and security hotspots.
## Architecture & Patterns
- Next.js app router lives in `apps/web/app` with route groups like `(app)` and `(auth)`. Services live in `apps/web/lib`, feature modules in `apps/web/modules`.
- Server actions wrap service calls and return `{ data }` or `{ error }` consistently (see the sketch after this list).
- Context providers should guard against missing provider usage and use cleanup patterns that snapshot refs inside `useEffect` to avoid React hooks warnings.
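A minimal sketch of that server-action shape; `updateSurveyNameAction` and the service function are hypothetical, and real actions would also run the shared auth checks omitted here:

```ts
"use server";

import { z } from "zod";

// Hypothetical service call; real services live under apps/web/lib.
const updateSurveyName = async (surveyId: string, name: string): Promise<{ id: string; name: string }> => {
  return { id: surveyId, name };
};

const inputSchema = z.object({
  surveyId: z.string().min(1),
  name: z.string().min(1),
});

// Server action wrapping the service call; always resolves to { data } or { error }.
export const updateSurveyNameAction = async (input: z.infer<typeof inputSchema>) => {
  const parsed = inputSchema.safeParse(input);
  if (!parsed.success) {
    return { error: parsed.error.issues.map((issue) => issue.message).join(", ") };
  }
  try {
    const data = await updateSurveyName(parsed.data.surveyId, parsed.data.name);
    return { data };
  } catch (error) {
    return { error: error instanceof Error ? error.message : "Unexpected error" };
  }
};
```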
## Caching
- Use React `cache()` for request-level dedupe and `cache.withCache()` or explicit Redis for expensive data (a sketch follows this list).
- Do not use Next.js `unstable_cache()`.
- Always use `createCacheKey.*` utilities for cache keys.
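A request-level dedupe sketch using React's `cache()`. The `getSurveyCached` helper is illustrative and assumes the Prisma client is exported from `@formbricks/database`; Redis-backed caching should go through `cache.withCache()` with keys built by the `createCacheKey.*` utilities.

```ts
import { cache } from "react";
import { prisma } from "@formbricks/database";

// Request-level dedupe: repeated calls with the same id during one request hit the database once.
// For genuinely expensive data, wrap this with cache.withCache() (or explicit Redis) and build the
// key via the createCacheKey.* utilities instead of ad-hoc strings.
export const getSurveyCached = cache(async (surveyId: string) => {
  return prisma.survey.findUnique({ where: { id: surveyId } });
});
```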
## i18n (Internationalization)
- All user-facing text must use the `t()` function from `react-i18next`, as shown in the example after this list.
- Key naming: use lowercase with dots for nesting (e.g., `common.welcome`).
- Translations are in `apps/web/locales/`. Default is `en-US.json`.
- Lingo.dev automatically translates strings from en-US into the other languages on commit. Run `pnpm i18n` to generate missing translations and validate keys.
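For instance, a client component resolving the `common.welcome` key via `react-i18next` (the component name is illustrative):

```tsx
"use client";

import { useTranslation } from "react-i18next";

// Illustrative component: keys are lowercase, dot-nested, and resolved from apps/web/locales/en-US.json.
export const WelcomeBanner = () => {
  const { t } = useTranslation();
  return <h2>{t("common.welcome")}</h2>;
};
```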
## Database & Prisma Performance
- Multi-tenancy: All data must be scoped by Organization or Environment.
- Soft Deletion: Check for `isActive` or `deletedAt` fields; use proper filtering.
- Never use `skip`/`offset` with `prisma.response.count()`; only use `where`.
- Separate count and data queries and run them in parallel (`Promise.all`); see the sketch after this list.
- Prefer cursor pagination for large datasets.
- When filtering by `createdAt`, include indexed fields (e.g., `surveyId` + `createdAt`).
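A sketch of the parallel count/data pattern with cursor pagination; the field names (`surveyId`, `createdAt`) follow the example above, while the function and parameter names are illustrative:

```ts
import { prisma } from "@formbricks/database";

// Count and page queries run in parallel; the count query only receives `where` (never skip/offset),
// and the page query uses cursor pagination so large result sets stay cheap.
export const getResponsesPage = async (surveyId: string, cursor?: string, createdAfter?: Date) => {
  const where = {
    surveyId, // indexed field included alongside any createdAt filter
    ...(createdAfter ? { createdAt: { gte: createdAfter } } : {}),
  };
  const [totalCount, responses] = await Promise.all([
    prisma.response.count({ where }),
    prisma.response.findMany({
      where,
      orderBy: { createdAt: "desc" },
      take: 50,
      ...(cursor ? { cursor: { id: cursor }, skip: 1 } : {}),
    }),
  ]);
  return { totalCount, responses };
};
```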
## Testing Guidelines
Prefer Vitest with Testing Library for logic in `.ts` files, keeping specs colocated with the code they exercise (`utility.test.ts`). Do not write tests for `.tsx` files—React components are covered by Playwright E2E tests instead. Mock network and storage boundaries through helpers from `@formbricks/*`. Run `pnpm test` before opening a PR and `pnpm test:coverage` when touching critical flows; keep coverage from regressing. End-to-end scenarios belong in `apps/web/playwright`, using descriptive filenames (`billing.spec.ts`) and tagging slow suites with `@slow` when necessary.
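A colocated Vitest spec following those conventions; `truncateText` is a hypothetical utility standing in for real logic:

```ts
// truncate-text.test.ts, colocated with the (hypothetical) truncate-text.ts it exercises
import { describe, expect, test } from "vitest";

// Stand-in for the utility under test; real specs import it from "./truncate-text".
const truncateText = (value: string, max: number): string =>
  value.length <= max ? value : `${value.slice(0, max)}…`;

describe("truncateText", () => {
  test("returns short strings unchanged", () => {
    expect(truncateText("ok", 10)).toBe("ok");
  });

  test("truncates long strings and appends an ellipsis", () => {
    expect(truncateText("formbricks", 4)).toBe("form…");
  });
});
```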
## Documentation (apps/docs)
- Add frontmatter with `title`, `description`, and `icon` at the top of the MDX file.
- Do not start with an H1; use Camel Case headings (only capitalize the feature name).
- Use Mintlify components for steps and callouts.
- If Enterprise-only, add the Enterprise note block described in docs.
## Storybook
- Stories live in `stories.tsx` in the component folder and import from `"./index"`.
- Use `@storybook/react-vite` and organize argTypes into `Behavior`, `Appearance`, and `Content` categories (see the skeleton after this list).
- Include Default, Disabled (if supported), WithIcon (if supported), all variants, and edge cases.
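A skeleton `stories.tsx` under those conventions; the `Badge` component and its props are placeholders rather than a real module:

```tsx
import type { Meta, StoryObj } from "@storybook/react-vite";
import { Badge } from "./index";

const meta: Meta<typeof Badge> = {
  title: "UI/Badge",
  component: Badge,
  argTypes: {
    onClick: { table: { category: "Behavior" } },
    variant: { table: { category: "Appearance" } },
    children: { table: { category: "Content" } },
  },
};

export default meta;
type Story = StoryObj<typeof Badge>;

export const Default: Story = { args: { children: "Default" } };
export const Disabled: Story = { args: { children: "Disabled", disabled: true } };
```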
## GitHub Actions
- Always set minimal `permissions` for `GITHUB_TOKEN`.
- On `ubuntu-latest`, add `step-security/harden-runner` as the first step.
## Quality Checklist
- Keep code DRY and small; remove dead code and unused imports.
- Follow React hooks rules, keep effects focused, and avoid unnecessary `useMemo`/`useCallback`.
- Prefer type inference, avoid `any`, and use shared types from `@formbricks/types`.
- Keep components focused, avoid deep nesting, and ensure basic accessibility.
## Commit & Pull Request Guidelines
Commits follow a lightweight Conventional Commit format (`fix:`, `chore:`, `feat:`) and usually append the PR number, e.g. `fix: update OpenAPI schema (#6617)`. Keep commits scoped and lint-clean. Pull requests should outline the problem, summarize the solution, and link to issues or product specs. Attach screenshots or gifs for UI-facing work, list any migrations or env changes, and paste the output of relevant commands (`pnpm test`, `pnpm lint`, `pnpm db:migrate:dev`) so reviewers can verify readiness.


@@ -21,6 +21,7 @@ The Open Source Qualtrics Alternative
<p align="center">
<a href="https://github.com/formbricks/formbricks/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-AGPL-purple" alt="License"></a> <a href="https://github.com/formbricks/formbricks/stargazers"><img src="https://img.shields.io/github/stars/formbricks/formbricks?logo=github" alt="Github Stars"></a>
<a href="https://insights.linuxfoundation.org/project/formbricks"><img src="https://insights.linuxfoundation.org/api/badge/health-score?project=formbricks"></a>
<a href="https://news.ycombinator.com/item?id=32303986"><img src="https://img.shields.io/badge/Hacker%20News-122-%23FF6600" alt="Hacker News"></a>
<a href="[https://www.producthunt.com/products/formbricks](https://www.producthunt.com/posts/formbricks)"><img src="https://img.shields.io/badge/Product%20Hunt-455-orange?logo=producthunt&logoColor=%23fff" alt="Product Hunt"></a>
<a href="https://github.blog/2023-04-12-github-accelerator-our-first-cohort-and-whats-next/"><img src="https://img.shields.io/badge/2023-blue?logo=github&label=Github%20Accelerator" alt="Github Accelerator"></a>
@@ -202,6 +203,14 @@ Here are a few options:
</a>
## Thanks
Formbricks is supported by the following companies who provide us with their tools for free as part of their open-source support:
<a href="https://www.chromatic.com/"><img src="https://user-images.githubusercontent.com/321738/84662277-e3db4f80-af1b-11ea-88f5-91d67a5e59f6.png" width="153" height="30" alt="Chromatic" /></a>
&nbsp;&nbsp;&nbsp;&nbsp;
<a href="https://sentry.io/"><img src="https://github.com/user-attachments/assets/d743ffd4-b575-4802-a29a-10136be9227e" width="150" height="30" alt="Sentry" /></a>
<a id="contact-us"></a>
## 📆 Contact us


@@ -1,8 +1,11 @@
import type { StorybookConfig } from "@storybook/react-vite";
import { createRequire } from "module";
import { dirname, join } from "path";
import { dirname, join, resolve } from "path";
import { fileURLToPath } from "url";
const require = createRequire(import.meta.url);
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
/**
* This function is used to resolve the absolute path of a package.
@@ -13,7 +16,7 @@ function getAbsolutePath(value: string): any {
}
const config: StorybookConfig = {
stories: ["../src/**/*.mdx", "../../web/modules/ui/**/stories.@(js|jsx|mjs|ts|tsx)"],
stories: ["../src/**/*.mdx", "../../../packages/survey-ui/src/**/*.stories.@(js|jsx|mjs|ts|tsx)"],
addons: [
getAbsolutePath("@storybook/addon-onboarding"),
getAbsolutePath("@storybook/addon-links"),
@@ -25,5 +28,25 @@ const config: StorybookConfig = {
name: getAbsolutePath("@storybook/react-vite"),
options: {},
},
async viteFinal(config) {
const surveyUiPath = resolve(__dirname, "../../../packages/survey-ui/src");
const rootPath = resolve(__dirname, "../../../");
// Configure server to allow files from outside the storybook directory
config.server = config.server || {};
config.server.fs = {
...config.server.fs,
allow: [...(config.server.fs?.allow || []), rootPath],
};
// Configure simple alias resolution
config.resolve = config.resolve || {};
config.resolve.alias = {
...config.resolve.alias,
"@": surveyUiPath,
};
return config;
},
};
export default config;


@@ -1,21 +1,6 @@
import type { Preview } from "@storybook/react-vite";
import { TolgeeProvider } from "@tolgee/react";
import React from "react";
import "../../web/modules/ui/globals.css";
import { TolgeeBase } from "../../web/tolgee/shared";
// Create a Storybook-specific Tolgee decorator
const withTolgee = (Story: any) => {
const tolgee = TolgeeBase().init({
tagNewKeys: [], // No branch tagging in Storybook
});
return React.createElement(
TolgeeProvider,
{ tolgee, fallback: "Loading", ssr: { language: "en", staticData: {} } },
React.createElement(Story)
);
};
import "../../../packages/survey-ui/src/styles/globals.css";
const preview: Preview = {
parameters: {
@@ -24,9 +9,23 @@ const preview: Preview = {
color: /(background|color)$/i,
date: /Date$/i,
},
expanded: true,
},
backgrounds: {
default: "light",
},
},
decorators: [withTolgee],
decorators: [
(Story) =>
React.createElement(
"div",
{
id: "fbjs",
className: "w-full h-full min-h-screen p-4 bg-background font-sans antialiased text-foreground",
},
React.createElement(Story)
),
],
};
export default preview;


@@ -11,22 +11,24 @@
"clean": "rimraf .turbo node_modules dist storybook-static"
},
"dependencies": {
"eslint-plugin-react-refresh": "0.4.20"
"@formbricks/survey-ui": "workspace:*"
},
"devDependencies": {
"@chromatic-com/storybook": "^4.0.1",
"@storybook/addon-a11y": "9.0.15",
"@storybook/addon-links": "9.0.15",
"@storybook/addon-onboarding": "9.0.15",
"@storybook/react-vite": "9.0.15",
"@typescript-eslint/eslint-plugin": "8.32.0",
"@typescript-eslint/parser": "8.32.0",
"@vitejs/plugin-react": "4.4.1",
"esbuild": "0.25.4",
"eslint-plugin-storybook": "9.0.15",
"@chromatic-com/storybook": "^5.0.0",
"@storybook/addon-a11y": "10.1.11",
"@storybook/addon-links": "10.1.11",
"@storybook/addon-onboarding": "10.1.11",
"@storybook/react-vite": "10.1.11",
"@typescript-eslint/eslint-plugin": "8.53.0",
"@tailwindcss/vite": "4.1.18",
"@typescript-eslint/parser": "8.53.0",
"@vitejs/plugin-react": "5.1.2",
"esbuild": "0.27.2",
"eslint-plugin-react-refresh": "0.4.26",
"eslint-plugin-storybook": "10.1.11",
"prop-types": "15.8.1",
"storybook": "9.0.15",
"vite": "6.3.5",
"@storybook/addon-docs": "9.0.15"
"storybook": "10.1.11",
"vite": "7.3.1",
"@storybook/addon-docs": "10.1.11"
}
}


@@ -1,6 +0,0 @@
export default {
plugins: {
tailwindcss: {},
autoprefixer: {},
},
};


@@ -1,7 +1,15 @@
/** @type {import('tailwindcss').Config} */
import base from "../web/tailwind.config";
import surveyUi from "../../packages/survey-ui/tailwind.config";
export default {
...base,
content: ["./index.html", "./src/**/*.{js,ts,jsx,tsx}", "../web/modules/ui/**/*.{js,ts,jsx,tsx}"],
content: [
"./index.html",
"./src/**/*.{js,ts,jsx,tsx}",
"../../packages/survey-ui/src/**/*.{js,ts,jsx,tsx}",
],
theme: {
extend: {
...surveyUi.theme?.extend,
},
},
};


@@ -1,16 +1,17 @@
import tailwindcss from "@tailwindcss/vite";
import react from "@vitejs/plugin-react";
import path from "path";
import { defineConfig } from "vite";
// https://vitejs.dev/config/
export default defineConfig({
plugins: [react()],
plugins: [react(), tailwindcss()],
define: {
"process.env": {},
},
resolve: {
alias: {
"@": path.resolve(__dirname, "../web"),
"@formbricks/survey-ui": path.resolve(__dirname, "../../packages/survey-ui/src"),
},
},
});

apps/web/.eslintignore

@@ -0,0 +1,7 @@
node_modules/
.next/
public/
playwright/
dist/
coverage/
vendor/


@@ -30,9 +30,17 @@ COPY apps/web/scripts/docker/read-secrets.sh /tmp/read-secrets.sh
RUN chmod +x /tmp/read-secrets.sh
# Increase Node.js memory limit as a regular build argument
ARG NODE_OPTIONS="--max_old_space_size=4096"
ARG NODE_OPTIONS="--max_old_space_size=8192"
ENV NODE_OPTIONS=${NODE_OPTIONS}
# Target architecture - automatically provided by Docker in multi-platform builds
# but needs explicit declaration for some build systems (like Depot)
ARG TARGETARCH
# Base path for the application (optional)
ARG BASE_PATH=""
ENV BASE_PATH=${BASE_PATH}
# Set the working directory
WORKDIR /app
@@ -57,6 +65,8 @@ RUN pnpm build --filter=@formbricks/database
# This mounts the secrets only during this build step without storing them in layers
RUN --mount=type=secret,id=database_url \
--mount=type=secret,id=encryption_key \
--mount=type=secret,id=redis_url \
--mount=type=secret,id=sentry_auth_token \
/tmp/read-secrets.sh pnpm build --filter=@formbricks/web...
# Extract Prisma version
@@ -67,8 +77,8 @@ RUN jq -r '.devDependencies.prisma' packages/database/package.json > /prisma_ver
#
FROM base AS runner
RUN npm install --ignore-scripts -g corepack@latest
RUN corepack enable
RUN npm install --ignore-scripts -g corepack@latest && \
corepack enable
RUN apk add --no-cache curl \
&& apk add --no-cache supercronic \
@@ -94,6 +104,9 @@ RUN chown -R nextjs:nextjs ./apps/web/.next/static && chmod -R 755 ./apps/web/.n
COPY --from=installer /app/apps/web/public ./apps/web/public
RUN chown -R nextjs:nextjs ./apps/web/public && chmod -R 755 ./apps/web/public
# Create packages/database directory structure with proper ownership for runtime migrations
RUN mkdir -p ./packages/database/migrations && chown -R nextjs:nextjs ./packages/database
COPY --from=installer /app/packages/database/schema.prisma ./packages/database/schema.prisma
RUN chown nextjs:nextjs ./packages/database/schema.prisma && chmod 644 ./packages/database/schema.prisma
@@ -109,9 +122,6 @@ RUN chown -R nextjs:nextjs ./node_modules/.prisma && chmod -R 755 ./node_modules
COPY --from=installer /prisma_version.txt .
RUN chown nextjs:nextjs ./prisma_version.txt && chmod 644 ./prisma_version.txt
COPY /docker/cronjobs /app/docker/cronjobs
RUN chmod -R 755 /app/docker/cronjobs
COPY --from=installer /app/node_modules/@paralleldrive/cuid2 ./node_modules/@paralleldrive/cuid2
RUN chmod -R 755 ./node_modules/@paralleldrive/cuid2
@@ -121,7 +131,7 @@ RUN chmod -R 755 ./node_modules/@noble/hashes
COPY --from=installer /app/node_modules/zod ./node_modules/zod
RUN chmod -R 755 ./node_modules/zod
RUN npm install -g prisma
RUN npm install -g prisma@6
# Create a startup script to handle the conditional logic
COPY --from=installer /app/apps/web/scripts/docker/next-start.sh /home/nextjs/start.sh
@@ -131,12 +141,13 @@ EXPOSE 3000
ENV HOSTNAME="0.0.0.0"
USER nextjs
# Prepare volume for uploads
RUN mkdir -p /home/nextjs/apps/web/uploads/
VOLUME /home/nextjs/apps/web/uploads/
# Prepare pnpm as the nextjs user to ensure it's available at runtime
# Prepare volumes for uploads and SAML connections
RUN corepack prepare pnpm@9.15.9 --activate && \
mkdir -p /home/nextjs/apps/web/uploads/ && \
mkdir -p /home/nextjs/apps/web/saml-connection
# Prepare volume for SAML preloaded connection
RUN mkdir -p /home/nextjs/apps/web/saml-connection
VOLUME /home/nextjs/apps/web/uploads/
VOLUME /home/nextjs/apps/web/saml-connection
CMD ["/home/nextjs/start.sh"]


@@ -1,79 +0,0 @@
import "@testing-library/jest-dom/vitest";
import { cleanup, render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { afterEach, describe, expect, test, vi } from "vitest";
import { ConnectWithFormbricks } from "./ConnectWithFormbricks";
// Mocks before import
const pushMock = vi.fn();
const refreshMock = vi.fn();
vi.mock("@tolgee/react", () => ({ useTranslate: () => ({ t: (key: string) => key }) }));
vi.mock("next/navigation", () => ({ useRouter: vi.fn(() => ({ push: pushMock, refresh: refreshMock })) }));
vi.mock("./OnboardingSetupInstructions", () => ({
OnboardingSetupInstructions: () => <div data-testid="instructions" />,
}));
afterEach(() => {
cleanup();
vi.clearAllMocks();
});
describe("ConnectWithFormbricks", () => {
const environment = { id: "env1" } as any;
const webAppUrl = "http://app";
const channel = {} as any;
test("renders waiting state when widgetSetupCompleted is false", () => {
render(
<ConnectWithFormbricks
environment={environment}
publicDomain={webAppUrl}
widgetSetupCompleted={false}
channel={channel}
/>
);
expect(screen.getByTestId("instructions")).toBeInTheDocument();
expect(screen.getByText("environments.connect.waiting_for_your_signal")).toBeInTheDocument();
});
test("renders success state when widgetSetupCompleted is true", () => {
render(
<ConnectWithFormbricks
environment={environment}
publicDomain={webAppUrl}
widgetSetupCompleted={true}
channel={channel}
/>
);
expect(screen.getByText("environments.connect.congrats")).toBeInTheDocument();
expect(screen.getByText("environments.connect.connection_successful_message")).toBeInTheDocument();
});
test("clicking finish button navigates to surveys", async () => {
render(
<ConnectWithFormbricks
environment={environment}
publicDomain={webAppUrl}
widgetSetupCompleted={true}
channel={channel}
/>
);
const button = screen.getByRole("button", { name: "environments.connect.finish_onboarding" });
await userEvent.click(button);
expect(pushMock).toHaveBeenCalledWith(`/environments/${environment.id}/surveys`);
});
test("refresh is called on visibilitychange to visible", () => {
render(
<ConnectWithFormbricks
environment={environment}
publicDomain={webAppUrl}
widgetSetupCompleted={false}
channel={channel}
/>
);
Object.defineProperty(document, "visibilityState", { value: "visible", configurable: true });
document.dispatchEvent(new Event("visibilitychange"));
expect(refreshMock).toHaveBeenCalled();
});
});


@@ -1,29 +1,29 @@
"use client";
import { cn } from "@/lib/cn";
import { Button } from "@/modules/ui/components/button";
import { useTranslate } from "@tolgee/react";
import { ArrowRight } from "lucide-react";
import { useRouter } from "next/navigation";
import { useEffect } from "react";
import { useTranslation } from "react-i18next";
import { TEnvironment } from "@formbricks/types/environment";
import { TProjectConfigChannel } from "@formbricks/types/project";
import { cn } from "@/lib/cn";
import { Button } from "@/modules/ui/components/button";
import { OnboardingSetupInstructions } from "./OnboardingSetupInstructions";
interface ConnectWithFormbricksProps {
environment: TEnvironment;
publicDomain: string;
widgetSetupCompleted: boolean;
appSetupCompleted: boolean;
channel: TProjectConfigChannel;
}
export const ConnectWithFormbricks = ({
environment,
publicDomain,
widgetSetupCompleted,
appSetupCompleted,
channel,
}: ConnectWithFormbricksProps) => {
const { t } = useTranslate();
const { t } = useTranslation();
const router = useRouter();
const handleFinishOnboarding = async () => {
router.push(`/environments/${environment.id}/surveys`);
@@ -51,15 +51,15 @@ export const ConnectWithFormbricks = ({
environmentId={environment.id}
publicDomain={publicDomain}
channel={channel}
widgetSetupCompleted={widgetSetupCompleted}
appSetupCompleted={appSetupCompleted}
/>
</div>
<div
className={cn(
"flex h-[30rem] w-1/2 flex-col items-center justify-center rounded-lg border text-center",
widgetSetupCompleted ? "border-green-500 bg-green-100" : "border-slate-300 bg-slate-200"
appSetupCompleted ? "border-green-500 bg-green-100" : "border-slate-300 bg-slate-200"
)}>
{widgetSetupCompleted ? (
{appSetupCompleted ? (
<div>
<p className="text-3xl">{t("environments.connect.congrats")}</p>
<p className="pt-4 text-sm font-medium text-slate-600">
@@ -81,9 +81,9 @@ export const ConnectWithFormbricks = ({
</div>
<Button
id="finishOnboarding"
variant={widgetSetupCompleted ? "default" : "ghost"}
variant={appSetupCompleted ? "default" : "ghost"}
onClick={handleFinishOnboarding}>
{widgetSetupCompleted
{appSetupCompleted
? t("environments.connect.finish_onboarding")
: t("environments.connect.do_it_later")}
<ArrowRight />


@@ -1,103 +0,0 @@
import { cleanup, render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import toast from "react-hot-toast";
import { afterEach, beforeAll, describe, expect, test, vi } from "vitest";
import { OnboardingSetupInstructions } from "./OnboardingSetupInstructions";
// Mock react-hot-toast so we can assert that a success message is shown
vi.mock("react-hot-toast", () => ({
__esModule: true,
default: {
success: vi.fn(),
},
}));
// Set up a spy for navigator.clipboard.writeText so it becomes a ViTest spy.
beforeAll(() => {
Object.defineProperty(navigator, "clipboard", {
configurable: true,
writable: true,
value: {
// Using a mockResolvedValue resolves the promise as writeText is async.
writeText: vi.fn().mockResolvedValue(undefined),
},
});
});
describe("OnboardingSetupInstructions", () => {
afterEach(() => {
cleanup();
vi.clearAllMocks();
});
// Provide some default props for testing
const defaultProps = {
environmentId: "env-123",
publicDomain: "https://example.com",
channel: "app" as const, // Assuming channel is either "app" or "website"
widgetSetupCompleted: false,
};
test("renders HTML tab content by default", () => {
render(<OnboardingSetupInstructions {...defaultProps} />);
// Since the default active tab is "html", we check for a unique text
expect(
screen.getByText(/environments.connect.insert_this_code_into_the_head_tag_of_your_website/i)
).toBeInTheDocument();
// The HTML snippet contains a marker comment
expect(screen.getByText("START")).toBeInTheDocument();
// Verify the "Copy Code" button is present
expect(screen.getByRole("button", { name: /common.copy_code/i })).toBeInTheDocument();
});
test("renders NPM tab content when selected", async () => {
render(<OnboardingSetupInstructions {...defaultProps} />);
const user = userEvent.setup();
// Click on the "NPM" tab to switch views.
const npmTab = screen.getByText("NPM");
await user.click(npmTab);
// Check that the install commands are present
expect(screen.getByText(/npm install @formbricks\/js/)).toBeInTheDocument();
expect(screen.getByText(/yarn add @formbricks\/js/)).toBeInTheDocument();
// Verify the "Read Docs" link has the correct URL (based on channel prop)
const readDocsLink = screen.getByRole("link", { name: /common.read_docs/i });
expect(readDocsLink).toHaveAttribute("href", "https://formbricks.com/docs/app-surveys/framework-guides");
});
test("copies HTML snippet to clipboard and shows success toast when Copy Code button is clicked", async () => {
render(<OnboardingSetupInstructions {...defaultProps} />);
const user = userEvent.setup();
const writeTextSpy = vi.spyOn(navigator.clipboard, "writeText");
// Click the "Copy Code" button
const copyButton = screen.getByRole("button", { name: /common.copy_code/i });
await user.click(copyButton);
// Ensure navigator.clipboard.writeText was called.
expect(writeTextSpy).toHaveBeenCalled();
const writtenText = (navigator.clipboard.writeText as any).mock.calls[0][0] as string;
// Check that the pasted snippet contains the expected environment values
expect(writtenText).toContain('var appUrl = "https://example.com"');
expect(writtenText).toContain('var environmentId = "env-123"');
// Verify that a success toast was shown
expect(toast.success).toHaveBeenCalledWith("common.copied_to_clipboard");
});
test("renders step-by-step manual link with correct URL in HTML tab", () => {
render(<OnboardingSetupInstructions {...defaultProps} />);
const manualLink = screen.getByRole("link", { name: /common.step_by_step_manual/i });
expect(manualLink).toHaveAttribute(
"href",
"https://formbricks.com/docs/app-surveys/framework-guides#html"
);
});
});


@@ -1,15 +1,15 @@
"use client";
import { Button } from "@/modules/ui/components/button";
import { CodeBlock } from "@/modules/ui/components/code-block";
import { Html5Icon, NpmIcon } from "@/modules/ui/components/icons";
import { TabBar } from "@/modules/ui/components/tab-bar";
import { useTranslate } from "@tolgee/react";
import Link from "next/link";
import "prismjs/themes/prism.css";
import { useState } from "react";
import toast from "react-hot-toast";
import { useTranslation } from "react-i18next";
import { TProjectConfigChannel } from "@formbricks/types/project";
import { Button } from "@/modules/ui/components/button";
import { CodeBlock } from "@/modules/ui/components/code-block";
import { Html5Icon, NpmIcon } from "@/modules/ui/components/icons";
import { TabBar } from "@/modules/ui/components/tab-bar";
const tabs = [
{ id: "html", label: "HTML", icon: <Html5Icon /> },
@@ -20,16 +20,16 @@ interface OnboardingSetupInstructionsProps {
environmentId: string;
publicDomain: string;
channel: TProjectConfigChannel;
widgetSetupCompleted: boolean;
appSetupCompleted: boolean;
}
export const OnboardingSetupInstructions = ({
environmentId,
publicDomain,
channel,
widgetSetupCompleted,
appSetupCompleted,
}: OnboardingSetupInstructionsProps) => {
const { t } = useTranslate();
const { t } = useTranslation();
const [activeTab, setActiveTab] = useState(tabs[0].id);
const htmlSnippetForAppSurveys = `<!-- START Formbricks Surveys -->
<script type="text/javascript">
@@ -137,7 +137,7 @@ export const OnboardingSetupInstructions = ({
<div className="mt-4 flex justify-between space-x-2">
<Button
id="onboarding-inapp-connect-copy-code"
variant={widgetSetupCompleted ? "secondary" : "default"}
variant={appSetupCompleted ? "secondary" : "default"}
onClick={() => {
navigator.clipboard.writeText(
channel === "app" ? htmlSnippetForAppSurveys : htmlSnippetForWebsiteSurveys


@@ -1,12 +1,12 @@
import { XIcon } from "lucide-react";
import Link from "next/link";
import { ConnectWithFormbricks } from "@/app/(app)/(onboarding)/environments/[environmentId]/connect/components/ConnectWithFormbricks";
import { getEnvironment } from "@/lib/environment/service";
import { getPublicDomain } from "@/lib/getPublicUrl";
import { getProjectByEnvironmentId } from "@/lib/project/service";
import { getTranslate } from "@/lingodotdev/server";
import { Button } from "@/modules/ui/components/button";
import { Header } from "@/modules/ui/components/header";
import { getTranslate } from "@/tolgee/server";
import { XIcon } from "lucide-react";
import Link from "next/link";
interface ConnectPageProps {
params: Promise<{
@@ -25,7 +25,7 @@ const Page = async (props: ConnectPageProps) => {
const project = await getProjectByEnvironmentId(environment.id);
if (!project) {
throw new Error(t("common.project_not_found"));
throw new Error(t("common.workspace_not_found"));
}
const channel = project.config.channel || null;
@@ -42,11 +42,11 @@ const Page = async (props: ConnectPageProps) => {
<ConnectWithFormbricks
environment={environment}
publicDomain={publicDomain}
widgetSetupCompleted={environment.appSetupCompleted}
appSetupCompleted={environment.appSetupCompleted}
channel={channel}
/>
<Button
className="absolute right-5 top-5 !mt-0 text-slate-500 hover:text-slate-700"
className="absolute top-5 right-5 !mt-0 text-slate-500 hover:text-slate-700"
variant="ghost"
asChild>
<Link href={`/environments/${environment.id}`}>

Some files were not shown because too many files have changed in this diff.