Compare commits


14 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| pandeymangg | e3ba952154 | fixes a race condition between auto save and publish that led to no segments in surveys | 2026-02-18 20:30:15 +05:30 |
| Anshuman Pandey | aa538a3a51 | fix: better query in the backwards compatible code (#7288) | 2026-02-18 13:00:19 +00:00 |
| Anshuman Pandey | 817e108ff5 | docs: adds migration docs (#7281) (Co-authored-by: Bhagya Amarasinghe `<b.sithumini@yahoo.com>`) | 2026-02-17 17:01:46 +01:00 |
| Theodór Tómas | 33542d0c54 | fix: default preview colors (#7277) (Co-authored-by: Dhruwang `<dhruwangjariwala18@gmail.com>`) | 2026-02-17 11:28:58 +00:00 |
| Matti Nannt | f37d22f13d | docs: align rate limiting docs with current code enforcement (#7267) (Co-authored-by: pandeymangg `<anshuman.pandey9999@gmail.com>`) | 2026-02-17 07:42:53 +00:00 |
| Anshuman Pandey | 202ae903ac | chore: makes rate limit config const (#7274) | 2026-02-17 06:49:56 +00:00 |
| Dhruwang Jariwala | 6ab5cc367c | fix: reduced default height of input (#7259) | 2026-02-17 05:11:29 +00:00 |
| Theodór Tómas | 21559045ba | fix: input placeholder color (#7265) | 2026-02-17 05:11:01 +00:00 |
| Theodór Tómas | d7c57a7a48 | fix: disabling cache in dev (#7269) | 2026-02-17 04:44:22 +00:00 |
| Chowdhury Tafsir Ahmed Siddiki | 11b2ef4788 | docs: remove stale 'coming soon' placeholders (#7254) | 2026-02-16 13:21:12 +00:00 |
| Theodór Tómas | 6fefd51cce | fix: suggest colors has better succes copy (#7258) | 2026-02-16 13:18:46 +00:00 |
| Theodór Tómas | 65af826222 | fix: matrix table preview (#7257) (Co-authored-by: Dhruwang `<dhruwangjariwala18@gmail.com>`) | 2026-02-16 13:18:17 +00:00 |
| Anshuman Pandey | 12eb54c653 | fix: fixes number being passed into string attribute (#7255) | 2026-02-16 11:18:59 +00:00 |
| Dhruwang Jariwala | 5aa1427e64 | fix: input combobx height (#7256) | 2026-02-16 10:03:23 +00:00 |
10 changed files with 466 additions and 70 deletions

View File

@@ -30,4 +30,4 @@ export const rateLimitConfigs = {
upload: { interval: 60, allowedPerInterval: 5, namespace: "storage:upload" }, // 5 per minute
delete: { interval: 60, allowedPerInterval: 5, namespace: "storage:delete" }, // 5 per minute
},
} as const;
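
For context on what the `as const` change buys (an illustrative sketch, not part of the diff): it makes the config object deeply `readonly` and narrows each field to its literal type, so accidental mutation or a widened `string` namespace is caught at compile time.

```typescript
// Sketch: literal narrowing and readonly enforcement from `as const`
const rateLimitConfigs = {
  storage: {
    upload: { interval: 60, allowedPerInterval: 5, namespace: "storage:upload" },
  },
} as const;

// Narrowed to the literal type "storage:upload" rather than string:
type UploadNamespace = typeof rateLimitConfigs.storage.upload.namespace;

// Compile-time error: Cannot assign to 'allowedPerInterval' because it is a read-only property.
// rateLimitConfigs.storage.upload.allowedPerInterval = 100;
```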

View File

@@ -1229,6 +1229,103 @@ describe("segmentFilterToPrismaQuery", () => {
}
});
test("number filter falls back to raw SQL when un-migrated rows exist", async () => {
mockFindFirst.mockResolvedValue({ id: "unmigrated-row-1" });
mockQueryRawUnsafe.mockResolvedValue([{ contactId: "mock-contact-1" }]);
const filters: TBaseFilters = [
{
id: "filter_1",
connector: null,
resource: {
id: "attr_1",
root: {
type: "attribute" as const,
contactAttributeKey: "age",
},
value: 25,
qualifier: {
operator: "greaterThan",
},
},
},
];
const result = await segmentFilterToPrismaQuery(mockSegmentId, filters, mockEnvironmentId);
expect(result.ok).toBe(true);
if (result.ok) {
const filterClause = result.data.whereClause.AND?.[1] as any;
expect(filterClause.AND[0]).toEqual({
OR: [
{
attributes: {
some: {
attributeKey: { key: "age" },
valueNumber: { gt: 25 },
},
},
},
{ id: { in: ["mock-contact-1"] } },
],
});
}
expect(mockFindFirst).toHaveBeenCalledWith({
where: {
attributeKey: {
key: "age",
environmentId: mockEnvironmentId,
},
valueNumber: null,
},
select: { id: true },
});
expect(mockQueryRawUnsafe).toHaveBeenCalled();
const sqlCall = mockQueryRawUnsafe.mock.calls[0];
expect(sqlCall[0]).toContain('cak."environmentId" = $4');
expect(sqlCall[4]).toBe(mockEnvironmentId);
});
test("number filter uses clean Prisma query when backfill is complete", async () => {
const filters: TBaseFilters = [
{
id: "filter_1",
connector: null,
resource: {
id: "attr_1",
root: {
type: "attribute" as const,
contactAttributeKey: "score",
},
value: 100,
qualifier: {
operator: "lessEqual",
},
},
},
];
const result = await segmentFilterToPrismaQuery(mockSegmentId, filters, mockEnvironmentId);
expect(result.ok).toBe(true);
if (result.ok) {
const filterClause = result.data.whereClause.AND?.[1] as any;
expect(filterClause.AND[0]).toEqual({
attributes: {
some: {
attributeKey: { key: "score" },
valueNumber: { lte: 100 },
},
},
});
}
expect(mockFindFirst).toHaveBeenCalled();
expect(mockQueryRawUnsafe).not.toHaveBeenCalled();
});
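Together these two tests pin down the hybrid strategy: probe for a single row whose typed column is still `NULL`, and only widen the clean Prisma clause with a raw-SQL contact-id lookup when such a row exists. A condensed sketch of that branch, with a hypothetical helper name and elided SQL (the real logic lives in `segmentFilterToPrismaQuery`):

```typescript
import { prisma } from "@formbricks/database";

// Elided: the parameterized SQL that compares legacy string values numerically.
const rawNumberComparisonSql = "...";

// Hypothetical helper sketching the fallback branch exercised above.
async function buildNumberFilterClause(key: string, gt: number, environmentId: string) {
  // A single row with valueNumber still NULL means the backfill is incomplete.
  const unmigrated = await prisma.contactAttribute.findFirst({
    where: { attributeKey: { key, environmentId }, valueNumber: null },
    select: { id: true },
  });

  const cleanClause = {
    attributes: { some: { attributeKey: { key }, valueNumber: { gt } } },
  };
  if (!unmigrated) return cleanClause; // backfill complete: pure Prisma query

  // Otherwise resolve matching contacts from the un-migrated rows via raw SQL
  // and OR their ids into the clean clause.
  const rows = await prisma.$queryRawUnsafe<{ contactId: string }[]>(
    rawNumberComparisonSql,
    key,
    gt,
    environmentId
  );
  return { OR: [cleanClause, { id: { in: rows.map((r) => r.contactId) } }] };
}
```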
// ==========================================
// DATE FILTER TESTS
// ==========================================

View File

@@ -37,6 +37,7 @@ vi.mock("@formbricks/database", () => ({
create: vi.fn(),
delete: vi.fn(),
update: vi.fn(),
upsert: vi.fn(),
findFirst: vi.fn(),
},
survey: {
@@ -206,6 +207,73 @@ describe("Segment Service Tests", () => {
vi.mocked(prisma.segment.create).mockRejectedValue(new Error("DB error"));
await expect(createSegment(mockSegmentCreateInput)).rejects.toThrow(Error);
});
test("should upsert a private segment without surveyId", async () => {
const privateInput: TSegmentCreateInput = {
...mockSegmentCreateInput,
isPrivate: true,
};
const privateSegmentPrisma = { ...mockSegmentPrisma, isPrivate: true };
vi.mocked(prisma.segment.upsert).mockResolvedValue(privateSegmentPrisma);
const segment = await createSegment(privateInput);
expect(segment).toEqual({ ...mockSegment, isPrivate: true });
expect(prisma.segment.upsert).toHaveBeenCalledWith({
where: {
environmentId_title: {
environmentId,
title: privateInput.title,
},
},
create: {
environmentId,
title: privateInput.title,
description: undefined,
isPrivate: true,
filters: [],
},
update: {
description: undefined,
filters: [],
},
select: selectSegment,
});
expect(prisma.segment.create).not.toHaveBeenCalled();
});
test("should upsert a private segment with surveyId", async () => {
const privateInputWithSurvey: TSegmentCreateInput = {
...mockSegmentCreateInput,
isPrivate: true,
surveyId,
};
const privateSegmentPrisma = { ...mockSegmentPrisma, isPrivate: true };
vi.mocked(prisma.segment.upsert).mockResolvedValue(privateSegmentPrisma);
const segment = await createSegment(privateInputWithSurvey);
expect(segment).toEqual({ ...mockSegment, isPrivate: true });
expect(prisma.segment.upsert).toHaveBeenCalledWith({
where: {
environmentId_title: {
environmentId,
title: privateInputWithSurvey.title,
},
},
create: {
environmentId,
title: privateInputWithSurvey.title,
description: undefined,
isPrivate: true,
filters: [],
surveys: { connect: { id: surveyId } },
},
update: {
description: undefined,
filters: [],
surveys: { connect: { id: surveyId } },
},
select: selectSegment,
});
expect(prisma.segment.create).not.toHaveBeenCalled();
});
});
describe("cloneSegment", () => {

View File

@@ -136,28 +136,48 @@ export const createSegment = async (segmentCreateInput: TSegmentCreateInput): Pr
const { description, environmentId, filters, isPrivate, surveyId, title } = segmentCreateInput;
const surveyConnect = surveyId ? { surveys: { connect: { id: surveyId } } } : {};
try {
// Private segments use upsert because auto-save may have already created a
// default (empty-filter) segment via connectOrCreate before the user publishes.
// Without upsert the second create hits the (environmentId, title) unique constraint.
if (isPrivate) {
const segment = await prisma.segment.upsert({
where: {
environmentId_title: {
environmentId,
title,
},
},
create: {
environmentId,
title,
description,
isPrivate,
filters,
...surveyConnect,
},
update: {
description,
filters,
...surveyConnect,
},
select: selectSegment,
});
return transformPrismaSegment(segment);
}
const segment = await prisma.segment.create({
data: {
environmentId,
title,
description,
isPrivate,
filters,
...surveyConnect,
},
select: selectSegment,
});
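
With the upsert in place, both writers in the race can safely call `createSegment` for the same private segment. A hypothetical sequence (the ids, the title convention, and the import path are illustrative, not taken from the diff):

```typescript
import type { TBaseFilters } from "@formbricks/types/segment";

const environmentId = "env_123"; // illustrative ids
const surveyId = "survey_abc";
const userFilters: TBaseFilters = []; // stand-in for the user's actual filters

// 1) Auto-save creates the default private segment with empty filters.
await createSegment({ environmentId, title: surveyId, isPrivate: true, filters: [], surveyId });

// 2) Publish later submits the user's filters. Previously this second call
//    could throw Prisma's P2002 on the (environmentId, title) unique
//    constraint; with the upsert it updates the existing segment instead.
await createSegment({ environmentId, title: surveyId, isPrivate: true, filters: userFilters, surveyId });
```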

View File

@@ -40,7 +40,10 @@ export const SurveyInline = (props: Omit<SurveyContainerProps, "containerId">) =
isLoadingScript = true;
try {
const scriptUrl = props.appUrl ? `${props.appUrl}/js/surveys.umd.cjs` : "/js/surveys.umd.cjs";
const response = await fetch(
scriptUrl,
process.env.NODE_ENV === "development" ? { cache: "no-store" } : {}
);
if (!response.ok) {
throw new Error("Failed to load the surveys package");

View File

@@ -4,12 +4,182 @@ description: "Formbricks Self-hosted version migration"
icon: "arrow-right"
---
## v4.7
Formbricks v4.7 introduces **typed contact attributes** with native `number` and `date` data types. This enables comparison-based segment filters (e.g. "signup date before 2025-01-01") that were previously not possible with string-only attribute values.
### What Happens Automatically
When Formbricks v4.7 starts for the first time, the data migration will:
1. Analyze all existing contact attribute keys and infer their data types (`text`, `number`, or `date`) based on the stored values
2. Update the `ContactAttributeKey` table with the detected `dataType` for each key
3. **If your instance has fewer than 1,000,000 contact attribute rows**: backfill the new `valueNumber` and `valueDate` columns inline. No manual action is needed.
4. **If your instance has 1,000,000 or more contact attribute rows**: the value backfill is skipped to avoid hitting the migration timeout. You will need to run a standalone backfill script after the upgrade.
<Info>
Most self-hosted instances have far fewer than 1,000,000 contact attribute rows (a typical setup with 100K
contacts and 5-10 attributes each lands around 500K-1M rows). If you are below the threshold, the migration
handles everything automatically and you can skip the manual backfill step below.
</Info>
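To see which side of the threshold your instance falls on before upgrading, you can count the rows directly (a sketch assuming the Docker container and credentials from the backup step below, and the `ContactAttribute` table name; adjust to your setup):

```bash
docker exec formbricks-postgres-1 psql -U postgres -d formbricks \
  -c 'SELECT COUNT(*) FROM "ContactAttribute";'
```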
### Steps to Migrate
**1. Backup your Database**
<Tabs>
<Tab title="Docker">
Before running these steps, navigate to the `formbricks` directory where your `docker-compose.yml` file is located.
```bash
docker exec formbricks-postgres-1 pg_dump -Fc -U postgres -d formbricks > formbricks_pre_v4.7_$(date +%Y%m%d_%H%M%S).dump
```
<Info>
If you run into "**No such container**", use `docker ps` to find your container name, e.g.
`formbricks_postgres_1`.
</Info>
</Tab>
<Tab title="Kubernetes">
If you are using the **in-cluster PostgreSQL** deployed by the Helm chart:
```bash
kubectl exec -n formbricks formbricks-postgresql-0 -- pg_dump -Fc -U formbricks -d formbricks > formbricks_pre_v4.7_$(date +%Y%m%d_%H%M%S).dump
```
<Info>
If your PostgreSQL pod has a different name, run `kubectl get pods -n formbricks` to find it.
</Info>
If you are using a **managed PostgreSQL** service (e.g. AWS RDS, Cloud SQL), use your provider's backup/snapshot feature or run `pg_dump` directly against the external host.
</Tab>
</Tabs>
**2. Upgrade to Formbricks v4.7**
<Tabs>
<Tab title="Docker">
```bash
# Pull the latest version
docker compose pull
# Stop the current instance
docker compose down
# Start with Formbricks v4.7
docker compose up -d
```
</Tab>
<Tab title="Kubernetes">
```bash
helm upgrade formbricks oci://ghcr.io/formbricks/helm-charts/formbricks \
-n formbricks \
--set deployment.image.tag=v4.7.0
```
<Info>
The Helm chart includes a migration Job that automatically runs Prisma schema migrations as a
PreSync hook before the new pods start. No manual migration step is needed.
</Info>
</Tab>
</Tabs>
**3. Check the Migration Logs**
After Formbricks starts, check the logs to see whether the value backfill was completed or skipped:
<Tabs>
<Tab title="Docker">
```bash
docker compose logs formbricks | grep -i "backfill"
```
</Tab>
<Tab title="Kubernetes">
```bash
# Check the application pod logs
kubectl logs -n formbricks -l app.kubernetes.io/name=formbricks --tail=200 | grep -i "backfill"
```
If the Helm migration Job ran, you can also inspect its logs:
```bash
kubectl logs -n formbricks job/formbricks-migration
```
</Tab>
</Tabs>
If you see a message like `Skipping value backfill (X rows >= 1000000 threshold)`, proceed to step 4. Otherwise, the migration is complete and no further action is needed.
**4. Run the Backfill Script (large datasets only)**
If the migration skipped the value backfill, run the standalone backfill script inside the running Formbricks container:
<Tabs>
<Tab title="Docker">
```bash
docker exec formbricks node packages/database/dist/scripts/backfill-attribute-values.js
```
<Info>Replace `formbricks` with your actual container name if it differs. Use `docker ps` to find it.</Info>
</Tab>
<Tab title="Kubernetes">
```bash
kubectl exec -n formbricks deploy/formbricks -- node packages/database/dist/scripts/backfill-attribute-values.js
```
<Info>
If your Formbricks deployment has a different name, run `kubectl get deploy -n formbricks` to find it.
</Info>
</Tab>
</Tabs>
The script will output progress as it runs:
```
========================================
Attribute Value Backfill Script
========================================
Fetching number-type attribute keys...
Found 12 number-type keys. Backfilling valueNumber...
Number backfill progress: 10/12 keys (48230 rows updated)
Number backfill progress: 12/12 keys (52104 rows updated)
Fetching date-type attribute keys...
Found 5 date-type keys. Backfilling valueDate...
Date backfill progress: 5/5 keys (31200 rows updated)
========================================
Backfill Complete!
========================================
valueNumber rows updated: 52104
valueDate rows updated: 31200
Duration: 42.3s
========================================
```
Key characteristics of the backfill script:
- **Safe to run while Formbricks is live** -- it does not lock the entire table or wrap work in a long transaction
- **Idempotent** -- it only updates rows where the typed columns are still `NULL`, so you can safely run it multiple times
- **Resumable** -- each batch commits independently, so if the process is interrupted you can re-run it and it picks up where it left off
- **No timeout risk** -- unlike the migration, this script runs outside the migration transaction and has no time limit
**5. Verify the Upgrade**
- Access your Formbricks instance at the same URL as before
- If you use contact segments with number or date filters, verify they return the expected results
- Check that existing surveys and response data are intact
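
For a database-side spot check, a query along these lines should return 0 once the backfill is complete (a sketch; table and column names are assumed from the migration notes above):

```bash
docker exec -i formbricks-postgres-1 psql -U postgres -d formbricks <<'SQL'
-- Rows for number-typed keys that still lack a typed value (assumed schema)
SELECT COUNT(*)
FROM "ContactAttribute" ca
JOIN "ContactAttributeKey" cak ON cak.id = ca."attributeKeyId"
WHERE cak."dataType" = 'number'
  AND ca."valueNumber" IS NULL;
SQL
```

A non-zero count can also mean some legacy values could not be parsed as numbers, so treat it as a prompt to investigate rather than proof of failure.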
---
## v4.0
<Warning>
**Important: Migration Required**
Formbricks 4 introduces additional requirements for self-hosting setups and makes a dedicated Redis cache as well as S3-compatible file storage mandatory.
</Warning>
Formbricks 4.0 is a **major milestone** that sets up the technical foundation for future iterations and feature improvements. This release focuses on modernizing core infrastructure components to improve reliability, scalability, and enable advanced features going forward.
@@ -17,9 +187,11 @@ Formbricks 4.0 is a **major milestone** that sets up the technical foundation fo
### What's New in Formbricks 4.0
**🚀 New Enterprise Features:**
- **Quotas Management**: Advanced quota controls for enterprise users
**🏗️ Technical Foundation Improvements:**
- **Enhanced File Storage**: Improved file handling with better performance and reliability
- **Improved Caching**: New caching functionality improving speed, extensibility and reliability
- **Database Optimization**: Removal of unused database tables and fields for better performance
@@ -39,7 +211,8 @@ These services are already included in the updated one-click setup for self-host
We know this represents more moving parts in your infrastructure and might even introduce more complexity in hosting Formbricks, and we don't take this decision lightly. As Formbricks grows into a comprehensive Survey and Experience Management platform, we've reached a point where the simple, single-service approach was holding back our ability to deliver the reliable, feature-rich product our users demand and deserve.
By moving to dedicated, professional-grade services for these critical functions, we're building the foundation needed to deliver:
- **Enterprise-grade reliability** with proper redundancy and backup capabilities
- **Advanced features** that require sophisticated caching and file processing
- **Better performance** through optimized, dedicated services
- **Future scalability** to support larger deployments and more complex use cases without the need to maintain two different approaches
@@ -52,7 +225,7 @@ Additional migration steps are needed if you are using a self-hosted Formbricks
### One-Click Setup
For users using our official one-click setup, we provide an automated migration using a migration script:
```bash
# Download the latest script
@@ -67,11 +240,11 @@ chmod +x migrate-to-v4.sh
```
This script guides you through the steps for the infrastructure migration and does the following:
- Adds a Redis service to your setup and configures it
- Adds a MinIO service (open source S3-alternative) to your setup, configures it and migrates local files to it
- Pulls the latest Formbricks image and updates your instance
### Manual Setup
If you use a different setup to host your Formbricks instance, you need to make the necessary adjustments to run Formbricks 4.0.
@@ -87,6 +260,7 @@ You need to configure the `REDIS_URL` environment variable and point it to your
To use file storage (e.g., file upload questions, image choice questions, custom survey backgrounds, etc.), you need to have S3-compatible file storage set up and connected to Formbricks.
Formbricks supports multiple storage providers, among many other S3-compatible options:
- AWS S3
- Digital Ocean Spaces
- Hetzner Object Storage
@@ -101,6 +275,7 @@ Please make sure to set up a storage bucket with one of these solutions and then
S3_BUCKET_NAME: formbricks-uploads
S3_ENDPOINT_URL: http://minio:9000 # not needed for AWS S3
```
#### Upgrade Process
**1. Backup your Database**
@@ -112,8 +287,8 @@ docker exec formbricks-postgres-1 pg_dump -Fc -U postgres -d formbricks > formbr
```
<Info>
If you run into "**No such container**", use `docker ps` to find your container name, e.g.
`formbricks_postgres_1`.
</Info>
**2. Upgrade to Formbricks 4.0**
@@ -134,6 +309,7 @@ docker compose up -d
**3. Automatic Database Migration**
When you start Formbricks 4.0 for the first time, it will **automatically**:
- Detect and apply required database schema updates
- Remove unused database tables and fields
- Optimize the database structure for better performance

View File

@@ -1,41 +1,94 @@
---
title: "Rate Limiting"
description: "Rate limiting for Formbricks"
description: "Current request rate limits in Formbricks"
icon: "timer"
---
Formbricks applies request rate limits to protect against abuse and keep API usage fair.
## Default Rate Limits
Rate limits are scoped by identifier, depending on the endpoint:
- IP hash (for unauthenticated/client-side routes and public actions)
- API key ID (for authenticated API calls)
- User ID (for authenticated session-based calls and server actions)
- Organization ID (for follow-up email dispatch)
When a limit is exceeded, the API returns `429 Too Many Requests`.
## Management API Rate Limits
These are the current limits for Management APIs:
| **Route Group** | **Limit** | **Window** | **Identifier** |
| --- | --- | --- | --- |
| `/api/v1/management/*` (except `/api/v1/management/storage`), `/api/v1/webhooks/*`, `/api/v1/integrations/*`, `/api/v1/management/me` | 100 requests | 1 minute | API key ID or session user ID |
| `/api/v2/management/*` (and other v2 authenticated routes that use `authenticatedApiClient`) | 100 requests | 1 minute | API key ID |
| `POST /api/v1/management/storage` | 5 requests | 1 minute | API key ID or session user ID |
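For example, a scripted management call authenticates with an API key and draws from that key's 100 requests/minute budget (illustrative request against the v1 management API):

```bash
curl -H "x-api-key: $FORMBRICKS_API_KEY" \
  "https://app.formbricks.com/api/v1/management/surveys"
```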
## All Enforced Limits
| **Config** | **Limit** | **Window** | **Identifier** | **Used For** |
| --- | --- | --- | --- | --- |
| `auth.login` | 10 requests | 15 minutes | IP hash | Email/password login flow (`/api/auth/callback/credentials`) |
| `auth.signup` | 30 requests | 60 minutes | IP hash | Signup server action |
| `auth.forgotPassword` | 5 requests | 60 minutes | IP hash | Forgot password server action |
| `auth.verifyEmail` | 10 requests | 60 minutes | IP hash | Email verification callback + resend verification action |
| `api.v1` | 100 requests | 1 minute | API key ID or session user ID | v1 management, webhooks, integrations, and `/api/v1/management/me` |
| `api.v2` | 100 requests | 1 minute | API key ID | v2 authenticated API wrapper (`authenticatedApiClient`) |
| `api.client` | 100 requests | 1 minute | IP hash | v1 client API routes (except `/api/v1/client/og` and storage upload override), plus v2 routes that re-use those v1 handlers |
| `storage.upload` | 5 requests | 1 minute | IP hash or authenticated ID | Client storage upload and management storage upload |
| `storage.delete` | 5 requests | 1 minute | API key ID or session user ID | `DELETE /storage/[environmentId]/[accessType]/[fileName]` |
| `actions.emailUpdate` | 3 requests | 60 minutes | User ID | Profile email update action |
| `actions.surveyFollowUp` | 50 requests | 60 minutes | Organization ID | Survey follow-up email processing |
| `actions.sendLinkSurveyEmail` | 10 requests | 60 minutes | IP hash | Link survey email send action |
| `actions.licenseRecheck` | 5 requests | 1 minute | User ID | Enterprise license recheck action |
## Current Endpoint Exceptions
The following routes are currently not rate-limited by the server-side limiter:
- `GET /api/v1/client/og` (explicitly excluded)
- `POST /api/v2/client/[environmentId]/responses`
- `POST /api/v2/client/[environmentId]/displays`
- `GET /api/v2/health`
## 429 Response Shape
v1-style endpoints return:
```json
{
"code": 429,
"error": "Too many requests, Please try after a while!"
"code": "too_many_requests",
"message": "Maximum number of requests reached. Please try again later.",
"details": {}
}
```
v2-style endpoints return:
```json
{
"error": {
"code": 429,
"message": "Too Many Requests"
}
}
```
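If a script hits these limits, a small retry wrapper keeps it from failing hard on `429` (a sketch; whether the server sends a `Retry-After` header is not specified above, so the code falls back to exponential backoff):

```typescript
// Retry on 429, honoring Retry-After if present, else exponential backoff.
async function fetchWithBackoff(url: string, init: RequestInit = {}, maxRetries = 3): Promise<Response> {
  for (let attempt = 0; ; attempt++) {
    const res = await fetch(url, init);
    if (res.status !== 429 || attempt >= maxRetries) return res;
    const retryAfter = Number(res.headers.get("retry-after"));
    const delayMs =
      Number.isFinite(retryAfter) && retryAfter > 0 ? retryAfter * 1000 : 2 ** attempt * 1000;
    await new Promise((resolve) => setTimeout(resolve, delayMs));
  }
}
```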
## Disabling Rate Limiting
For self-hosters, rate limiting can be disabled if necessary. We strongly recommend keeping it enabled in production.
Set:
```bash
RATE_LIMITING_DISABLED=1
```
After changing this value, restart the server.
## Operational Notes
- Redis/Valkey is required for robust rate limiting (`REDIS_URL`).
- If Redis is unavailable at runtime, rate-limiter checks currently fail open (requests are allowed through without enforcement).
- Authentication-failure audit logging uses a separate throttle (`shouldLogAuthFailure()`) and is intentionally **fail-closed**: when Redis is unavailable or errors occur, audit log entries are skipped entirely rather than written without throttle control. Requests themselves are still allowed (rate limiting fails open, as noted above), but no authentication-failure audit logs are recorded until Redis recovers; this prevents log spam while preserving the hash-integrity chain required for compliance.
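The fail-open/fail-closed distinction in these notes comes down to what the `catch` branch returns; a minimal sketch of both behaviors (illustrative only, not Formbricks' actual implementation):

```typescript
type RedisLike = { incr: (key: string) => Promise<number> };

// Fail-open: if Redis errors, allow the request through un-throttled.
async function isRequestAllowed(redis: RedisLike, key: string, limit: number): Promise<boolean> {
  try {
    return (await redis.incr(key)) <= limit; // window expiry handling omitted
  } catch {
    return true; // limiter unavailable: let traffic pass
  }
}

// Fail-closed: if Redis errors, skip the guarded action (here: an audit log
// write) rather than performing it without throttle control.
async function shouldWriteAuditLog(redis: RedisLike, key: string, limit: number): Promise<boolean> {
  try {
    return (await redis.incr(key)) <= limit;
  } catch {
    return false; // throttle state unknown: write nothing
  }
}
```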

View File

@@ -16,8 +16,6 @@ The Churn Survey is among the most effective ways to identify weaknesses in your
* Follow-up to prevent bad reviews
## Overview
To run the Churn Survey in your app you want to proceed as follows:
@@ -80,13 +78,6 @@ Whenever a user visits this page, matches the filter conditions above and the re
Here is our complete [Actions manual](/xm-and-surveys/surveys/website-app-surveys/actions/) covering [No-Code](/xm-and-surveys/surveys/website-app-surveys/actions#setting-up-no-code-actions) and [Code](/xm-and-surveys/surveys/website-app-surveys/actions#setting-up-code-actions) Actions.
### 5. Select Action in the “When to ask” card
![Select feedback button action](/images/xm-and-surveys/xm/best-practices/cancel-subscription/select-action.webp)

View File

@@ -46,13 +46,7 @@ _Want to change the button color? Adjust it in the project settings!_
Save, and move over to the **Audience** tab.
### 3. Pre-segment your audience
Pre-segmentation isn't needed for this survey since you likely want to target all users who cancel their trial. You can use a specific user action, like clicking **Cancel Trial**, to show the survey only to users trying your product.
@@ -62,13 +56,13 @@ How you trigger your survey depends on your product. There are two options:
- **Trigger by Page view:** If you have a page like `/trial-cancelled` for users who cancel their trial subscription, create a user action with the type "Page View." Select "Limit to specific pages" and apply URL filters with these settings:
![Add page URL action](/images/xm-and-surveys/xm/best-practices/improve-trial-cr/action-pageurl.webp)
Whenever a user visits this page, the survey will be displayed ✅
- **Trigger by Button Click:** Alternatively, if you have a “Cancel Trial” button in your app, you can set up a user action matching its `Inner Text`:
![Add inner text action](/images/xm-and-surveys/xm/best-practices/improve-trial-cr/action-innertext.webp)
Please have a look at our complete [Actions manual](/xm-and-surveys/surveys/website-app-surveys/actions) if you have questions.

View File

@@ -54,13 +54,7 @@ In the button settings you have to make sure it is set to “External URL”. In
Save, and move over to the “Audience” tab.
### 3. Pre-segment your audience
Once you clicked over to the “Audience” tab you can change the settings. In the **Who To Send** card, select “Filter audience by attribute”. This allows you to only show the prompt to a specific segment of your user base.