Mirror of https://github.com/formbricks/formbricks.git (synced 2026-02-13 11:18:46 -06:00)

Compare commits: 110 commits, 4.5.0-rc.2 ... 4.7.0-rc.1
Commits in this range (SHA1):

08ac490512, 4538c7bbcb, 7495c04048, 85a1318f77, 22ae0a731e, f7e8bc1630, 36f091bc73, 091b78d1e3, 18a7b233f0, b52627b3e9,
73e8e2f899, fb0ef2fa82, 8ab8adc3d0, fad55e3486, a5c92bbc7b, 48eff5b547, ff10ca7d6a, 04c2b030f1, 256b223925, f3ff4c9951,
2a590ef315, 07a6cd6c0e, 335da2f1f5, 13b9db915b, 76b25476b3, 04220902b4, 4649a2de3e, 56ce05fb94, 1b81e68106, 202958cac2,
8e901fb3c9, 29afb3e4e9, 38a3b31761, 2bfb79d999, 7971b9b312, 1143f58ba5, 47fe3c73dd, 727e586b16, 4a9b4d52ca, cbb0166419,
4b0c518683, 5f05f8d36b, f7558a7497, 009beba866, c3ec5ddc3a, 9573ae19e6, 7b3f841c5e, 8f7d225d6a, 094b6dedba, 36f0be07c4,
e079055a43, 9ae9a3a9fc, b4606c0113, 6be654ab60, 95c2e24416, 5b86dd3a8f, 0da083a214, 379a86cf46, bed78716f0, 6167c3d9e6,
1db1271e7f, 9ec1964106, d5a70796dd, 246351b3e6, 22ea7302bb, 8d47ab9709, 8f6d27c1ef, a37815b831, 2b526a87ca, 047750967c,
a54356c3b0, 38ea5ed6ae, 6e19de32f7, 957a4432f4, 22a5d4bb7d, 226dff0344, d474a94a21, c1a4cc308b, 210da98b69, 2fc183d384,
78fb111610, 11c0cb4b61, 95831f7c7f, a31e7bfaa5, 6e35fc1769, 48cded1646, db752cee15, b33aae0a73, 72126ad736, 4a2eeac90b,
46be3e7d70, 6d140532a7, 8c4a7f1518, 63fe32a786, 84c465f974, 6a33498737, 5130c747d4, f5583d2652, e0d75914a4, f02ca1cfe1,
4ade83f189, f1fc9fea2c, 25266e4566, b960cfd2a1, 9e1d1c1dc2, 8c63a9f7af, fff0a7f052, 0ecc8aabff, 01cc0ab64d, 1d125bdac2
@@ -1,61 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Build & Deployment Best Practices

## Build Process

### Running Builds
- Use `pnpm build` from the project root for a full build
- Monitor for React hooks warnings and fix them immediately
- Ensure all TypeScript errors are resolved before deployment

### Common Build Issues & Fixes

#### React Hooks Warnings
- Capture ref values in variables within useEffect cleanup
- Avoid accessing `.current` directly in cleanup functions
- Pattern for fixing ref cleanup warnings:

```typescript
useEffect(() => {
  const currentRef = myRef.current;
  return () => {
    if (currentRef) {
      currentRef.cleanup();
    }
  };
}, []);
```

#### Test Failures During Build
- Ensure all test mocks include required constants like `SESSION_MAX_AGE`
- Mock Next.js navigation hooks properly: `useParams`, `useRouter`, `useSearchParams` (see the mock sketch below)
- Remove unused imports and constants from test files
- Use literal values instead of imported constants when the constant isn't actually needed
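
A minimal Vitest mock sketch for the bullets above. The module paths (`next/navigation`, `@/lib/constants`), the `SESSION_MAX_AGE` value, and the returned params are illustrative assumptions; adapt them to the file under test:

```typescript
import { vi } from "vitest";

// Hypothetical constants module - only SESSION_MAX_AGE is needed by the component under test
vi.mock("@/lib/constants", () => ({
  SESSION_MAX_AGE: 86400, // literal value; avoids importing the real config
}));

// Mock the Next.js navigation hooks the component actually calls
vi.mock("next/navigation", () => ({
  useParams: vi.fn(() => ({ environmentId: "env_123" })),
  useRouter: vi.fn(() => ({ push: vi.fn(), refresh: vi.fn() })),
  useSearchParams: vi.fn(() => new URLSearchParams()),
}));
```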

### Test Execution
- Run `pnpm test` to execute all tests
- Use `pnpm test -- --run filename.test.tsx` for specific test files
- Fix test failures before merging code
- Ensure 100% test coverage for new components

### Performance Monitoring
- Monitor build times and optimize if necessary
- Watch for memory usage during builds
- Use proper caching strategies for faster rebuilds

### Deployment Checklist
1. All tests passing
2. Build completes without warnings
3. TypeScript compilation successful
4. No linter errors
5. Database migrations applied (if any)
6. Environment variables configured

### EKS Deployment Considerations
- Ensure latest code is deployed to all pods
- Monitor AWS RDS Performance Insights for database issues
- Verify environment-specific configurations
- Check pod health and resource usage
@@ -1,415 +0,0 @@
---
description: Caching rules for performance improvements
globs:
alwaysApply: false
---
# Cache Optimization Patterns for Formbricks

## Cache Strategy Overview

Formbricks uses a **hybrid caching approach** optimized for enterprise scale:

- **Redis** for persistent cross-request caching
- **React `cache()`** for request-level deduplication
- **NO Next.js `unstable_cache()`** - avoid for reliability

## Key Files

### Core Cache Infrastructure
- [packages/cache/src/service.ts](mdc:packages/cache/src/service.ts) - Redis cache service
- [packages/cache/src/client.ts](mdc:packages/cache/src/client.ts) - Cache client initialization and singleton management
- [apps/web/lib/cache/index.ts](mdc:apps/web/lib/cache/index.ts) - Cache service proxy for web app
- [packages/cache/src/index.ts](mdc:packages/cache/src/index.ts) - Cache package exports and utilities

### Environment State Caching (Critical Endpoint)
- [apps/web/app/api/v1/client/[environmentId]/environment/route.ts](mdc:apps/web/app/api/v1/client/[environmentId]/environment/route.ts) - Main endpoint serving hundreds of thousands of SDK clients
- [apps/web/app/api/v1/client/[environmentId]/environment/lib/data.ts](mdc:apps/web/app/api/v1/client/[environmentId]/environment/lib/data.ts) - Optimized data layer with caching

## Enterprise-Grade Cache Key Patterns

**Always use** the `createCacheKey` utilities from the cache package:

```typescript
// ✅ Correct patterns
createCacheKey.environment.state(environmentId) // "fb:env:abc123:state"
createCacheKey.organization.billing(organizationId) // "fb:org:xyz789:billing"
createCacheKey.license.status(organizationId) // "fb:license:org123:status"
createCacheKey.user.permissions(userId, orgId) // "fb:user:456:org:123:permissions"

// ❌ Never use flat keys - collision-prone
"environment_abc123"
"user_data_456"
```

## When to Use Each Cache Type

### Use React `cache()` for Request Deduplication
```typescript
// ✅ Prevents multiple calls within same request
export const getEnterpriseLicense = reactCache(async () => {
  // Complex license validation logic
});
```

### Use `cache.withCache()` for Simple Database Queries
```typescript
// ✅ Simple caching with automatic fallback (TTL in milliseconds)
export const getActionClasses = (environmentId: string) => {
  return cache.withCache(
    () => fetchActionClassesFromDB(environmentId),
    createCacheKey.environment.actionClasses(environmentId),
    60 * 30 * 1000 // 30 minutes in milliseconds
  );
};
```

### Use Explicit Redis Cache for Complex Business Logic
```typescript
// ✅ Full control for high-stakes endpoints
export const getEnvironmentState = async (environmentId: string) => {
  const cached = await environmentStateCache.getEnvironmentState(environmentId);
  if (cached) return cached;

  const fresh = await buildComplexState(environmentId);
  await environmentStateCache.setEnvironmentState(environmentId, fresh);
  return fresh;
};
```

## Caching Decision Framework

### When TO Add Caching

```typescript
// ✅ Expensive operations that benefit from caching
- Database queries (>10ms typical)
- External API calls (>50ms typical)
- Complex computations (>5ms)
- File system operations
- Heavy data transformations

// Example: Database query with complex joins (TTL in milliseconds)
export const getEnvironmentWithDetails = withCache(
  async (environmentId: string) => {
    return prisma.environment.findUnique({
      where: { id: environmentId },
      include: { /* complex joins */ }
    });
  },
  { key: createCacheKey.environment.details(environmentId), ttl: 60 * 30 * 1000 } // 30 minutes
)();
```

### When NOT to Add Caching

```typescript
// ❌ Don't cache these operations - minimal overhead
- Simple property access (<0.1ms)
- Basic transformations (<1ms)
- Functions that just call already-cached functions
- Pure computation without I/O

// ❌ Bad example: Redundant caching
const getCachedLicenseFeatures = withCache(
  async () => {
    const license = await getEnterpriseLicense(); // Already cached!
    return license.active ? license.features : null; // Just property access
  },
  { key: "license-features", ttl: 1800 * 1000 } // 30 minutes in milliseconds
);

// ✅ Good example: Simple and efficient
const getLicenseFeatures = async () => {
  const license = await getEnterpriseLicense(); // Already cached
  return license.active ? license.features : null; // 0.1ms overhead
};
```

### Computational Overhead Analysis

Before adding caching, analyze the overhead:

```typescript
// ✅ High overhead - CACHE IT
- Database queries: ~10-100ms
- External APIs: ~50-500ms
- File I/O: ~5-50ms
- Complex algorithms: >5ms

// ❌ Low overhead - DON'T CACHE
- Property access: ~0.001ms
- Simple lookups: ~0.1ms
- Basic validation: ~1ms
- Type checks: ~0.01ms

// Example decision tree:
const expensiveOperation = async () => {
  return prisma.query(); // 50ms - CACHE IT
};

const cheapOperation = (data: any) => {
  return data.property; // 0.001ms - DON'T CACHE
};
```

### Avoid Cache Wrapper Anti-Pattern

```typescript
// ❌ Don't create wrapper functions just for caching
const getCachedUserPermissions = withCache(
  async (userId: string) => getUserPermissions(userId),
  { key: createCacheKey.user.permissions(userId), ttl: 3600 * 1000 } // 1 hour in milliseconds
);

// ✅ Add caching directly to the original function
export const getUserPermissions = withCache(
  async (userId: string) => {
    return prisma.user.findUnique({
      where: { id: userId },
      include: { permissions: true }
    });
  },
  { key: createCacheKey.user.permissions(userId), ttl: 3600 * 1000 } // 1 hour in milliseconds
);
```

## TTL Coordination Strategy

### Multi-Layer Cache Coordination
For endpoints serving client SDKs, coordinate TTLs across layers:

```typescript
// Client SDK cache (expiresAt) - longest TTL for fewer requests
const CLIENT_TTL = 60; // 1 minute (seconds for client)

// Server Redis cache - shorter TTL ensures fresh data for clients
const SERVER_TTL = 60 * 1000; // 1 minute in milliseconds

// HTTP cache headers (seconds)
const BROWSER_TTL = 60; // 1 minute (max-age)
const CDN_TTL = 60; // 1 minute (s-maxage)
const CORS_TTL = 60 * 60; // 1 hour (balanced approach)
```

### Standard TTL Guidelines (in milliseconds for cache-manager + Keyv)
```typescript
// Configuration data - rarely changes
const CONFIG_TTL = 60 * 60 * 24 * 1000; // 24 hours

// User data - moderate frequency
const USER_TTL = 60 * 60 * 2 * 1000; // 2 hours

// Survey data - changes moderately
const SURVEY_TTL = 60 * 15 * 1000; // 15 minutes

// Billing data - expensive to compute
const BILLING_TTL = 60 * 30 * 1000; // 30 minutes

// Action classes - infrequent changes
const ACTION_CLASS_TTL = 60 * 30 * 1000; // 30 minutes
```

## High-Frequency Endpoint Optimization

### Performance Patterns for High-Volume Endpoints

```typescript
// ✅ Optimized high-frequency endpoint pattern
export const GET = async (request: NextRequest, props: { params: Promise<{ id: string }> }) => {
  const params = await props.params;

  try {
    // Simple validation (avoid Zod for high-frequency)
    if (!params.id || typeof params.id !== 'string') {
      return responses.badRequestResponse("ID is required", undefined, true);
    }

    // Single optimized query with caching
    const data = await getOptimizedData(params.id);

    return responses.successResponse(
      {
        data,
        expiresAt: new Date(Date.now() + CLIENT_TTL * 1000), // SDK cache duration
      },
      true,
      "public, s-maxage=1800, max-age=3600, stale-while-revalidate=1800, stale-if-error=3600"
    );
  } catch (err) {
    // Simplified error handling for performance
    if (err instanceof ResourceNotFoundError) {
      return responses.notFoundResponse(err.resourceType, err.resourceId);
    }
    logger.error({ error: err, url: request.url }, "Error in high-frequency endpoint");
    return responses.internalServerErrorResponse(err.message, true);
  }
};
```

### Avoid These Performance Anti-Patterns

```typescript
// ❌ Avoid for high-frequency endpoints
const inputValidation = ZodSchema.safeParse(input); // Too slow
const startTime = Date.now(); logger.debug(...); // Logging overhead
const { data, revalidateEnvironment } = await get(); // Complex return types
```

### CORS Optimization
```typescript
// ✅ Balanced CORS caching (not too aggressive)
export const OPTIONS = async (): Promise<Response> => {
  return responses.successResponse(
    {},
    true,
    "public, s-maxage=3600, max-age=3600" // 1 hour balanced approach
  );
};
```

## Redis Cache Migration from Next.js

### Avoid Legacy Next.js Patterns
```typescript
// ❌ Old Next.js unstable_cache pattern (avoid)
const getCachedData = unstable_cache(
  async (id) => fetchData(id),
  ['cache-key'],
  { tags: ['environment'], revalidate: 900 }
);

// ❌ Don't use revalidateEnvironment flags with Redis
return { data, revalidateEnvironment: true }; // This gets cached incorrectly!

// ✅ New Redis pattern with withCache (TTL in milliseconds)
export const getCachedData = (id: string) =>
  withCache(
    () => fetchData(id),
    {
      key: createCacheKey.environment.data(id),
      ttl: 60 * 15 * 1000, // 15 minutes in milliseconds
    }
  )();
```

### Remove Revalidation Logic
When migrating from Next.js `unstable_cache`:
- Remove `revalidateEnvironment` or similar flags
- Remove tag-based invalidation logic
- Use TTL-based expiration instead
- Handle one-time updates (like `appSetupCompleted`) directly in cache (see the sketch below)
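
A hedged sketch of handling such a one-time flag directly in the cache instead of returning a revalidation flag. It only uses the cache API already shown above (`cache.get`, `cache.set`, `invalidateCache`, `createCacheKey`); the state shape and the 60-second TTL are assumptions:

```typescript
// Sketch: when app setup completes, patch the cached environment state in place.
export const markAppSetupCompleted = async (environmentId: string) => {
  const key = createCacheKey.environment.state(environmentId);
  const cached = (await cache.get(key)) as Record<string, unknown> | null;

  if (cached) {
    // Update the one-time flag and write the state back (TTL in milliseconds)
    await cache.set(key, { ...cached, appSetupCompleted: true }, 60 * 1000);
  } else {
    // Nothing cached yet - drop the key so the next read rebuilds fresh state
    await invalidateCache(key);
  }
};
```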

## Data Layer Optimization

### Single Query Pattern
```typescript
// ✅ Optimize with single database query
export const getOptimizedEnvironmentData = async (environmentId: string) => {
  return prisma.environment.findUniqueOrThrow({
    where: { id: environmentId },
    include: {
      project: {
        select: { id: true, recontactDays: true, /* ... */ }
      },
      organization: {
        select: { id: true, billing: true }
      },
      surveys: {
        where: { status: "inProgress" },
        select: { id: true, name: true, /* ... */ }
      },
      actionClasses: {
        select: { id: true, name: true, /* ... */ }
      }
    }
  });
};

// ❌ Avoid multiple separate queries
const environment = await getEnvironment(id);
const organization = await getOrganization(environment.organizationId);
const surveys = await getSurveys(id);
const actionClasses = await getActionClasses(id);
```

## Invalidation Best Practices

**Always use explicit key-based invalidation:**

```typescript
// ✅ Clear and debuggable
await invalidateCache(createCacheKey.environment.state(environmentId));
await invalidateCache([
  createCacheKey.environment.surveys(environmentId),
  createCacheKey.environment.actionClasses(environmentId)
]);

// ❌ Avoid complex tag systems
await invalidateByTags(["environment", "survey"]); // Don't do this
```

## Critical Performance Targets

### High-Frequency Endpoint Goals
- **Cache hit ratio**: >85%
- **Response time P95**: <200ms
- **Database load reduction**: >60%
- **HTTP cache duration**: 1hr browser, 30min Cloudflare
- **SDK refresh interval**: 1 hour with 30min server cache

### Performance Monitoring
- Use **existing elastic cache analytics** for metrics
- Log cache errors and warnings (not debug info)
- Track database query reduction
- Monitor response times for cached endpoints
- **Avoid performance logging** in high-frequency endpoints

## Error Handling Pattern

Always provide fallback to fresh data on cache errors:

```typescript
try {
  const cached = await cache.get(key);
  if (cached) return cached;

  const fresh = await fetchFresh();
  await cache.set(key, fresh, ttl); // ttl in milliseconds
  return fresh;
} catch (error) {
  // ✅ Always fallback to fresh data
  logger.warn("Cache error, fetching fresh", { key, error });
  return fetchFresh();
}
```

## Common Pitfalls to Avoid

1. **Never use Next.js `unstable_cache()`** - unreliable in production
2. **Don't use revalidation flags with Redis** - they get cached incorrectly
3. **Avoid Zod validation** for simple parameters in high-frequency endpoints
4. **Don't add performance logging** to high-frequency endpoints
5. **Coordinate TTLs** between client and server caches
6. **Don't over-engineer** with complex tag systems
7. **Avoid caching rapidly changing data** (real-time metrics)
8. **Always validate cache keys** to prevent collisions
9. **Don't add redundant caching layers** - analyze computational overhead first
10. **Avoid cache wrapper functions** - add caching directly to expensive operations
11. **Don't cache property access or simple transformations** - overhead is negligible
12. **Analyze the full call chain** before adding caching to avoid double-caching
13. **Remember TTL is in milliseconds** for the cache-manager + Keyv stack (not seconds)

## Monitoring Strategy

- Use **existing elastic cache analytics** for metrics
- Log cache errors and warnings
- Track database query reduction
- Monitor response times for cached endpoints
- **Don't add custom metrics** that duplicate existing monitoring

## Important Notes

### TTL Units
- **cache-manager + Keyv**: TTL in **milliseconds**
- **Direct Redis commands**: TTL in **seconds** (EXPIRE, SETEX) or **milliseconds** (PEXPIRE, PSETEX)
- **HTTP cache headers**: TTL in **seconds** (max-age, s-maxage)
- **Client SDK**: TTL in **seconds** (expiresAt calculation)
@@ -1,41 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Database Performance & Prisma Best Practices

## Critical Performance Rules

### Response Count Queries
- **NEVER** use `skip`/`offset` with `prisma.response.count()` - this causes expensive subqueries with OFFSET
- Always use only `where` clauses for count operations: `prisma.response.count({ where: { ... } })`
- For pagination, separate count queries from data queries
- Reference: [apps/web/lib/response/service.ts](mdc:apps/web/lib/response/service.ts) lines 654-686

### Prisma Query Optimization
- Use proper indexes defined in [packages/database/schema.prisma](mdc:packages/database/schema.prisma)
- Leverage existing indexes: `@@index([surveyId, createdAt])`, `@@index([createdAt])`
- Use cursor-based pagination for large datasets instead of offset-based
- Cache frequently accessed data using React Cache and custom cache tags

### Date Range Filtering
- When filtering by `createdAt`, always use indexed queries
- Combine with `surveyId` for optimal performance: `{ surveyId, createdAt: { gte: start, lt: end } }` (see the sketch below)
- Avoid complex WHERE clauses that can't utilize indexes
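
A minimal sketch of an index-friendly date-range count. The `rangeStart`/`rangeEnd` variables are placeholders; the filter shape matches the `@@index([surveyId, createdAt])` composite index referenced above:

```typescript
// Counts responses for one survey within a date window; only a where clause,
// so Postgres can serve it from the (surveyId, createdAt) index without OFFSET work.
const responseCount = await prisma.response.count({
  where: {
    surveyId,
    createdAt: { gte: rangeStart, lt: rangeEnd },
  },
});
```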

### Count vs Data Separation
- Always separate count queries from data fetching queries
- Use `Promise.all()` to run count and data queries in parallel
- Example pattern from [apps/web/modules/api/v2/management/responses/lib/response.ts](mdc:apps/web/modules/api/v2/management/responses/lib/response.ts):

```typescript
const [responses, totalCount] = await Promise.all([
  prisma.response.findMany(query),
  prisma.response.count({ where: whereClause }),
]);
```

### Monitoring & Debugging
- Monitor AWS RDS Performance Insights for problematic queries
- Look for queries with OFFSET in count operations - these indicate performance issues
- Use proper error handling with `DatabaseError` for Prisma exceptions (see the sketch below)
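
A hedged sketch of surfacing known Prisma errors as `DatabaseError`. The import path for `DatabaseError` is an assumption based on the shared types package, not a confirmed export:

```typescript
import { Prisma } from "@prisma/client";
import { DatabaseError } from "@formbricks/types/errors"; // assumed export path

export const getResponseCountSafe = async (surveyId: string): Promise<number> => {
  try {
    return await prisma.response.count({ where: { surveyId } });
  } catch (error) {
    // Convert known Prisma errors into the app-level DatabaseError
    if (error instanceof Prisma.PrismaClientKnownRequestError) {
      throw new DatabaseError(error.message);
    }
    throw error;
  }
};
```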
@@ -1,105 +0,0 @@
---
description: >
globs: schema.prisma
alwaysApply: false
---
# Formbricks Database Schema Reference

This rule provides a reference to the Formbricks database structure. For the most up-to-date and complete schema definitions, please refer to the schema.prisma file directly.

## Database Overview

Formbricks uses PostgreSQL with Prisma ORM. The schema is designed for multi-tenancy with strong data isolation between organizations.

### Core Hierarchy

```
Organization
└── Project
    └── Environment (production/development)
        ├── Survey
        ├── Contact
        ├── ActionClass
        └── Integration
```

## Schema Reference

For the complete and up-to-date database schema, please refer to:

- Main schema: `packages/database/schema.prisma`
- JSON type definitions: `packages/database/json-types.ts`

The schema.prisma file contains all model definitions, relationships, enums, and field types. The json-types.ts file contains TypeScript type definitions for JSON fields.

## Data Access Patterns

### Multi-tenancy

- All data is scoped by Organization
- Environment-level isolation for surveys and contacts
- Project-level grouping for related surveys

### Soft Deletion

Some models use soft deletion patterns:

- Check `isActive` fields where present
- Use proper filtering in queries

### Cascading Deletes

Configured cascade relationships:

- Organization deletion cascades to all child entities
- Survey deletion removes responses, displays, triggers
- Contact deletion removes attributes and responses

## Common Query Patterns

### Survey with Responses

```typescript
// Include response count and latest responses
const survey = await prisma.survey.findUnique({
  where: { id: surveyId },
  include: {
    responses: {
      take: 10,
      orderBy: { createdAt: "desc" },
    },
    _count: {
      select: { responses: true },
    },
  },
});
```

### Environment Scoping

```typescript
// Always scope by environment
const surveys = await prisma.survey.findMany({
  where: {
    environmentId: environmentId,
    // Additional filters...
  },
});
```

### Contact with Attributes

```typescript
const contact = await prisma.contact.findUnique({
  where: { id: contactId },
  include: {
    attributes: {
      include: {
        attributeKey: true,
      },
    },
  },
});
```

This schema supports Formbricks' core functionality: multi-tenant survey management, user targeting, response collection, and analysis, all while maintaining strict data isolation and security.
@@ -1,28 +0,0 @@
---
description: Guideline for writing end-user facing documentation in the apps/docs folder
globs:
alwaysApply: false
---

Follow these instructions and guidelines when asked to write documentation in the apps/docs folder.

Follow this structure to write the title and description, pick a matching icon, and insert it at the top of the MDX file:

---

title: "FEATURE NAME"
description: "1 concise sentence to describe WHEN the feature is being used and FOR WHAT BENEFIT."
icon: "link"

---

- Description: 1 concise sentence to describe WHEN the feature is being used and FOR WHAT BENEFIT.
- Make ample use of the Mintlify components you can find here https://mintlify.com/docs/llms.txt - e.g. if docs describe consecutive steps, always use the Mintlify Step component.
- In all headlines, capitalize only the current feature name (in Camel Case) and nothing else.
- The page should never start with an H1 headline, because it's already part of the template.
- Tonality: Keep it concise and to the point. Avoid jargon where possible.
- If a feature is part of the Enterprise Edition, use this note:

<Note>
FEATURE NAME is part of the [Enterprise Edition](/self-hosting/advanced/license)
</Note>
@@ -1,332 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# Formbricks Architecture & Patterns

## Monorepo Structure

### Apps Directory
- `apps/web/` - Main Next.js web application
- `packages/` - Shared packages and utilities

### Key Directories in Web App
```
apps/web/
├── app/              # Next.js 13+ app directory
│   ├── (app)/        # Main application routes
│   ├── (auth)/       # Authentication routes
│   ├── api/          # API routes
├── components/       # Shared components
├── lib/              # Utility functions and services
└── modules/          # Feature-specific modules
```

## Routing Patterns

### App Router Structure
The application uses the Next.js 13+ app router with route groups:

```
(app)/environments/[environmentId]/
├── surveys/[surveyId]/
│   ├── (analysis)/       # Analysis views
│   │   ├── responses/    # Response management
│   │   ├── summary/      # Survey summary
│   │   └── hooks/        # Analysis-specific hooks
│   ├── edit/             # Survey editing
│   └── settings/         # Survey settings
```

### Dynamic Routes
- `[environmentId]` - Environment-specific routes
- `[surveyId]` - Survey-specific routes

## Service Layer Pattern

### Service Organization
Services are organized by domain in `apps/web/lib/`:

```typescript
// Example: Response service
// apps/web/lib/response/service.ts
export const getResponseCountAction = async ({
  surveyId,
  filterCriteria,
}: {
  surveyId: string;
  filterCriteria: any;
}) => {
  // Service implementation
};
```

### Action Pattern
Server actions follow a consistent pattern:

```typescript
// Action wrapper for service calls
export const getResponseCountAction = async (params) => {
  try {
    const result = await responseService.getCount(params);
    return { data: result };
  } catch (error) {
    return { error: error.message };
  }
};
```

## Context Patterns

### Provider Structure
Context providers follow a consistent pattern:

```typescript
// Provider component
export const ResponseFilterProvider = ({ children }: { children: React.ReactNode }) => {
  const [selectedFilter, setSelectedFilter] = useState(defaultFilter);

  const value = {
    selectedFilter,
    setSelectedFilter,
    // ... other state and methods
  };

  return (
    <ResponseFilterContext.Provider value={value}>
      {children}
    </ResponseFilterContext.Provider>
  );
};

// Hook for consuming context
export const useResponseFilter = () => {
  const context = useContext(ResponseFilterContext);
  if (!context) {
    throw new Error('useResponseFilter must be used within ResponseFilterProvider');
  }
  return context;
};
```

### Context Composition
Multiple contexts are often composed together:

```typescript
// Layout component with multiple providers
export default function AnalysisLayout({ children }: { children: React.ReactNode }) {
  return (
    <ResponseFilterProvider>
      <ResponseCountProvider>
        {children}
      </ResponseCountProvider>
    </ResponseFilterProvider>
  );
}
```

## Component Patterns

### Page Components
Page components are located in the app directory and follow this pattern:

```typescript
// apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/responses/page.tsx
export default function ResponsesPage() {
  return (
    <div>
      <ResponsesTable />
      <ResponsesPagination />
    </div>
  );
}
```

### Component Organization
- **Pages** - Route components in the app directory
- **Components** - Reusable UI components
- **Modules** - Feature-specific components and logic

### Shared Components
Common components are in `apps/web/components/`:
- UI components (buttons, inputs, modals)
- Layout components (headers, sidebars)
- Data display components (tables, charts)

## Hook Patterns

### Custom Hook Structure
Custom hooks follow consistent patterns:

```typescript
export const useResponseCount = ({
  survey,
  initialCount
}: {
  survey: TSurvey;
  initialCount?: number;
}) => {
  const [responseCount, setResponseCount] = useState(initialCount ?? 0);
  const [isLoading, setIsLoading] = useState(false);

  // Hook logic...

  return {
    responseCount,
    isLoading,
    refetch,
  };
};
```

### Hook Dependencies
- Use context hooks for shared state
- Implement proper cleanup with AbortController (see the sketch below)
- Optimize dependency arrays to prevent unnecessary re-renders
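
A minimal sketch of the AbortController cleanup pattern in a custom hook. The endpoint URL and response shape are placeholders, not an actual Formbricks API route:

```typescript
import { useEffect, useState } from "react";

export const useSurveySummary = (surveyId: string) => {
  const [summary, setSummary] = useState<unknown>(null);

  useEffect(() => {
    const controller = new AbortController();

    // Placeholder endpoint; abort cancels the request when the component unmounts
    fetch(`/api/surveys/${surveyId}/summary`, { signal: controller.signal })
      .then((res) => res.json())
      .then(setSummary)
      .catch((err) => {
        if (err.name !== "AbortError") console.error("Summary fetch failed:", err);
      });

    return () => controller.abort();
  }, [surveyId]); // dependency array keeps the effect scoped to the current survey

  return { summary };
};
```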

## Data Fetching Patterns

### Server Actions
The app uses Next.js server actions for data fetching:

```typescript
// Server action
export async function getResponsesAction(params: GetResponsesParams) {
  const responses = await getResponses(params);
  return { data: responses };
}

// Client usage
const { data } = await getResponsesAction(params);
```

### Error Handling
Consistent error handling across the application:

```typescript
try {
  const result = await apiCall();
  return { data: result };
} catch (error) {
  console.error("Operation failed:", error);
  return { error: error.message };
}
```

## Type Safety

### Type Organization
Types are organized in packages:
- `@formbricks/types` - Shared type definitions
- Local types in component/hook files

### Common Types
```typescript
import { TSurvey } from "@formbricks/types/surveys/types";
import { TResponse } from "@formbricks/types/responses";
import { TEnvironment } from "@formbricks/types/environment";
```

## State Management

### Local State
- Use `useState` for component-specific state
- Use `useReducer` for complex state logic
- Use refs for mutable values that don't trigger re-renders

### Global State
- React Context for feature-specific shared state
- URL state for filters and pagination
- Server state through server actions

## Performance Considerations

### Code Splitting
- Dynamic imports for heavy components
- Route-based code splitting with the app router
- Lazy loading for non-critical features

### Caching Strategy
- Server-side caching for database queries
- Client-side caching with React Query (where applicable)
- Static generation for public pages

## Testing Strategy

### Test Organization
```
component/
├── Component.tsx
├── Component.test.tsx
└── hooks/
    ├── useHook.ts
    └── useHook.test.tsx
```

### Test Patterns
- Unit tests for utilities and services
- Integration tests for components with context
- Hook tests with proper mocking

## Build & Deployment

### Build Process
- TypeScript compilation
- Next.js build optimization
- Asset optimization and bundling

### Environment Configuration
- Environment-specific configurations
- Feature flags for gradual rollouts
- Database connection management

## Security Patterns

### Authentication
- Session-based authentication
- Environment-based access control
- API route protection

### Data Validation
- Input validation on both client and server (see the sketch below)
- Type-safe API contracts
- Sanitization of user inputs
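
A hedged sketch of server-side input validation with Zod. The schema name and fields are illustrative, not an actual Formbricks contract:

```typescript
import { z } from "zod";

// Illustrative schema: validate and sanitize input on the server before using it
const ZCreateSurveyInput = z.object({
  environmentId: z.string().cuid(),
  name: z.string().trim().min(1).max(200),
});

export const createSurveyAction = async (input: unknown) => {
  const parsed = ZCreateSurveyInput.safeParse(input);
  if (!parsed.success) {
    return { error: "Invalid input" };
  }
  // parsed.data is now typed and trimmed
  return { data: parsed.data };
};
```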

## Monitoring & Observability

### Error Tracking
- Client-side error boundaries
- Server-side error logging
- Performance monitoring

### Analytics
- User interaction tracking
- Performance metrics
- Database query monitoring

## Best Practices Summary

### Code Organization
- ✅ Follow the established directory structure
- ✅ Use consistent naming conventions
- ✅ Separate concerns (UI, logic, data)
- ✅ Keep components focused and small

### Performance
- ✅ Implement proper loading states
- ✅ Use AbortController for async operations
- ✅ Optimize database queries
- ✅ Implement proper caching strategies

### Type Safety
- ✅ Use TypeScript throughout
- ✅ Define proper interfaces for props
- ✅ Use type guards for runtime validation
- ✅ Leverage shared type packages

### Testing
- ✅ Write tests for critical functionality
- ✅ Mock external dependencies properly
- ✅ Test error scenarios and edge cases
- ✅ Maintain good test coverage
@@ -1,232 +0,0 @@
---
description: Security best practices and guidelines for writing GitHub Actions and workflows
globs: .github/workflows/*.yml,.github/workflows/*.yaml,.github/actions/*/action.yml,.github/actions/*/action.yaml
---

# GitHub Actions Security Best Practices

## Required Security Measures

### 1. Set Minimum GITHUB_TOKEN Permissions

Always explicitly set the minimum required permissions for GITHUB_TOKEN:

```yaml
permissions:
  contents: read
  # Only add additional permissions if absolutely necessary:
  # pull-requests: write  # for commenting on PRs
  # issues: write         # for creating/updating issues
  # checks: write         # for publishing check results
```

### 2. Add Harden-Runner as First Step

For **every job** on `ubuntu-latest`, add Harden-Runner as the first step:

```yaml
- name: Harden the runner
  uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
  with:
    egress-policy: audit # or 'block' for stricter security
```

### 3. Pin Actions to Full Commit SHA

**Always** pin third-party actions to their full commit SHA, not tags:

```yaml
# ❌ BAD - uses mutable tag
- uses: actions/checkout@v4

# ✅ GOOD - pinned to immutable commit SHA
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
```

### 4. Secure Variable Handling

Prevent command injection by properly quoting variables:

```yaml
# ❌ BAD - potential command injection
run: echo "Processing ${{ inputs.user_input }}"

# ✅ GOOD - properly quoted
env:
  USER_INPUT: ${{ inputs.user_input }}
run: echo "Processing ${USER_INPUT}"
```

Use `${VARIABLE}` syntax in shell scripts instead of `$VARIABLE`.

### 5. Environment Variables for Secrets

Store sensitive data in environment variables, not inline:

```yaml
# ❌ BAD
run: curl -H "Authorization: Bearer ${{ secrets.TOKEN }}" api.example.com

# ✅ GOOD
env:
  API_TOKEN: ${{ secrets.TOKEN }}
run: curl -H "Authorization: Bearer ${API_TOKEN}" api.example.com
```

## Workflow Structure Best Practices

### Required Workflow Elements

```yaml
name: "Descriptive Workflow Name"

on:
  # Define specific triggers
  push:
    branches: [main]
  pull_request:
    branches: [main]

# Always set explicit permissions
permissions:
  contents: read

jobs:
  job-name:
    name: "Descriptive Job Name"
    runs-on: ubuntu-latest
    timeout-minutes: 30 # tune per job; standardize repo-wide

    # Set job-level permissions if different from workflow level
    permissions:
      contents: read

    steps:
      # Always start with Harden-Runner on ubuntu-latest
      - name: Harden the runner
        uses: step-security/harden-runner@ec9f2d5744a09debf3a187a3f4f675c53b671911 # v2.13.0
        with:
          egress-policy: audit

      # Pin all actions to commit SHA
      - name: Checkout code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
```

### Input Validation for Actions

For composite actions, always validate inputs:

```yaml
inputs:
  user_input:
    description: "User provided input"
    required: true

runs:
  using: "composite"
  steps:
    - name: Validate input
      shell: bash
      run: |
        # Harden shell and validate input format/content before use
        set -euo pipefail

        USER_INPUT="${{ inputs.user_input }}"

        if [[ ! "${USER_INPUT}" =~ ^[A-Za-z0-9._-]+$ ]]; then
          echo "❌ Invalid input format"
          exit 1
        fi
```

## Docker Security in Actions

### Pin Docker Images to Digests

```yaml
# ❌ BAD - mutable tag
container: node:18

# ✅ GOOD - pinned to digest
container: node:18@sha256:a1ba21bf0c92931d02a8416f0a54daad66cb36a85d6a37b82dfe1604c4c09cad
```

## Common Patterns

### Secure File Operations

```yaml
- name: Process files securely
  shell: bash
  env:
    FILE_PATH: ${{ inputs.file_path }}
  run: |
    set -euo pipefail # Fail on errors, undefined vars, pipe failures

    # Use absolute paths and validate
    SAFE_PATH=$(realpath "${FILE_PATH}")
    if [[ "$SAFE_PATH" != "${GITHUB_WORKSPACE}"/* ]]; then
      echo "❌ Path outside workspace"
      exit 1
    fi
```

### Artifact Handling

```yaml
- name: Upload artifacts securely
  uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
  with:
    name: build-artifacts
    path: |
      dist/
      !dist/**/*.log # Exclude sensitive files
    retention-days: 30
```

### GHCR authentication for pulls/scans

```yaml
# Minimal permissions required for GHCR pulls/scans
permissions:
  contents: read
  packages: read

steps:
  - name: Log in to GitHub Container Registry
    uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
    with:
      registry: ghcr.io
      username: ${{ github.actor }}
      password: ${{ secrets.GITHUB_TOKEN }}
```

## Security Checklist

- [ ] Minimum GITHUB_TOKEN permissions set
- [ ] Harden-Runner added to all ubuntu-latest jobs
- [ ] All third-party actions pinned to commit SHA
- [ ] Input validation implemented for custom actions
- [ ] Variables properly quoted in shell scripts
- [ ] Secrets stored in environment variables
- [ ] Docker images pinned to digests (if used)
- [ ] Error handling with `set -euo pipefail`
- [ ] File paths validated and sanitized
- [ ] No sensitive data in logs or outputs
- [ ] GHCR login performed before pulls/scans (packages: read)
- [ ] Job timeouts configured (`timeout-minutes`)

## Recommended Additional Workflows

Consider adding these security-focused workflows to your repository:

1. **CodeQL Analysis** - Static Application Security Testing (SAST)
2. **Dependency Review** - Scan for vulnerable dependencies in PRs
3. **Dependabot Configuration** - Automated dependency updates

## Resources

- [GitHub Security Hardening Guide](https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions)
- [Step Security Harden-Runner](https://github.com/step-security/harden-runner)
- [Secure-Repo Best Practices](https://github.com/step-security/secure-repo)
@@ -1,457 +0,0 @@
---
title: i18n Management with Lingo.dev
description: Guidelines for managing internationalization (i18n) with Lingo.dev, including translation workflow, key validation, and best practices
---

# i18n Management with Lingo.dev

This rule defines the workflow and best practices for managing internationalization (i18n) in the Formbricks project using Lingo.dev.

## Overview

Formbricks uses [Lingo.dev](https://lingo.dev) for managing translations across multiple languages. The translation workflow includes:

1. **Translation Keys**: Defined in code using the `t()` function from `react-i18next`
2. **Translation Files**: JSON files stored in `apps/web/locales/` for each supported language
3. **Validation**: Automated scanning to detect missing and unused translation keys
4. **CI/CD**: Pre-commit hooks and GitHub Actions to enforce translation quality

## Translation Workflow

### 1. Using Translations in Code

When adding translatable text in the web app, use the `t()` function or the `<Trans>` component:

**Using the `t()` function:**
```tsx
import { useTranslate } from "@/lib/i18n/translate";

const MyComponent = () => {
  const { t } = useTranslate();

  return (
    <div>
      <h1>{t("common.welcome")}</h1>
      <p>{t("pages.dashboard.description")}</p>
    </div>
  );
};
```

**Using the `<Trans>` component (for text with HTML elements):**
```tsx
import { Trans } from "react-i18next";

const MyComponent = () => {
  return (
    <div>
      <p>
        <Trans
          i18nKey="auth.terms_agreement"
          components={{
            link: <a href="/terms" />,
            b: <b />
          }}
        />
      </p>
    </div>
  );
};
```

**Key Naming Conventions:**
- Use dot notation for nested keys: `section.subsection.key`
- Use descriptive names: `auth.login.success_message` not `auth.msg1`
- Group related keys together: `auth.*`, `errors.*`, `common.*`
- Use lowercase with underscores: `user_profile_settings` not `UserProfileSettings`

### 2. Translation File Structure

Translation files are located in `apps/web/locales/` and use the following naming convention:
- `en-US.json` (English - United States, default)
- `de-DE.json` (German)
- `fr-FR.json` (French)
- `pt-BR.json` (Portuguese - Brazil)
- etc.

**File Structure:**
```json
{
  "common": {
    "welcome": "Welcome",
    "save": "Save",
    "cancel": "Cancel"
  },
  "auth": {
    "login": {
      "title": "Login",
      "email_placeholder": "Enter your email",
      "password_placeholder": "Enter your password"
    }
  }
}
```

### 3. Adding New Translation Keys

When adding new translation keys:

1. **Add the key in your code** using `t("your.new.key")`
2. **Add the translation for that key in the `en-US.json` file**
3. **Run the translation workflow:**
   ```bash
   pnpm i18n
   ```
   This will:
   - Generate translations for all languages using Lingo.dev
   - Validate that all keys are present and used
4. **Review and commit** the generated translation files

### 4. Available Scripts

```bash
# Generate translations using Lingo.dev
pnpm generate-translations

# Scan and validate translation keys
pnpm scan-translations

# Full workflow: generate + validate
pnpm i18n

# Validate only (without generation)
pnpm i18n:validate
```

## Translation Key Validation

### Automated Validation

The project includes automated validation that runs:
- **Pre-commit hook**: Validates translations before allowing commits (when `LINGODOTDEV_API_KEY` is set)
- **GitHub Actions**: Validates translations on every PR and push to main

### Validation Rules

The validation script (`scan-translations.ts`) checks for:

1. **Missing Keys**: Translation keys used in code but not present in translation files
2. **Unused Keys**: Translation keys present in translation files but not used in code
3. **Incomplete Translations**: Keys that exist in the default language (`en-US`) but are missing in target languages

**What gets scanned:**
- All `.ts` and `.tsx` files in `apps/web/`
- Both `t()` function calls and `<Trans i18nKey="">` components
- All locale files (`de-DE.json`, `fr-FR.json`, `ja-JP.json`, etc.)

**What gets excluded:**
- Test files (`*.test.ts`, `*.test.tsx`, `*.spec.ts`, `*.spec.tsx`)
- Build directories (`node_modules`, `dist`, `build`, `.next`, `coverage`)
- Locale files themselves (from code scanning)

**Note:** Test files are excluded because they often use mock or example translation keys for testing purposes that don't need to exist in production translation files.
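
For illustration only, a simplified sketch of how this kind of key scanning can work. This is not the actual `scan-translations.ts`; the regexes cover only the common `t("…")` and `i18nKey="…"` forms:

```typescript
import { readFileSync } from "fs";

// Extract translation keys from one source file (illustrative, not the real script)
export const extractKeysFromFile = (filePath: string): Set<string> => {
  const source = readFileSync(filePath, "utf-8");
  const keys = new Set<string>();

  // Matches t("some.key") / t('some.key') and <Trans i18nKey="some.key">
  const patterns = [/\bt\(\s*["']([^"']+)["']/g, /i18nKey=["']([^"']+)["']/g];
  for (const pattern of patterns) {
    for (const match of source.matchAll(pattern)) {
      keys.add(match[1]);
    }
  }
  return keys;
};
```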

### Fixing Validation Errors

#### Missing Keys

If you encounter missing key errors:

```
❌ MISSING KEYS (2):

These keys are used in code but not found in translation files:

  • auth.signup.email_required
  • settings.profile.update_success
```

**Resolution:**
1. Ensure that translations for those keys are present in `en-US.json`.
2. Run `pnpm generate-translations` to have Lingo.dev generate the missing translations
3. OR manually add the keys to `apps/web/locales/en-US.json`:
   ```json
   {
     "auth": {
       "signup": {
         "email_required": "Email is required"
       }
     },
     "settings": {
       "profile": {
         "update_success": "Profile updated successfully"
       }
     }
   }
   ```
4. Run `pnpm scan-translations` to verify
5. Commit the changes

#### Unused Keys

If you encounter unused key errors:

```
⚠️ UNUSED KEYS (1):

These keys exist in translation files but are not used in code:

  • old.deprecated.key
```

**Resolution:**
1. If the key is truly unused, remove it from all translation files
2. If the key should be used, add it to your code using `t("old.deprecated.key")`
3. Run `pnpm scan-translations` to verify
4. Commit the changes

#### Incomplete Translations

If you encounter incomplete translation errors:

```
⚠️ INCOMPLETE TRANSLATIONS:

Some keys from en-US are missing in target languages:

📝 de-DE (5 missing keys):
  • auth.new_feature.title
  • auth.new_feature.description
  • settings.advanced.option
  ... and 2 more
```

**Resolution:**
1. **Recommended:** Run `pnpm generate-translations` to have Lingo.dev automatically translate the missing keys
2. **Manual:** Add the missing keys to the target language files:
   ```bash
   # Copy the structure from en-US.json and translate the values
   # For example, in de-DE.json:
   {
     "auth": {
       "new_feature": {
         "title": "Neues Feature",
         "description": "Beschreibung des neuen Features"
       }
     }
   }
   ```
3. Run `pnpm scan-translations` to verify all translations are complete
4. Commit the changes

## Pre-commit Hook Behavior

The pre-commit hook will:

1. Run `lint-staged` for code formatting
2. If `LINGODOTDEV_API_KEY` is set:
   - Generate translations using Lingo.dev
   - Validate translation keys
   - Auto-add updated locale files to the commit
   - **Block the commit** if validation fails
3. If `LINGODOTDEV_API_KEY` is not set:
   - Skip translation validation (for community contributors)
   - Show a warning message

## Environment Variables

### LINGODOTDEV_API_KEY

This is the API key for the Lingo.dev integration.

**For Core Team:**
- Add to your local `.env` file
- Required for running translation generation

**For Community Contributors:**
- Not required for local development
- Translation validation will be skipped
- The CI will still validate translations

## Best Practices

### 1. Keep Keys Organized

Group related keys together:
```json
{
  "auth": {
    "login": { ... },
    "signup": { ... },
    "forgot_password": { ... }
  },
  "dashboard": {
    "header": { ... },
    "sidebar": { ... }
  }
}
```

### 2. Avoid Hardcoded Strings

**❌ Bad:**
```tsx
<button>Click here</button>
```

**✅ Good:**
```tsx
<button>{t("common.click_here")}</button>
```

### 3. Use Interpolation for Dynamic Content

**❌ Bad:**
```tsx
{t("welcome")} {userName}!
```

**✅ Good:**
```tsx
{t("auth.welcome_message", { userName })}
```

With translation:
```json
{
  "auth": {
    "welcome_message": "Welcome, {userName}!"
  }
}
```

### 4. Avoid Dynamic Key Construction

**❌ Bad:**
```tsx
const key = `errors.${errorCode}`;
t(key);
```

**✅ Good:**
```tsx
switch (errorCode) {
  case "401":
    return t("errors.unauthorized");
  case "404":
    return t("errors.not_found");
  default:
    return t("errors.unknown");
}
```

### 5. Test Translation Keys

When adding new features:
1. Add translation keys
2. Test in multiple languages using the language switcher
3. Ensure text doesn't overflow in longer translations (German, French)
4. Run `pnpm scan-translations` before committing

## Troubleshooting

### Issue: Pre-commit hook fails with validation errors

**Solution:**
```bash
# Run the full i18n workflow
pnpm i18n

# Fix any missing or unused keys
# Then commit again
git add .
git commit -m "your message"
```

### Issue: Translation validation passes locally but fails in CI

**Solution:**
- Ensure all translation files are committed
- Check that `scan-translations.ts` hasn't been modified
- Verify that locale files are properly formatted JSON

### Issue: Cannot commit because of missing translations

**Solution:**
```bash
# If you have LINGODOTDEV_API_KEY:
pnpm generate-translations

# If you don't have the API key (community contributor):
# Manually add the missing keys to en-US.json
# Then run validation:
pnpm scan-translations
```

### Issue: Getting "unused keys" for keys that are used

**Solution:**
- The script scans `.ts` and `.tsx` files only
- If keys are used in other file types, they may be flagged
- Verify the key is actually used with `grep -r "your.key" apps/web/`
- If it's a false positive, consider updating the scanning patterns in `scan-translations.ts`

## AI Assistant Guidelines

When assisting with i18n-related tasks, always:

1. **Use the `t()` function** for all user-facing text
2. **Follow key naming conventions** (lowercase, dots for nesting)
3. **Run validation** after making changes: `pnpm scan-translations`
4. **Fix missing keys** by adding them to `en-US.json`
5. **Remove unused keys** from all translation files
6. **Test the pre-commit hook** if making changes to the translation workflow
7. **Update this rule file** if the translation workflow changes

### Fixing Missing Translation Keys

When the AI encounters missing translation key errors:

1. Identify the missing keys from the error output
2. Determine the appropriate section and naming for each key
3. Add the keys to `apps/web/locales/en-US.json` with meaningful English text
4. Ensure proper JSON structure and nesting
5. Run `pnpm scan-translations` to verify
6. Inform the user that other language files will be updated via Lingo.dev

**Example:**
```typescript
// Error: Missing key "settings.api.rate_limit_exceeded"

// Add to en-US.json:
{
  "settings": {
    "api": {
      "rate_limit_exceeded": "API rate limit exceeded. Please try again later."
    }
  }
}
```

### Removing Unused Translation Keys

When the AI encounters unused translation key errors:

1. Verify the keys are truly unused by searching the codebase
2. Remove the keys from `apps/web/locales/en-US.json`
3. Note that removal from other language files can be handled via Lingo.dev
4. Run `pnpm scan-translations` to verify

## Migration Notes

This project previously used Tolgee for translations. As of this migration:

- **Old scripts**: `tolgee-pull` is deprecated (kept for reference)
- **New scripts**: Use `pnpm i18n` or `pnpm generate-translations`
- **Old workflows**: `tolgee.yml` and `tolgee-missing-key-check.yml` removed
- **New workflow**: `translation-check.yml` handles all validation

---

**Last Updated:** October 14, 2025

**Related Files:**
- `scan-translations.ts` - Translation validation script
- `.husky/pre-commit` - Pre-commit hook with i18n validation
- `.github/workflows/translation-check.yml` - CI workflow for translation validation
- `apps/web/locales/*.json` - Translation files
@@ -1,52 +0,0 @@
---
description:
globs:
alwaysApply: false
---
# React Context & Provider Patterns

## Context Provider Best Practices

### Provider Implementation
- Use TypeScript interfaces for provider props with optional `initialCount` for testing
- Implement proper cleanup in `useEffect` to avoid React hooks warnings
- Reference: [apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/components/ResponseCountProvider.tsx](mdc:apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/components/ResponseCountProvider.tsx)

### Cleanup Pattern for Refs
```typescript
useEffect(() => {
  const currentPendingRequests = pendingRequests.current;
  const currentAbortController = abortController.current;

  return () => {
    if (currentAbortController) {
      currentAbortController.abort();
    }
    currentPendingRequests.clear();
  };
}, []);
```

### Testing Context Providers
- Always wrap components using context in the provider during tests
- Use `initialCount` prop for predictable test scenarios
- Mock context dependencies like `useParams`, `useResponseFilter`
- Example from [apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SurveyAnalysisCTA.test.tsx](mdc:apps/web/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/SurveyAnalysisCTA.test.tsx):

```typescript
render(
  <ResponseCountProvider survey={dummySurvey} initialCount={5}>
    <ComponentUnderTest />
  </ResponseCountProvider>
);
```

### Required Mocks for Context Testing
- Mock `next/navigation` with `useParams` returning environment and survey IDs
- Mock response filter context and actions
- Mock API actions that the provider depends on

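
A minimal sketch of those mocks, assuming the suite runs on Vitest (`vi.mock`; swap for `jest.mock` if Jest is used). The module paths and returned values are illustrative; point them at the provider's real dependencies:

```typescript
import { vi } from "vitest";

// Stable route params so the provider resolves the same environment/survey in every test
vi.mock("next/navigation", () => ({
  useParams: () => ({ environmentId: "env-1", surveyId: "survey-1" }),
  useRouter: () => ({ push: vi.fn(), refresh: vi.fn() }),
  useSearchParams: () => new URLSearchParams(),
}));

// Illustrative path: the response filter context the provider reads from
vi.mock("@/app/lib/response-filter-context", () => ({
  useResponseFilter: () => ({ selectedFilter: { filter: [] }, dateRange: {} }),
}));

// Illustrative path: the server action the provider calls to fetch the count
vi.mock("@/app/lib/response-count-actions", () => ({
  getResponseCountAction: vi.fn().mockResolvedValue({ data: 5 }),
}));
```
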
### Context Hook Usage
- Create custom hooks like `useResponseCountContext()` for consuming context
- Provide meaningful error messages when context is used outside provider
- Use context for shared state that multiple components need to access
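
A minimal sketch of such a hook; the context value shape is illustrative and should mirror what `ResponseCountProvider` actually exposes:

```typescript
import { createContext, useContext } from "react";

interface ResponseCountContextValue {
  responseCount: number;
  refreshResponseCount: () => void;
}

export const ResponseCountContext = createContext<ResponseCountContextValue | undefined>(undefined);

export const useResponseCountContext = (): ResponseCountContextValue => {
  const context = useContext(ResponseCountContext);
  if (!context) {
    // A meaningful error beats a cryptic "cannot read property of undefined" further down the tree
    throw new Error("useResponseCountContext must be used within a ResponseCountProvider");
  }
  return context;
};
```
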
@@ -1,179 +0,0 @@
---
description: Apply these quality standards before finalizing code changes to ensure DRY principles, React best practices, TypeScript conventions, and maintainable code.
globs:
alwaysApply: false
---

# Review & Refine

Before finalizing any code changes, review your implementation against these quality standards:

## Core Principles

### DRY (Don't Repeat Yourself)

- Extract duplicated logic into reusable functions or hooks
- If the same code appears in multiple places, consolidate it
- Create helper functions at appropriate scope (component-level, module-level, or utility files)
- Avoid copy-pasting code blocks

### Code Reduction

- Remove unnecessary code, comments, and abstractions
- Prefer built-in solutions over custom implementations
- Consolidate similar logic
- Remove dead code and unused imports
- Question if every line of code is truly needed

## React Best Practices

### Component Design

- Keep components focused on a single responsibility
- Extract complex logic into custom hooks
- Prefer composition over prop drilling
- Use children props and render props when appropriate
- Keep component files under 300 lines when possible

### Hooks Usage

- Follow Rules of Hooks (only call at top level, only in React functions)
- Extract complex `useEffect` logic into custom hooks
- Use `useMemo` and `useCallback` only when you have a measured performance issue
- Declare dependency arrays correctly - don't ignore exhaustive-deps warnings
- Keep `useEffect` focused on a single concern

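
As a sketch of the "extract complex `useEffect` logic" point: a polling effect that would otherwise bloat a component can live in a small, focused hook. The hook name, fetcher, and default interval below are illustrative:

```typescript
import { useEffect, useState } from "react";

// Usage in a component: const count = usePolledCount(() => fetchResponseCount(surveyId));
export const usePolledCount = (fetchCount: () => Promise<number>, intervalMs = 30000) => {
  const [count, setCount] = useState<number | null>(null);

  useEffect(() => {
    let cancelled = false;

    const load = async () => {
      const next = await fetchCount();
      if (!cancelled) setCount(next);
    };

    void load();
    const intervalId = setInterval(load, intervalMs);

    return () => {
      // The effect owns exactly one concern: the polling lifecycle
      cancelled = true;
      clearInterval(intervalId);
    };
  }, [fetchCount, intervalMs]);

  return count;
};
```
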
### State Management

- Colocate state as close as possible to where it's used
- Lift state only when necessary
- Use `useReducer` for complex state logic with multiple sub-values
- Avoid derived state - compute values during render instead
- Don't store values in state that can be computed from props

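
To illustrate the `useReducer` point, filter state whose sub-values move together is often clearer as a reducer than as several `useState` calls. The fields below are made up for the example:

```typescript
import { useReducer } from "react";

interface FilterState {
  searchTerm: string;
  status: "all" | "completed" | "partial";
  page: number;
}

type FilterAction =
  | { type: "setSearchTerm"; searchTerm: string }
  | { type: "setStatus"; status: FilterState["status"] }
  | { type: "nextPage" }
  | { type: "reset" };

const initialState: FilterState = { searchTerm: "", status: "all", page: 1 };

const filterReducer = (state: FilterState, action: FilterAction): FilterState => {
  switch (action.type) {
    case "setSearchTerm":
      // Changing the search also resets pagination - the sub-values update together
      return { ...state, searchTerm: action.searchTerm, page: 1 };
    case "setStatus":
      return { ...state, status: action.status, page: 1 };
    case "nextPage":
      return { ...state, page: state.page + 1 };
    case "reset":
      return initialState;
  }
};

// In a component: const [filters, dispatch] = useReducer(filterReducer, initialState);
```
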
### Event Handlers

- Name event handlers with `handle` prefix (e.g., `handleClick`, `handleSubmit`)
- Extract complex event handler logic into separate functions
- Avoid inline arrow functions in JSX when they contain complex logic

## TypeScript Best Practices

### Type Safety

- Prefer type inference over explicit types when possible
- Use `const` assertions for literal types
- Avoid `any` - use `unknown` if type is truly unknown
- Use discriminated unions for complex conditional logic
- Leverage type guards and narrowing

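
A small sketch of discriminated unions and type guards working together; the types are illustrative, not taken from `@formbricks/types`:

```typescript
type SurveyResult =
  | { status: "success"; responseCount: number }
  | { status: "error"; message: string };

// "status" is the discriminant: narrowing on it gives the correct shape in each branch
const describeResult = (result: SurveyResult): string => {
  if (result.status === "error") {
    return `Failed: ${result.message}`;
  }
  return `Collected ${result.responseCount} responses`;
};

// A reusable type guard instead of an "as" assertion
const isErrorResult = (result: SurveyResult): result is Extract<SurveyResult, { status: "error" }> =>
  result.status === "error";
```
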
### Interface & Type Usage

- Use existing types from `@formbricks/types` - don't recreate them
- Prefer `interface` for object shapes that might be extended
- Prefer `type` for unions, intersections, and mapped types
- Define types close to where they're used unless they're shared
- Export types from index files for shared types

### Type Assertions

- Avoid type assertions (`as`) when possible
- Use type guards instead of assertions
- Only assert when you have more information than TypeScript

## Code Organization

### Separation of Concerns

- Separate business logic from UI rendering
- Extract API calls into separate functions or modules
- Keep data transformation separate from component logic
- Use custom hooks for stateful logic that doesn't render UI

### Function Clarity

- Functions should do one thing well
- Name functions clearly and descriptively
- Keep functions small (aim for under 20 lines)
- Extract complex conditionals into named boolean variables or functions
- Avoid deep nesting (max 3 levels)

### File Structure

- Group related functions together
- Order declarations logically (types → hooks → helpers → component)
- Keep imports organized (external → internal → relative)
- Consider splitting large files by concern

## Additional Quality Checks

### Performance

- Don't optimize prematurely - measure first
- Avoid creating new objects/arrays/functions in render unnecessarily
- Use keys properly in lists (stable, unique identifiers)
- Lazy load heavy components when appropriate

### Accessibility

- Use semantic HTML elements
- Include ARIA labels where needed
- Ensure keyboard navigation works
- Check color contrast and focus states

### Error Handling

- Handle error states in components
- Provide user feedback for failed operations
- Use error boundaries for component errors
- Log errors appropriately (avoid swallowing errors silently)

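
A sketch of the user-feedback and no-silent-failure points, assuming `react-hot-toast` (which the app already uses); the action-result shape is illustrative:

```typescript
import toast from "react-hot-toast";

type ActionResult = { serverError?: string } | undefined;

// Wrap a server-action call so failures reach the user and the console instead of being swallowed
export const runWithFeedback = async (
  action: () => Promise<ActionResult>,
  successMessage: string
): Promise<void> => {
  try {
    const result = await action();
    if (result?.serverError) {
      toast.error(result.serverError);
      return;
    }
    toast.success(successMessage);
  } catch (error) {
    console.error(error); // keep a trace for debugging
    toast.error(error instanceof Error ? error.message : "Something went wrong");
  }
};
```
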
### Naming Conventions

- Use descriptive names (avoid abbreviations unless very common)
- Boolean variables/props should sound like yes/no questions (`isLoading`, `hasError`, `canEdit`)
- Arrays should be plural (`users`, `choices`, `items`)
- Event handlers: `handleX` in components, `onX` for props
- Constants in UPPER_SNAKE_CASE only for true constants

### Code Readability

- Prefer early returns to reduce nesting
- Use destructuring to make code clearer
- Break complex expressions into named variables
- Add comments only when code can't be made self-explanatory
- Use whitespace to group related code

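
A before/after sketch of the early-return and named-condition points:

```typescript
interface Survey {
  status: "draft" | "inProgress" | "completed";
  responseCount: number;
}

// Before: nesting hides the main path
const getBadgeLabelNested = (survey?: Survey): string => {
  if (survey) {
    if (survey.status === "completed") {
      if (survey.responseCount > 0) {
        return "Completed";
      }
    }
  }
  return "Open";
};

// After: an early return plus a named condition keep the logic flat and readable
const getBadgeLabel = (survey?: Survey): string => {
  if (!survey) return "Open";

  const isCompletedWithResponses = survey.status === "completed" && survey.responseCount > 0;
  return isCompletedWithResponses ? "Completed" : "Open";
};
```
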
### Testing Considerations

- Write code that's easy to test (pure functions, clear inputs/outputs)
- Avoid hard-to-mock dependencies when possible
- Keep side effects at the edges of your code

## Review Checklist

Before submitting your changes, ask yourself:

1. **DRY**: Is there any duplicated logic I can extract?
2. **Clarity**: Would another developer understand this code easily?
3. **Simplicity**: Is this the simplest solution that works?
4. **Types**: Am I using TypeScript effectively?
5. **React**: Am I following React idioms and best practices?
6. **Performance**: Are there obvious performance issues?
7. **Separation**: Are concerns properly separated?
8. **Testing**: Is this code testable?
9. **Maintenance**: Will this be easy to change in 6 months?
10. **Deletion**: Can I remove any code and still accomplish the goal?

## When to Apply This Rule

Apply this rule:

- After implementing a feature but before marking it complete
- When you notice your code feels "messy" or complex
- Before requesting code review
- When you see yourself copy-pasting code
- After receiving feedback about code quality

Don't let perfect be the enemy of good, but always strive for:
**Simple, readable, maintainable code that does one thing well.**
@@ -1,216 +0,0 @@
|
||||
---
|
||||
description: Migrate deprecated UI components to a unified component
|
||||
globs:
|
||||
alwaysApply: false
|
||||
---
|
||||
# Component Migration Automation Rule
|
||||
|
||||
## Overview
|
||||
This rule automates the migration of deprecated components to new component systems in React/TypeScript codebases.
|
||||
|
||||
## Trigger
|
||||
When the user requests component migration (e.g., "migrate [DeprecatedComponent] to [NewComponent]" or "component migration").
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Discovery and Planning
|
||||
1. **Identify migration parameters:**
|
||||
- Ask user for deprecated component name (e.g., "Modal")
|
||||
- Ask user for new component name(s) (e.g., "Dialog")
|
||||
- Ask for any components to exclude (e.g., "ModalWithTabs")
|
||||
- Ask for specific import paths if needed
|
||||
|
||||
2. **Scan codebase** for deprecated components:
|
||||
- Search for `import.*[DeprecatedComponent]` patterns
|
||||
- Exclude specified components that should not be migrated
|
||||
- List all found components with file paths
|
||||
- Present numbered list to user for confirmation
|
||||
|
||||
### Step 2: Component-by-Component Migration
|
||||
For each component, follow this exact sequence:
|
||||
|
||||
#### 2.1 Component Migration
|
||||
- **Import changes:**
|
||||
- Ask user to provide the new import structure
|
||||
- Example transformation pattern:
|
||||
```typescript
|
||||
// FROM:
|
||||
import { [DeprecatedComponent] } from "@/components/ui/[DeprecatedComponent]"
|
||||
|
||||
// TO:
|
||||
import {
|
||||
[NewComponent],
|
||||
[NewComponentPart1],
|
||||
[NewComponentPart2],
|
||||
// ... other parts
|
||||
} from "@/components/ui/[NewComponent]"
|
||||
```
|
||||
|
||||
- **Props transformation:**
|
||||
- Ask user for prop mapping rules (e.g., `open` → `open`, `setOpen` → `onOpenChange`)
|
||||
- Ask for props to remove (e.g., `noPadding`, `closeOnOutsideClick`, `size`)
|
||||
- Apply transformations based on user specifications
|
||||
|
||||
- **Structure transformation:**
|
||||
- Ask user for the new component structure pattern
|
||||
- Apply the transformation maintaining all functionality
|
||||
- Preserve all existing logic, state management, and event handlers
|
||||
|
||||
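
For orientation, a hypothetical Modal → Dialog transformation could look like the snippet below. The Dialog sub-components, prop names, and mappings shown here are placeholders; confirm the real ones with the user before applying them:

```tsx
// Before (deprecated component)
<Modal open={open} setOpen={setOpen} noPadding size="lg">
  <h2>Delete survey</h2>
  <p>This action cannot be undone.</p>
</Modal>

// After (new component system, preserving the same state and handlers)
<Dialog open={open} onOpenChange={setOpen}>
  <DialogContent>
    <DialogHeader>
      <DialogTitle>Delete survey</DialogTitle>
    </DialogHeader>
    <DialogBody>
      <p>This action cannot be undone.</p>
    </DialogBody>
  </DialogContent>
</Dialog>
```
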
#### 2.2 Wait for User Approval
|
||||
- Present the migration changes
|
||||
- Wait for explicit user approval before proceeding
|
||||
- If rejected, ask for specific feedback and iterate
|
||||
#### 2.3 Re-read and Apply Additional Changes
|
||||
- Re-read the component file to capture any user modifications
|
||||
- Apply any additional improvements the user made
|
||||
- Ensure all changes are incorporated
|
||||
|
||||
#### 2.4 Test File Updates
|
||||
- **Find corresponding test file** (same name with `.test.tsx` or `.test.ts`)
|
||||
- **Update test mocks:**
|
||||
- Ask user for new component mock structure
|
||||
- Replace old component mocks with new ones
|
||||
- Example pattern:
|
||||
```typescript
|
||||
// Add to test setup:
|
||||
jest.mock("@/components/ui/[NewComponent]", () => ({
|
||||
[NewComponent]: ({ children, [props] }: any) => ([mock implementation]),
|
||||
[NewComponentPart1]: ({ children }: any) => <div data-testid="[new-component-part1]">{children}</div>,
|
||||
[NewComponentPart2]: ({ children }: any) => <div data-testid="[new-component-part2]">{children}</div>,
|
||||
// ... other parts
|
||||
}));
|
||||
```
|
||||
- **Update test expectations:**
|
||||
- Change test IDs from old component to new component
|
||||
- Update any component-specific assertions
|
||||
- Ensure all new component parts used in the component are mocked
|
||||
|
||||
#### 2.5 Run Tests and Optimize
|
||||
- Execute `pnpm test -- ComponentName.test.tsx`
|
||||
- Fix any failing tests
|
||||
- Optimize code quality (imports, formatting, etc.)
|
||||
- Re-run tests until all pass
|
||||
- **Maximum 3 iterations** - if still failing, ask user for guidance
|
||||
|
||||
#### 2.6 Wait for Final Approval
|
||||
- Present test results and any optimizations made
|
||||
- Wait for user approval of the complete migration
|
||||
- If rejected, iterate based on feedback
|
||||
|
||||
#### 2.7 Git Commit
|
||||
- Run: `git add .`
|
||||
- Run: `git commit -m "migrate [ComponentName] from [DeprecatedComponent] to [NewComponent]"`
|
||||
- Confirm commit was successful
|
||||
|
||||
### Step 3: Final Report Generation
|
||||
After all components are migrated, generate a comprehensive GitHub PR report:
|
||||
|
||||
#### PR Title
|
||||
```
|
||||
feat: migrate [DeprecatedComponent] components to [NewComponent] system
|
||||
```
|
||||
|
||||
#### PR Description Template
|
||||
```markdown
|
||||
## 🔄 [DeprecatedComponent] to [NewComponent] Migration
|
||||
|
||||
### Overview
|
||||
Migrated [X] [DeprecatedComponent] components to the new [NewComponent] component system to modernize the UI architecture and improve consistency.
|
||||
|
||||
### Components Migrated
|
||||
[List each component with file path]
|
||||
|
||||
### Technical Changes
|
||||
- **Imports:** Replaced `[DeprecatedComponent]` with `[NewComponent], [NewComponentParts...]`
|
||||
- **Props:** [List prop transformations]
|
||||
- **Structure:** Implemented proper [NewComponent] component hierarchy
|
||||
- **Styling:** [Describe styling changes]
|
||||
- **Tests:** Updated all test mocks and expectations
|
||||
|
||||
### Migration Pattern
|
||||
```typescript
|
||||
// Before
|
||||
<[DeprecatedComponent] [oldProps]>
|
||||
[oldStructure]
|
||||
</[DeprecatedComponent]>
|
||||
|
||||
// After
|
||||
<[NewComponent] [newProps]>
|
||||
[newStructure]
|
||||
</[NewComponent]>
|
||||
```
|
||||
|
||||
### Testing
|
||||
- ✅ All existing tests updated and passing
|
||||
- ✅ Component functionality preserved
|
||||
- ✅ UI/UX behavior maintained
|
||||
|
||||
### How to Test This PR
|
||||
1. **Functional Testing:**
|
||||
- Navigate to each migrated component's usage
|
||||
- Verify [component] opens and closes correctly
|
||||
- Test all interactive elements within [components]
|
||||
- Confirm styling and layout are preserved
|
||||
|
||||
2. **Automated Testing:**
|
||||
```bash
|
||||
pnpm test
|
||||
```
|
||||
|
||||
3. **Visual Testing:**
|
||||
- Check that all [components] maintain proper styling
|
||||
- Verify responsive behavior
|
||||
- Test keyboard navigation and accessibility
|
||||
|
||||
### Breaking Changes
|
||||
[List any breaking changes or state "None - this is a drop-in replacement maintaining all existing functionality."]
|
||||
|
||||
### Notes
|
||||
- [Any excluded components] were preserved as they already use [NewComponent] internally
|
||||
- All form validation and complex state management preserved
|
||||
- Enhanced code quality with better imports and formatting
|
||||
```
|
||||
|
||||
## Special Considerations
|
||||
|
||||
### Excluded Components
|
||||
- **DO NOT MIGRATE** components specified by user as exclusions
|
||||
- They may already use the new component internally or have other reasons
|
||||
- Inform user these are skipped and why
|
||||
|
||||
### Complex Components
|
||||
- Preserve all existing functionality (forms, validation, state management)
|
||||
- Maintain prop interfaces
|
||||
- Keep all event handlers and callbacks
|
||||
- Preserve accessibility features
|
||||
|
||||
### Test Coverage
|
||||
- Ensure all new component parts are mocked when used
|
||||
- Mock all new component parts that appear in the component
|
||||
- Update test IDs from old component to new component
|
||||
- Maintain all existing test scenarios
|
||||
|
||||
### Error Handling
|
||||
- If tests fail after 3 iterations, stop and ask user for guidance
|
||||
- If component is too complex, ask user for specific guidance
|
||||
- If unsure about functionality preservation, ask for clarification
|
||||
|
||||
### Migration Patterns
|
||||
- Always ask user for specific migration patterns before starting
|
||||
- Confirm import structures, prop mappings, and component hierarchies
|
||||
- Adapt to different component architectures (simple replacements, complex restructuring, etc.)
|
||||
|
||||
## Success Criteria
|
||||
- All deprecated components successfully migrated to new components
|
||||
- All tests passing
|
||||
- No functionality lost
|
||||
- Code quality maintained or improved
|
||||
- User approval on each component
|
||||
- Successful git commits for each migration
|
||||
- Comprehensive PR report generated
|
||||
|
||||
## Usage Examples
|
||||
- "migrate Modal to Dialog"
|
||||
- "migrate Button to NewButton"
|
||||
- "migrate Card to ModernCard"
|
||||
- "component migration" (will prompt for details)
|
||||
@@ -1,177 +0,0 @@
|
||||
---
|
||||
description: Create a story in Storybook for a given component
|
||||
globs:
|
||||
alwaysApply: false
|
||||
---
|
||||
|
||||
# Formbricks Storybook Stories
|
||||
|
||||
## When generating Storybook stories for Formbricks components:
|
||||
|
||||
### 1. **File Structure**
|
||||
- Create `stories.tsx` (not `.stories.tsx`) in component directory
|
||||
- Use exact import: `import { Meta, StoryObj } from "@storybook/react-vite";`
|
||||
- Import component from `"./index"`
|
||||
|
||||
### 2. **Story Structure Template**
|
||||
```tsx
|
||||
import { Meta, StoryObj } from "@storybook/react-vite";
|
||||
import { ComponentName } from "./index";
|
||||
|
||||
// For complex components with configurable options
|
||||
// Consider this an example; the options need to reflect the prop types
|
||||
interface StoryOptions {
|
||||
showIcon: boolean;
|
||||
numberOfElements: number;
|
||||
customLabels: string[];
|
||||
}
|
||||
|
||||
type StoryProps = React.ComponentProps<typeof ComponentName> & StoryOptions;
|
||||
|
||||
const meta: Meta<StoryProps> = {
|
||||
title: "UI/ComponentName",
|
||||
component: ComponentName,
|
||||
tags: ["autodocs"],
|
||||
parameters: {
|
||||
layout: "centered",
|
||||
controls: { sort: "alpha", exclude: [] },
|
||||
docs: {
|
||||
description: {
|
||||
component: "The **ComponentName** component provides [description].",
|
||||
},
|
||||
},
|
||||
},
|
||||
argTypes: {
|
||||
// Organize in exactly these categories: Behavior, Appearance, Content
|
||||
},
|
||||
};
|
||||
|
||||
export default meta;
|
||||
type Story = StoryObj<typeof ComponentName> & { args: StoryOptions };
|
||||
```
|
||||
|
||||
### 3. **ArgTypes Organization**
|
||||
Organize ALL argTypes into exactly three categories:
|
||||
- **Behavior**: disabled, variant, onChange, etc.
|
||||
- **Appearance**: size, color, layout, styling, etc.
|
||||
- **Content**: text, icons, numberOfElements, etc.
|
||||
|
||||
Format:
|
||||
```tsx
|
||||
argTypes: {
|
||||
propName: {
|
||||
control: "select" | "boolean" | "text" | "number",
|
||||
options: ["option1", "option2"], // for select
|
||||
description: "Clear description",
|
||||
table: {
|
||||
category: "Behavior" | "Appearance" | "Content",
|
||||
type: { summary: "string" },
|
||||
defaultValue: { summary: "default" },
|
||||
},
|
||||
order: 1,
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
### 4. **Required Stories**
|
||||
Every component must include:
|
||||
- `Default`: Most common use case
|
||||
- `Disabled`: If component supports disabled state
|
||||
- `WithIcon`: If component supports icons
|
||||
- Variant stories for each variant (Primary, Secondary, Error, etc.)
|
||||
- Edge case stories (ManyElements, LongText, CustomStyling)
|
||||
|
||||
### 5. **Story Format**
|
||||
```tsx
|
||||
export const Default: Story = {
|
||||
args: {
|
||||
// Props with realistic values
|
||||
},
|
||||
};
|
||||
|
||||
export const EdgeCase: Story = {
|
||||
args: { /* ... */ },
|
||||
parameters: {
|
||||
docs: {
|
||||
description: {
|
||||
story: "Use this when [specific scenario].",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
### 6. **Dynamic Content Pattern**
|
||||
For components with dynamic content, create render function:
|
||||
```tsx
|
||||
const renderComponent = (args: StoryProps) => {
|
||||
const { numberOfElements, showIcon, customLabels } = args;
|
||||
|
||||
// Generate dynamic content
|
||||
const elements = Array.from({ length: numberOfElements }, (_, i) => ({
|
||||
id: `element-${i}`,
|
||||
label: customLabels[i] || `Element ${i + 1}`,
|
||||
icon: showIcon ? <IconComponent /> : undefined,
|
||||
}));
|
||||
|
||||
return <ComponentName {...args} elements={elements} />;
|
||||
};
|
||||
|
||||
export const Dynamic: Story = {
|
||||
render: renderComponent,
|
||||
args: {
|
||||
numberOfElements: 3,
|
||||
showIcon: true,
|
||||
customLabels: ["First", "Second", "Third"],
|
||||
},
|
||||
};
|
||||
```
|
||||
|
||||
### 7. **State Management**
|
||||
For interactive components:
|
||||
```tsx
|
||||
import { useState } from "react";
|
||||
|
||||
const ComponentWithState = (args: any) => {
|
||||
const [value, setValue] = useState(args.defaultValue);
|
||||
|
||||
return (
|
||||
<ComponentName
|
||||
{...args}
|
||||
value={value}
|
||||
onChange={(newValue) => {
|
||||
setValue(newValue);
|
||||
args.onChange?.(newValue);
|
||||
}}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export const Interactive: Story = {
|
||||
render: ComponentWithState,
|
||||
args: { defaultValue: "initial" },
|
||||
};
|
||||
```
|
||||
|
||||
### 8. **Quality Requirements**
|
||||
- Include component description in parameters.docs
|
||||
- Add story documentation for non-obvious use cases
|
||||
- Test edge cases (overflow, empty states, many elements)
|
||||
- Ensure no TypeScript errors
|
||||
- Use realistic prop values
|
||||
- Include at least 3-5 story variants
|
||||
- Example values should be in the context of a survey application
|
||||
|
||||
### 9. **Naming Conventions**
|
||||
- **Story titles**: "UI/ComponentName"
|
||||
- **Story exports**: PascalCase (Default, WithIcon, ManyElements)
|
||||
- **Categories**: "Behavior", "Appearance", "Content" (exact spelling)
|
||||
- **Props**: camelCase matching component props
|
||||
|
||||
### 10. **Special Cases**
|
||||
- **Generic components**: Remove `component` from meta if type conflicts
|
||||
- **Form components**: Include Invalid, WithValue stories
|
||||
- **Navigation**: Include ManyItems stories
|
||||
- **Modals, Dropdowns and Popups**: Include trigger and content structure
|
||||
|
||||
## Generate stories that are comprehensive, well-documented, and reflect all component states and edge cases.
|
||||
12
.env.example
12
.env.example
@@ -168,6 +168,9 @@ SLACK_CLIENT_SECRET=
|
||||
# Enterprise License Key
|
||||
ENTERPRISE_LICENSE_KEY=
|
||||
|
||||
# Internal Environment (production, staging) - used for internal staging environment
|
||||
# ENVIRONMENT=production
|
||||
|
||||
# Automatically assign new users to a specific organization and role within that organization
|
||||
# Insert an existing organization id or generate a valid CUID for a new one at https://www.getuniqueid.com/cuid (e.g. cjld2cjxh0000qzrmn831i7rn)
|
||||
# (Role Management is an Enterprise feature)
|
||||
@@ -181,8 +184,13 @@ ENTERPRISE_LICENSE_KEY=
|
||||
# Ignore Rate Limiting across the Formbricks app
|
||||
# RATE_LIMITING_DISABLED=1
|
||||
|
||||
# OpenTelemetry URL for tracing
|
||||
# OPENTELEMETRY_LISTENER_URL=http://localhost:4318/v1/traces
|
||||
# OpenTelemetry OTLP endpoint (base URL, exporters append /v1/traces and /v1/metrics)
|
||||
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
|
||||
# OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
|
||||
# OTEL_SERVICE_NAME=formbricks
|
||||
# OTEL_RESOURCE_ATTRIBUTES=deployment.environment=development
|
||||
# OTEL_TRACES_SAMPLER=parentbased_traceidratio
|
||||
# OTEL_TRACES_SAMPLER_ARG=1
|
||||
|
||||
# Unsplash API Key
|
||||
UNSPLASH_ACCESS_KEY=
|
||||
|
||||
36
.github/workflows/pr-size-check.yml
vendored
36
.github/workflows/pr-size-check.yml
vendored
@@ -111,27 +111,21 @@ jobs:
|
||||
const additions = ${{ steps.check-size.outputs.total_additions }};
|
||||
const deletions = ${{ steps.check-size.outputs.total_deletions }};
|
||||
|
||||
const body = `## 🚨 PR Size Warning
|
||||
|
||||
This PR has approximately **${totalChanges} lines** of changes (${additions} additions, ${deletions} deletions across ${countedFiles} files).
|
||||
|
||||
Large PRs (>800 lines) are significantly harder to review and increase the chance of merge conflicts. Consider splitting this into smaller, self-contained PRs.
|
||||
|
||||
### 💡 Suggestions:
|
||||
- **Split by feature or module** - Break down into logical, independent pieces
|
||||
- **Create a sequence of PRs** - Each building on the previous one
|
||||
- **Branch off PR branches** - Don't wait for reviews to continue dependent work
|
||||
|
||||
### 📊 What was counted:
|
||||
- ✅ Source files, stylesheets, configuration files
|
||||
- ❌ Excluded ${excludedFiles} files (tests, locales, locks, generated files)
|
||||
|
||||
### 📚 Guidelines:
|
||||
- **Ideal:** 300-500 lines per PR
|
||||
- **Warning:** 500-800 lines
|
||||
- **Critical:** 800+ lines ⚠️
|
||||
|
||||
If this large PR is unavoidable (e.g., migration, dependency update, major refactor), please explain in the PR description why it couldn't be split.`;
|
||||
const body = '## 🚨 PR Size Warning\n\n' +
|
||||
'This PR has approximately **' + totalChanges + ' lines** of changes (' + additions + ' additions, ' + deletions + ' deletions across ' + countedFiles + ' files).\n\n' +
|
||||
'Large PRs (>800 lines) are significantly harder to review and increase the chance of merge conflicts. Consider splitting this into smaller, self-contained PRs.\n\n' +
|
||||
'### 💡 Suggestions:\n' +
|
||||
'- **Split by feature or module** - Break down into logical, independent pieces\n' +
|
||||
'- **Create a sequence of PRs** - Each building on the previous one\n' +
|
||||
'- **Branch off PR branches** - Don\'t wait for reviews to continue dependent work\n\n' +
|
||||
'### 📊 What was counted:\n' +
|
||||
'- ✅ Source files, stylesheets, configuration files\n' +
|
||||
'- ❌ Excluded ' + excludedFiles + ' files (tests, locales, locks, generated files)\n\n' +
|
||||
'### 📚 Guidelines:\n' +
|
||||
'- **Ideal:** 300-500 lines per PR\n' +
|
||||
'- **Warning:** 500-800 lines\n' +
|
||||
'- **Critical:** 800+ lines ⚠️\n\n' +
|
||||
'If this large PR is unavoidable (e.g., migration, dependency update, major refactor), please explain in the PR description why it couldn\'t be split.';
|
||||
|
||||
// Check if we already commented
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
|
||||
6
.github/workflows/release-helm-chart.yml
vendored
6
.github/workflows/release-helm-chart.yml
vendored
@@ -65,8 +65,8 @@ jobs:
|
||||
set -euo pipefail
|
||||
|
||||
echo "Updating Chart.yaml with version: ${VERSION}"
|
||||
yq -i ".version = \"${VERSION}\"" helm-chart/Chart.yaml
|
||||
yq -i ".appVersion = \"${VERSION}\"" helm-chart/Chart.yaml
|
||||
yq -i ".version = \"${VERSION}\"" charts/formbricks/Chart.yaml
|
||||
yq -i ".appVersion = \"${VERSION}\"" charts/formbricks/Chart.yaml
|
||||
|
||||
echo "✅ Successfully updated Chart.yaml"
|
||||
|
||||
@@ -77,7 +77,7 @@ jobs:
|
||||
set -euo pipefail
|
||||
|
||||
echo "Packaging Helm chart version: ${VERSION}"
|
||||
helm package ./helm-chart
|
||||
helm package ./charts/formbricks
|
||||
|
||||
echo "✅ Successfully packaged formbricks-${VERSION}.tgz"
|
||||
|
||||
|
||||
4
.github/workflows/sonarqube.yml
vendored
4
.github/workflows/sonarqube.yml
vendored
@@ -9,6 +9,7 @@ on:
|
||||
merge_group:
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
jobs:
|
||||
sonarqube:
|
||||
name: SonarQube
|
||||
@@ -50,6 +51,9 @@ jobs:
|
||||
pnpm test:coverage
|
||||
- name: SonarQube Scan
|
||||
uses: SonarSource/sonarqube-scan-action@2500896589ef8f7247069a56136f8dc177c27ccf
|
||||
with:
|
||||
args: >
|
||||
-Dsonar.verbose=true
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Needed to get PR information, if any
|
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||
|
||||
19
.github/workflows/translation-check.yml
vendored
19
.github/workflows/translation-check.yml
vendored
@@ -32,21 +32,20 @@ jobs:
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@1d0ff469b7ec7b3cb9d8673fde0c81c44821de2a # v4.2.0
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
node-version: 18
|
||||
fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0
|
||||
- name: Setup Node.js 22.x
|
||||
uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af
|
||||
with:
|
||||
version: 9.15.9
|
||||
node-version: 22.x
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@a7487c7e89a18df4991f7f222e4898a00d66ddda # v4.1.0
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
run: pnpm install --config.platform=linux --config.architecture=x64
|
||||
|
||||
- name: Validate translation keys
|
||||
run: |
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -13,6 +13,7 @@
|
||||
**/.next/
|
||||
**/out/
|
||||
**/build
|
||||
**/next-env.d.ts
|
||||
|
||||
# node
|
||||
**/dist/
|
||||
@@ -62,3 +63,6 @@ branch.json
|
||||
packages/ios/FormbricksSDK/FormbricksSDK.xcodeproj/project.xcworkspace/xcuserdata
|
||||
.cursorrules
|
||||
i18n.cache
|
||||
stats.html
|
||||
# next-agents-md
|
||||
.next-docs/
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
#!/bin/sh
|
||||
. "$(dirname "$0")/_/husky.sh"
|
||||
|
||||
# Load environment variables from .env files
|
||||
if [ -f .env ]; then
|
||||
set -a
|
||||
|
||||
@@ -11,24 +11,24 @@
|
||||
"clean": "rimraf .turbo node_modules dist storybook-static"
|
||||
},
|
||||
"dependencies": {
|
||||
"@formbricks/survey-ui": "workspace:*",
|
||||
"eslint-plugin-react-refresh": "0.4.24"
|
||||
"@formbricks/survey-ui": "workspace:*"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@chromatic-com/storybook": "^4.1.3",
|
||||
"@storybook/addon-a11y": "10.0.8",
|
||||
"@storybook/addon-links": "10.0.8",
|
||||
"@storybook/addon-onboarding": "10.0.8",
|
||||
"@storybook/react-vite": "10.0.8",
|
||||
"@typescript-eslint/eslint-plugin": "8.48.0",
|
||||
"@tailwindcss/vite": "4.1.17",
|
||||
"@typescript-eslint/parser": "8.48.0",
|
||||
"@vitejs/plugin-react": "5.1.1",
|
||||
"esbuild": "0.27.0",
|
||||
"eslint-plugin-storybook": "10.0.8",
|
||||
"@chromatic-com/storybook": "^5.0.0",
|
||||
"@storybook/addon-a11y": "10.1.11",
|
||||
"@storybook/addon-links": "10.1.11",
|
||||
"@storybook/addon-onboarding": "10.1.11",
|
||||
"@storybook/react-vite": "10.1.11",
|
||||
"@typescript-eslint/eslint-plugin": "8.53.0",
|
||||
"@tailwindcss/vite": "4.1.18",
|
||||
"@typescript-eslint/parser": "8.53.0",
|
||||
"@vitejs/plugin-react": "5.1.2",
|
||||
"esbuild": "0.25.12",
|
||||
"eslint-plugin-react-refresh": "0.4.26",
|
||||
"eslint-plugin-storybook": "10.1.11",
|
||||
"prop-types": "15.8.1",
|
||||
"storybook": "10.0.8",
|
||||
"vite": "7.2.4",
|
||||
"@storybook/addon-docs": "10.0.8"
|
||||
"storybook": "10.1.11",
|
||||
"vite": "7.3.1",
|
||||
"@storybook/addon-docs": "10.1.11"
|
||||
}
|
||||
}
|
||||
|
||||
7
apps/web/.eslintignore
Normal file
7
apps/web/.eslintignore
Normal file
@@ -0,0 +1,7 @@
|
||||
node_modules/
|
||||
.next/
|
||||
public/
|
||||
playwright/
|
||||
dist/
|
||||
coverage/
|
||||
vendor/
|
||||
@@ -1,20 +1,4 @@
|
||||
module.exports = {
|
||||
extends: ["@formbricks/eslint-config/legacy-next.js"],
|
||||
ignorePatterns: ["**/package.json", "**/tsconfig.json"],
|
||||
overrides: [
|
||||
{
|
||||
files: ["locales/*.json"],
|
||||
plugins: ["i18n-json"],
|
||||
rules: {
|
||||
"i18n-json/identical-keys": [
|
||||
"error",
|
||||
{
|
||||
filePath: require("path").join(__dirname, "locales", "en-US.json"),
|
||||
checkExtraKeys: false,
|
||||
checkMissingKeys: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM node:22-alpine3.22 AS base
|
||||
FROM node:24-alpine3.23 AS base
|
||||
|
||||
#
|
||||
## step 1: Prune monorepo
|
||||
@@ -20,7 +20,7 @@ FROM base AS installer
|
||||
# Enable corepack and prepare pnpm
|
||||
RUN npm install --ignore-scripts -g corepack@latest
|
||||
RUN corepack enable
|
||||
RUN corepack prepare pnpm@9.15.9 --activate
|
||||
RUN corepack prepare pnpm@10.28.2 --activate
|
||||
|
||||
# Install necessary build tools and compilers
|
||||
RUN apk update && apk add --no-cache cmake g++ gcc jq make openssl-dev python3
|
||||
@@ -69,20 +69,14 @@ RUN --mount=type=secret,id=database_url \
|
||||
--mount=type=secret,id=sentry_auth_token \
|
||||
/tmp/read-secrets.sh pnpm build --filter=@formbricks/web...
|
||||
|
||||
# Extract Prisma version
|
||||
RUN jq -r '.devDependencies.prisma' packages/database/package.json > /prisma_version.txt
|
||||
|
||||
#
|
||||
## step 3: setup production runner
|
||||
#
|
||||
FROM base AS runner
|
||||
|
||||
RUN npm install --ignore-scripts -g corepack@latest && \
|
||||
corepack enable
|
||||
|
||||
RUN apk add --no-cache curl \
|
||||
&& apk add --no-cache supercronic \
|
||||
# && addgroup --system --gid 1001 nodejs \
|
||||
# Update npm to latest, then create user
|
||||
# Note: npm's bundled tar has a known vulnerability but npm is only used during build, not at runtime
|
||||
RUN npm install --ignore-scripts -g npm@latest \
|
||||
&& addgroup -S nextjs \
|
||||
&& adduser -S -u 1001 -G nextjs nextjs
|
||||
|
||||
@@ -104,31 +98,53 @@ RUN chown -R nextjs:nextjs ./apps/web/.next/static && chmod -R 755 ./apps/web/.n
|
||||
COPY --from=installer /app/apps/web/public ./apps/web/public
|
||||
RUN chown -R nextjs:nextjs ./apps/web/public && chmod -R 755 ./apps/web/public
|
||||
|
||||
# Create packages/database directory structure with proper ownership for runtime migrations
|
||||
RUN mkdir -p ./packages/database/migrations && chown -R nextjs:nextjs ./packages/database
|
||||
|
||||
COPY --from=installer /app/packages/database/schema.prisma ./packages/database/schema.prisma
|
||||
RUN chown nextjs:nextjs ./packages/database/schema.prisma && chmod 644 ./packages/database/schema.prisma
|
||||
|
||||
COPY --from=installer /app/packages/database/dist ./packages/database/dist
|
||||
RUN chown -R nextjs:nextjs ./packages/database/dist && chmod -R 755 ./packages/database/dist
|
||||
|
||||
# Copy prisma client packages
|
||||
COPY --from=installer /app/node_modules/@prisma/client ./node_modules/@prisma/client
|
||||
RUN chown -R nextjs:nextjs ./node_modules/@prisma/client && chmod -R 755 ./node_modules/@prisma/client
|
||||
|
||||
COPY --from=installer /app/node_modules/.prisma ./node_modules/.prisma
|
||||
RUN chown -R nextjs:nextjs ./node_modules/.prisma && chmod -R 755 ./node_modules/.prisma
|
||||
|
||||
COPY --from=installer /prisma_version.txt .
|
||||
RUN chown nextjs:nextjs ./prisma_version.txt && chmod 644 ./prisma_version.txt
|
||||
|
||||
COPY --from=installer /app/node_modules/@paralleldrive/cuid2 ./node_modules/@paralleldrive/cuid2
|
||||
RUN chmod -R 755 ./node_modules/@paralleldrive/cuid2
|
||||
|
||||
COPY --from=installer /app/node_modules/uuid ./node_modules/uuid
|
||||
RUN chmod -R 755 ./node_modules/uuid
|
||||
|
||||
COPY --from=installer /app/node_modules/@noble/hashes ./node_modules/@noble/hashes
|
||||
RUN chmod -R 755 ./node_modules/@noble/hashes
|
||||
|
||||
COPY --from=installer /app/node_modules/zod ./node_modules/zod
|
||||
RUN chmod -R 755 ./node_modules/zod
|
||||
|
||||
RUN npm install -g prisma@6
|
||||
# Pino loads transport code in worker threads via dynamic require().
|
||||
# Next.js file tracing only traces static imports, missing runtime-loaded files
|
||||
# (e.g. pino/lib/transport-stream.js, transport targets).
|
||||
# Copy the full packages to ensure all runtime files are available.
|
||||
COPY --from=installer /app/node_modules/pino ./node_modules/pino
|
||||
RUN chmod -R 755 ./node_modules/pino
|
||||
|
||||
COPY --from=installer /app/node_modules/pino-opentelemetry-transport ./node_modules/pino-opentelemetry-transport
|
||||
RUN chmod -R 755 ./node_modules/pino-opentelemetry-transport
|
||||
|
||||
COPY --from=installer /app/node_modules/pino-abstract-transport ./node_modules/pino-abstract-transport
|
||||
RUN chmod -R 755 ./node_modules/pino-abstract-transport
|
||||
|
||||
COPY --from=installer /app/node_modules/otlp-logger ./node_modules/otlp-logger
|
||||
RUN chmod -R 755 ./node_modules/otlp-logger
|
||||
|
||||
# Install prisma CLI globally for database migrations and fix permissions for nextjs user
|
||||
RUN npm install --ignore-scripts -g prisma@6 \
|
||||
&& chown -R nextjs:nextjs /usr/local/lib/node_modules/prisma
|
||||
|
||||
# Create a startup script to handle the conditional logic
|
||||
COPY --from=installer /app/apps/web/scripts/docker/next-start.sh /home/nextjs/start.sh
|
||||
@@ -138,10 +154,8 @@ EXPOSE 3000
|
||||
ENV HOSTNAME="0.0.0.0"
|
||||
USER nextjs
|
||||
|
||||
# Prepare pnpm as the nextjs user to ensure it's available at runtime
|
||||
# Prepare volumes for uploads and SAML connections
|
||||
RUN corepack prepare pnpm@9.15.9 --activate && \
|
||||
mkdir -p /home/nextjs/apps/web/uploads/ && \
|
||||
RUN mkdir -p /home/nextjs/apps/web/uploads/ && \
|
||||
mkdir -p /home/nextjs/apps/web/saml-connection
|
||||
|
||||
VOLUME /home/nextjs/apps/web/uploads/
|
||||
|
||||
@@ -25,7 +25,7 @@ const mockProject: TProject = {
|
||||
},
|
||||
placement: "bottomRight",
|
||||
clickOutsideClose: true,
|
||||
darkOverlay: false,
|
||||
overlay: "none",
|
||||
environments: [],
|
||||
languages: [],
|
||||
logo: null,
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
import { zodResolver } from "@hookform/resolvers/zod";
|
||||
import Image from "next/image";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
import { useMemo, useState } from "react";
|
||||
import { useForm } from "react-hook-form";
|
||||
import { toast } from "react-hot-toast";
|
||||
import { useTranslation } from "react-i18next";
|
||||
@@ -17,6 +17,7 @@ import {
|
||||
import { createProjectAction } from "@/app/(app)/environments/[environmentId]/actions";
|
||||
import { previewSurvey } from "@/app/lib/templates";
|
||||
import { FORMBRICKS_SURVEYS_FILTERS_KEY_LS } from "@/lib/localStorage";
|
||||
import { buildStylingFromBrandColor } from "@/lib/styling/constants";
|
||||
import { getFormattedErrorMessage } from "@/lib/utils/helper";
|
||||
import { TOrganizationTeam } from "@/modules/ee/teams/project-teams/types/team";
|
||||
import { CreateTeamModal } from "@/modules/ee/teams/team-list/components/create-team-modal";
|
||||
@@ -64,10 +65,17 @@ export const ProjectSettings = ({
|
||||
const { t } = useTranslation();
|
||||
const addProject = async (data: TProjectUpdateInput) => {
|
||||
try {
|
||||
// Build the full styling from the chosen brand color so all derived
|
||||
// colours (question, button, input, option, progress, etc.) are persisted.
|
||||
// Without this, only brandColor is saved and the look-and-feel page falls
|
||||
// back to STYLE_DEFAULTS computed from the default brand (#64748b).
|
||||
const fullStyling = buildStylingFromBrandColor(data.styling?.brandColor?.light);
|
||||
|
||||
const createProjectResponse = await createProjectAction({
|
||||
organizationId,
|
||||
data: {
|
||||
...data,
|
||||
styling: fullStyling,
|
||||
config: { channel, industry },
|
||||
teamIds: data.teamIds,
|
||||
},
|
||||
@@ -112,6 +120,7 @@ export const ProjectSettings = ({
|
||||
const projectName = form.watch("name");
|
||||
const logoUrl = form.watch("logo.url");
|
||||
const brandColor = form.watch("styling.brandColor.light") ?? defaultBrandColor;
|
||||
const previewStyling = useMemo(() => buildStylingFromBrandColor(brandColor), [brandColor]);
|
||||
const { isSubmitting } = form.formState;
|
||||
|
||||
const organizationTeamsOptions = organizationTeams.map((team) => ({
|
||||
@@ -226,7 +235,7 @@ export const ProjectSettings = ({
|
||||
alt="Logo"
|
||||
width={256}
|
||||
height={56}
|
||||
className="absolute top-2 left-2 -mb-6 h-20 w-auto max-w-64 rounded-lg border object-contain p-1"
|
||||
className="absolute left-2 top-2 -mb-6 h-20 w-auto max-w-64 rounded-lg border object-contain p-1"
|
||||
/>
|
||||
)}
|
||||
<p className="text-sm text-slate-400">{t("common.preview")}</p>
|
||||
@@ -235,7 +244,7 @@ export const ProjectSettings = ({
|
||||
appUrl={publicDomain}
|
||||
isPreviewMode={true}
|
||||
survey={previewSurvey(projectName || "my Product", t)}
|
||||
styling={{ brandColor: { light: brandColor } }}
|
||||
styling={previewStyling}
|
||||
isBrandingEnabled={false}
|
||||
languageCode="default"
|
||||
onFileUpload={async (file) => file.name}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
import { z } from "zod";
|
||||
import { ZId } from "@formbricks/types/common";
|
||||
import { OperationNotAllowedError } from "@formbricks/types/errors";
|
||||
import { AuthorizationError, OperationNotAllowedError } from "@formbricks/types/errors";
|
||||
import { ZProjectUpdateInput } from "@formbricks/types/project";
|
||||
import { getMembershipByUserIdOrganizationId } from "@/lib/membership/service";
|
||||
import { getOrganization } from "@/lib/organization/service";
|
||||
@@ -138,7 +138,7 @@ export const getProjectsForSwitcherAction = authenticatedActionClient
|
||||
// Need membership for getProjectsByUserId (1 DB query)
|
||||
const membership = await getMembershipByUserIdOrganizationId(ctx.user.id, parsedInput.organizationId);
|
||||
if (!membership) {
|
||||
throw new Error("Membership not found");
|
||||
throw new AuthorizationError("Membership not found");
|
||||
}
|
||||
|
||||
return await getProjectsByUserId(ctx.user.id, membership);
|
||||
|
||||
@@ -36,7 +36,7 @@ export const EnvironmentLayout = async ({ layoutData, children }: EnvironmentLay
|
||||
// Calculate derived values (no queries)
|
||||
const { isMember, isOwner, isManager } = getAccessFlags(membership.role);
|
||||
|
||||
const { features, lastChecked, isPendingDowngrade, active } = license;
|
||||
const { features, lastChecked, isPendingDowngrade, active, status } = license;
|
||||
const isMultiOrgEnabled = features?.isMultiOrgEnabled ?? false;
|
||||
const organizationProjectsLimit = await getOrganizationProjectsLimit(organization.billing.limits);
|
||||
const isOwnerOrManager = isOwner || isManager;
|
||||
@@ -63,6 +63,7 @@ export const EnvironmentLayout = async ({ layoutData, children }: EnvironmentLay
|
||||
active={active}
|
||||
environmentId={environment.id}
|
||||
locale={user.locale}
|
||||
status={status}
|
||||
/>
|
||||
|
||||
<div className="flex h-full">
|
||||
|
||||
@@ -109,7 +109,10 @@ export const MainNavigation = ({
|
||||
href: `/environments/${environment.id}/contacts`,
|
||||
name: t("common.contacts"),
|
||||
icon: UserIcon,
|
||||
isActive: pathname?.includes("/contacts") || pathname?.includes("/segments"),
|
||||
isActive:
|
||||
pathname?.includes("/contacts") ||
|
||||
pathname?.includes("/segments") ||
|
||||
pathname?.includes("/attributes"),
|
||||
},
|
||||
{
|
||||
name: t("common.configuration"),
|
||||
|
||||
@@ -209,7 +209,7 @@ export const OrganizationBreadcrumb = ({
|
||||
)}
|
||||
{!isLoadingOrganizations && !loadError && (
|
||||
<>
|
||||
<DropdownMenuGroup>
|
||||
<DropdownMenuGroup className="max-h-[300px] overflow-y-auto">
|
||||
{organizations.map((org) => (
|
||||
<DropdownMenuCheckboxItem
|
||||
key={org.id}
|
||||
|
||||
@@ -234,7 +234,7 @@ export const ProjectBreadcrumb = ({
|
||||
)}
|
||||
{!isLoadingProjects && !loadError && (
|
||||
<>
|
||||
<DropdownMenuGroup>
|
||||
<DropdownMenuGroup className="max-h-[300px] overflow-y-auto">
|
||||
{projects.map((proj) => (
|
||||
<DropdownMenuCheckboxItem
|
||||
key={proj.id}
|
||||
|
||||
@@ -58,7 +58,7 @@ async function handleEmailUpdate({
|
||||
payload.email = inputEmail;
|
||||
await updateBrevoCustomer({ id: ctx.user.id, email: inputEmail });
|
||||
} else {
|
||||
await sendVerificationNewEmail(ctx.user.id, inputEmail);
|
||||
await sendVerificationNewEmail(ctx.user.id, inputEmail, ctx.user.locale);
|
||||
}
|
||||
return payload;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,142 @@
|
||||
"use client";
|
||||
|
||||
import { TFunction } from "i18next";
|
||||
import { RotateCcwIcon } from "lucide-react";
|
||||
import { useRouter } from "next/navigation";
|
||||
import { useState } from "react";
|
||||
import toast from "react-hot-toast";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { recheckLicenseAction } from "@/modules/ee/license-check/actions";
|
||||
import { Alert, AlertDescription } from "@/modules/ui/components/alert";
|
||||
import { Badge } from "@/modules/ui/components/badge";
|
||||
import { Button } from "@/modules/ui/components/button";
|
||||
import { SettingsCard } from "../../../components/SettingsCard";
|
||||
|
||||
type LicenseStatus = "active" | "expired" | "unreachable" | "invalid_license";
|
||||
|
||||
interface EnterpriseLicenseStatusProps {
|
||||
status: LicenseStatus;
|
||||
gracePeriodEnd?: Date;
|
||||
environmentId: string;
|
||||
}
|
||||
|
||||
const getBadgeConfig = (
|
||||
status: LicenseStatus,
|
||||
t: TFunction
|
||||
): { type: "success" | "error" | "warning" | "gray"; label: string } => {
|
||||
switch (status) {
|
||||
case "active":
|
||||
return { type: "success", label: t("environments.settings.enterprise.license_status_active") };
|
||||
case "expired":
|
||||
return { type: "error", label: t("environments.settings.enterprise.license_status_expired") };
|
||||
case "unreachable":
|
||||
return { type: "warning", label: t("environments.settings.enterprise.license_status_unreachable") };
|
||||
case "invalid_license":
|
||||
return { type: "error", label: t("environments.settings.enterprise.license_status_invalid") };
|
||||
default:
|
||||
return { type: "gray", label: t("environments.settings.enterprise.license_status") };
|
||||
}
|
||||
};
|
||||
|
||||
export const EnterpriseLicenseStatus = ({
|
||||
status,
|
||||
gracePeriodEnd,
|
||||
environmentId,
|
||||
}: EnterpriseLicenseStatusProps) => {
|
||||
const { t } = useTranslation();
|
||||
const router = useRouter();
|
||||
const [isRechecking, setIsRechecking] = useState(false);
|
||||
|
||||
const handleRecheck = async () => {
|
||||
setIsRechecking(true);
|
||||
try {
|
||||
const result = await recheckLicenseAction({ environmentId });
|
||||
if (result?.serverError) {
|
||||
toast.error(result.serverError || t("environments.settings.enterprise.recheck_license_failed"));
|
||||
return;
|
||||
}
|
||||
|
||||
if (result?.data) {
|
||||
if (result.data.status === "unreachable") {
|
||||
toast.error(t("environments.settings.enterprise.recheck_license_unreachable"));
|
||||
} else if (result.data.status === "invalid_license") {
|
||||
toast.error(t("environments.settings.enterprise.recheck_license_invalid"));
|
||||
} else {
|
||||
toast.success(t("environments.settings.enterprise.recheck_license_success"));
|
||||
}
|
||||
router.refresh();
|
||||
} else {
|
||||
toast.error(t("environments.settings.enterprise.recheck_license_failed"));
|
||||
}
|
||||
} catch (error) {
|
||||
toast.error(
|
||||
error instanceof Error ? error.message : t("environments.settings.enterprise.recheck_license_failed")
|
||||
);
|
||||
} finally {
|
||||
setIsRechecking(false);
|
||||
}
|
||||
};
|
||||
|
||||
const badgeConfig = getBadgeConfig(status, t);
|
||||
|
||||
return (
|
||||
<SettingsCard
|
||||
title={t("environments.settings.enterprise.license_status")}
|
||||
description={t("environments.settings.enterprise.license_status_description")}>
|
||||
<div className="flex flex-col gap-4">
|
||||
<div className="flex items-center justify-between gap-3">
|
||||
<div className="flex flex-col gap-1.5">
|
||||
<Badge type={badgeConfig.type} text={badgeConfig.label} size="normal" className="w-fit" />
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={handleRecheck}
|
||||
disabled={isRechecking}
|
||||
className="shrink-0">
|
||||
{isRechecking ? (
|
||||
<>
|
||||
<RotateCcwIcon className="mr-2 h-4 w-4 animate-spin" />
|
||||
{t("environments.settings.enterprise.rechecking")}
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<RotateCcwIcon className="mr-2 h-4 w-4" />
|
||||
{t("environments.settings.enterprise.recheck_license")}
|
||||
</>
|
||||
)}
|
||||
</Button>
|
||||
</div>
|
||||
{status === "unreachable" && gracePeriodEnd && (
|
||||
<Alert variant="warning" size="small">
|
||||
<AlertDescription className="overflow-visible whitespace-normal">
|
||||
{t("environments.settings.enterprise.license_unreachable_grace_period", {
|
||||
gracePeriodEnd: new Date(gracePeriodEnd).toLocaleDateString(undefined, {
|
||||
year: "numeric",
|
||||
month: "short",
|
||||
day: "numeric",
|
||||
}),
|
||||
})}
|
||||
</AlertDescription>
|
||||
</Alert>
|
||||
)}
|
||||
{status === "invalid_license" && (
|
||||
<Alert variant="error" size="small">
|
||||
<AlertDescription className="overflow-visible whitespace-normal">
|
||||
{t("environments.settings.enterprise.license_invalid_description")}
|
||||
</AlertDescription>
|
||||
</Alert>
|
||||
)}
|
||||
<p className="border-t border-slate-100 pt-4 text-sm text-slate-500">
|
||||
{t("environments.settings.enterprise.questions_please_reach_out_to")}{" "}
|
||||
<a
|
||||
className="font-medium text-slate-700 underline hover:text-slate-900"
|
||||
href="mailto:hola@formbricks.com">
|
||||
hola@formbricks.com
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
</SettingsCard>
|
||||
);
|
||||
};
|
||||
@@ -2,9 +2,10 @@ import { CheckIcon } from "lucide-react";
|
||||
import Link from "next/link";
|
||||
import { notFound } from "next/navigation";
|
||||
import { OrganizationSettingsNavbar } from "@/app/(app)/environments/[environmentId]/settings/(organization)/components/OrganizationSettingsNavbar";
|
||||
import { EnterpriseLicenseStatus } from "@/app/(app)/environments/[environmentId]/settings/(organization)/enterprise/components/EnterpriseLicenseStatus";
|
||||
import { IS_FORMBRICKS_CLOUD } from "@/lib/constants";
|
||||
import { getTranslate } from "@/lingodotdev/server";
|
||||
import { getEnterpriseLicense } from "@/modules/ee/license-check/lib/license";
|
||||
import { GRACE_PERIOD_MS, getEnterpriseLicense } from "@/modules/ee/license-check/lib/license";
|
||||
import { getEnvironmentAuth } from "@/modules/environments/lib/utils";
|
||||
import { Button } from "@/modules/ui/components/button";
|
||||
import { PageContentWrapper } from "@/modules/ui/components/page-content-wrapper";
|
||||
@@ -25,7 +26,8 @@ const Page = async (props) => {
|
||||
return notFound();
|
||||
}
|
||||
|
||||
const { active: isEnterpriseEdition } = await getEnterpriseLicense();
|
||||
const licenseState = await getEnterpriseLicense();
|
||||
const hasLicense = licenseState.status !== "no-license";
|
||||
|
||||
const paidFeatures = [
|
||||
{
|
||||
@@ -90,35 +92,22 @@ const Page = async (props) => {
|
||||
activeId="enterprise"
|
||||
/>
|
||||
</PageHeader>
|
||||
{isEnterpriseEdition ? (
|
||||
<div>
|
||||
<div className="mt-8 max-w-4xl rounded-lg border border-slate-300 bg-slate-100 shadow-sm">
|
||||
<div className="space-y-4 p-8">
|
||||
<div className="flex items-center gap-x-2">
|
||||
<div className="rounded-full border border-green-300 bg-green-100 p-0.5 dark:bg-green-800">
|
||||
<CheckIcon className="h-5 w-5 p-0.5 text-green-500 dark:text-green-400" />
|
||||
</div>
|
||||
<p className="text-slate-800">
|
||||
{t(
|
||||
"environments.settings.enterprise.your_enterprise_license_is_active_all_features_unlocked"
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
<p className="text-sm text-slate-500">
|
||||
{t("environments.settings.enterprise.questions_please_reach_out_to")}{" "}
|
||||
<a className="font-semibold underline" href="mailto:hola@formbricks.com">
|
||||
hola@formbricks.com
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{hasLicense ? (
|
||||
<EnterpriseLicenseStatus
|
||||
status={licenseState.status as "active" | "expired" | "unreachable" | "invalid_license"}
|
||||
gracePeriodEnd={
|
||||
licenseState.status === "unreachable"
|
||||
? new Date(licenseState.lastChecked.getTime() + GRACE_PERIOD_MS)
|
||||
: undefined
|
||||
}
|
||||
environmentId={params.environmentId}
|
||||
/>
|
||||
) : (
|
||||
<div>
|
||||
<div className="relative isolate mt-8 overflow-hidden rounded-lg bg-slate-900 px-3 pt-8 shadow-2xl sm:px-8 md:pt-12 lg:flex lg:gap-x-10 lg:px-12 lg:pt-0">
|
||||
<svg
|
||||
viewBox="0 0 1024 1024"
|
||||
className="absolute top-1/2 left-1/2 -z-10 h-[64rem] w-[64rem] -translate-y-1/2 [mask-image:radial-gradient(closest-side,white,transparent)] sm:left-full sm:-ml-80 lg:left-1/2 lg:ml-0 lg:-translate-x-1/2 lg:translate-y-0"
|
||||
className="absolute left-1/2 top-1/2 -z-10 h-[64rem] w-[64rem] -translate-y-1/2 [mask-image:radial-gradient(closest-side,white,transparent)] sm:left-full sm:-ml-80 lg:left-1/2 lg:ml-0 lg:-translate-x-1/2 lg:translate-y-0"
|
||||
aria-hidden="true">
|
||||
<circle
|
||||
cx={512}
|
||||
@@ -153,8 +142,8 @@ const Page = async (props) => {
|
||||
{t("environments.settings.enterprise.enterprise_features")}
|
||||
</h2>
|
||||
<ul className="my-4 space-y-4">
|
||||
{paidFeatures.map((feature, index) => (
|
||||
<li key={index} className="flex items-center">
|
||||
{paidFeatures.map((feature) => (
|
||||
<li key={feature.title} className="flex items-center">
|
||||
<div className="rounded-full border border-green-300 bg-green-100 p-0.5 dark:bg-green-800">
|
||||
<CheckIcon className="h-5 w-5 p-0.5 text-green-500 dark:text-green-400" />
|
||||
</div>
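The hunk above replaces the simple `isEnterpriseEdition` boolean with a richer `licenseState` and, when the license server is unreachable, derives the end of the grace period from the last successful check plus `GRACE_PERIOD_MS`. A minimal sketch of that derivation as a pure function, assuming the state shape visible in the diff; the concrete `GRACE_PERIOD_MS` value lives in the license module and the 72-hour figure below is purely illustrative.

```typescript
// Sketch only: LicenseState mirrors the statuses used above; the real type and
// GRACE_PERIOD_MS come from "@/modules/ee/license-check/lib/license".
type LicenseState =
  | { status: "no-license" }
  | { status: "active" | "expired" | "invalid_license" }
  | { status: "unreachable"; lastChecked: Date };

const GRACE_PERIOD_MS = 72 * 60 * 60 * 1000; // assumed value, for illustration only

const getGracePeriodEnd = (state: LicenseState): Date | undefined =>
  // Only an unreachable license server starts a grace period.
  state.status === "unreachable" ? new Date(state.lastChecked.getTime() + GRACE_PERIOD_MS) : undefined;

const hasLicense = (state: LicenseState): boolean => state.status !== "no-license";
```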
@@ -7,6 +7,7 @@ import { useTranslation } from "react-i18next";
import { TOrganization } from "@formbricks/types/organizations";
import { deleteOrganizationAction } from "@/app/(app)/environments/[environmentId]/settings/(organization)/general/actions";
import { FORMBRICKS_ENVIRONMENT_ID_LS } from "@/lib/localStorage";
import { getFormattedErrorMessage } from "@/lib/utils/helper";
import { Alert, AlertDescription } from "@/modules/ui/components/alert";
import { Button } from "@/modules/ui/components/button";
import { DeleteDialog } from "@/modules/ui/components/delete-dialog";
@@ -32,7 +33,12 @@ export const DeleteOrganization = ({
setIsDeleting(true);

try {
await deleteOrganizationAction({ organizationId: organization.id });
const result = await deleteOrganizationAction({ organizationId: organization.id });
if (result?.serverError) {
toast.error(getFormattedErrorMessage(result));
setIsDeleting(false);
return;
}
toast.success(t("environments.settings.general.organization_deleted_successfully"));
if (typeof localStorage !== "undefined") {
localStorage.removeItem(FORMBRICKS_ENVIRONMENT_ID_LS);
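The `DeleteOrganization` change above is one instance of a pattern repeated throughout this compare: instead of awaiting a server action and assuming it succeeded, the caller inspects `result?.serverError` and surfaces it via a toast before continuing. A generic sketch of that pattern; `ActionResult` and the callbacks are illustrative stand-ins for the action result shape and the toast helpers used in the codebase.

```typescript
// Generic sketch of the "check serverError before assuming success" pattern.
type ActionResult<T> = { data?: T; serverError?: string } | undefined;

export const runActionWithFeedback = async <T>(
  callAction: () => Promise<ActionResult<T>>,
  onError: (message: string) => void,
  onSuccess: () => void
): Promise<T | null> => {
  const result = await callAction();
  if (result?.serverError) {
    // Surface the server-side failure instead of silently continuing.
    onError(result.serverError);
    return null;
  }
  onSuccess();
  return result?.data ?? null;
};
```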
@@ -0,0 +1,26 @@
|
||||
"use client";
|
||||
|
||||
import { ShieldCheckIcon } from "lucide-react";
|
||||
import Link from "next/link";
|
||||
import { useTranslation } from "react-i18next";
|
||||
|
||||
export const SecurityListTip = () => {
|
||||
const { t } = useTranslation();
|
||||
return (
|
||||
<div className="max-w-4xl">
|
||||
<div className="flex items-center space-x-3 rounded-lg border border-blue-100 bg-blue-50 p-4 text-sm text-blue-900 shadow-sm">
|
||||
<ShieldCheckIcon className="h-5 w-5 flex-shrink-0 text-blue-400" />
|
||||
<p className="text-sm">
|
||||
{t("environments.settings.general.security_list_tip")}{" "}
|
||||
<Link
|
||||
href="https://formbricks.com/security#stay-informed-with-formbricks-security-updates"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="underline hover:text-blue-700">
|
||||
{t("environments.settings.general.security_list_tip_link")}
|
||||
</Link>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
@@ -12,6 +12,7 @@ import { PageHeader } from "@/modules/ui/components/page-header";
|
||||
import { SettingsCard } from "../../components/SettingsCard";
|
||||
import { DeleteOrganization } from "./components/DeleteOrganization";
|
||||
import { EditOrganizationNameForm } from "./components/EditOrganizationNameForm";
|
||||
import { SecurityListTip } from "./components/SecurityListTip";
|
||||
|
||||
const Page = async (props: { params: Promise<{ environmentId: string }> }) => {
|
||||
const params = await props.params;
|
||||
@@ -48,6 +49,7 @@ const Page = async (props: { params: Promise<{ environmentId: string }> }) => {
|
||||
</Alert>
|
||||
</div>
|
||||
)}
|
||||
{!IS_FORMBRICKS_CLOUD && <SecurityListTip />}
|
||||
<SettingsCard
|
||||
title={t("environments.settings.general.organization_name")}
|
||||
description={t("environments.settings.general.organization_name_description")}>
|
||||
|
||||
@@ -316,6 +316,14 @@ export const generateResponseTableColumns = (
},
};

const responseIdColumn: ColumnDef<TResponseTableData> = {
accessorKey: "responseId",
header: () => <div className="gap-x-1.5">{t("common.response_id")}</div>,
cell: ({ row }) => {
return <IdBadge id={row.original.responseId} />;
},
};

const quotasColumn: ColumnDef<TResponseTableData> = {
accessorKey: "quota",
header: t("common.quota"),
@@ -414,6 +422,7 @@ export const generateResponseTableColumns = (
const baseColumns = [
personColumn,
singleUseIdColumn,
responseIdColumn,
dateColumn,
...(showQuotasColumn ? [quotasColumn] : []),
statusColumn,
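The new `responseIdColumn` follows the same TanStack Table `ColumnDef` shape as the surrounding columns and simply renders an `IdBadge` for the raw id. Wide tables often keep such a column registered but hidden until the user toggles it; whether the responses table does so is not shown in this diff, so the sketch below is only an assumption about how the visibility side could be wired with TanStack Table's column-visibility state.

```typescript
import { getCoreRowModel, useReactTable, type ColumnDef } from "@tanstack/react-table";
import { useState } from "react";

// Assumption-only sketch: keep the responseId column available but hidden by default.
interface ResponseRow {
  responseId: string;
  createdAt: string;
}

export const useResponsesTable = (data: ResponseRow[], columns: ColumnDef<ResponseRow>[]) => {
  const [columnVisibility, setColumnVisibility] = useState<Record<string, boolean>>({
    responseId: false, // toggled on from a column-settings menu
  });

  return useReactTable({
    data,
    columns,
    state: { columnVisibility },
    onColumnVisibilityChange: setColumnVisibility,
    getCoreRowModel: getCoreRowModel(),
  });
};
```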
@@ -58,6 +58,7 @@ export const sendEmbedSurveyPreviewEmailAction = authenticatedActionClient
|
||||
ctx.user.email,
|
||||
emailHtml,
|
||||
survey.environmentId,
|
||||
ctx.user.locale,
|
||||
organizationLogoUrl || ""
|
||||
);
|
||||
});
|
||||
|
||||
@@ -8,7 +8,7 @@ import { TSurvey, TSurveyElementSummaryFileUpload } from "@formbricks/types/surv
|
||||
import { TUserLocale } from "@formbricks/types/user";
|
||||
import { timeSince } from "@/lib/time";
|
||||
import { getContactIdentifier } from "@/lib/utils/contact";
|
||||
import { getOriginalFileNameFromUrl } from "@/modules/storage/utils";
|
||||
import { getOriginalFileNameFromUrl } from "@/modules/storage/url-helpers";
|
||||
import { PersonAvatar } from "@/modules/ui/components/avatars";
|
||||
import { Button } from "@/modules/ui/components/button";
|
||||
import { EmptyState } from "@/modules/ui/components/empty-state";
|
||||
|
||||
@@ -213,6 +213,7 @@ export const SurveyAnalysisCTA = ({
|
||||
isFormbricksCloud={isFormbricksCloud}
|
||||
isReadOnly={isReadOnly}
|
||||
isStorageConfigured={isStorageConfigured}
|
||||
projectCustomScripts={project.customHeadScripts}
|
||||
/>
|
||||
)}
|
||||
<SuccessMessage environment={environment} survey={survey} />
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import { VisuallyHidden } from "@radix-ui/react-visually-hidden";
|
||||
import {
|
||||
Code2Icon,
|
||||
CodeIcon,
|
||||
Link2Icon,
|
||||
MailIcon,
|
||||
QrCodeIcon,
|
||||
@@ -18,6 +19,7 @@ import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { TUser } from "@formbricks/types/user";
|
||||
import { AnonymousLinksTab } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/shareEmbedModal/anonymous-links-tab";
|
||||
import { AppTab } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/shareEmbedModal/app-tab";
|
||||
import { CustomHtmlTab } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/shareEmbedModal/custom-html-tab";
|
||||
import { DynamicPopupTab } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/shareEmbedModal/dynamic-popup-tab";
|
||||
import { EmailTab } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/shareEmbedModal/email-tab";
|
||||
import { LinkSettingsTab } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/(analysis)/summary/components/shareEmbedModal/link-settings-tab";
|
||||
@@ -51,6 +53,7 @@ interface ShareSurveyModalProps {
|
||||
isFormbricksCloud: boolean;
|
||||
isReadOnly: boolean;
|
||||
isStorageConfigured: boolean;
|
||||
projectCustomScripts?: string | null;
|
||||
}
|
||||
|
||||
export const ShareSurveyModal = ({
|
||||
@@ -65,6 +68,7 @@ export const ShareSurveyModal = ({
|
||||
isFormbricksCloud,
|
||||
isReadOnly,
|
||||
isStorageConfigured,
|
||||
projectCustomScripts,
|
||||
}: ShareSurveyModalProps) => {
|
||||
const environmentId = survey.environmentId;
|
||||
const [surveyUrl, setSurveyUrl] = useState<string>(getSurveyUrl(survey, publicDomain, "default"));
|
||||
@@ -191,9 +195,24 @@ ShareSurveyModal = ({
componentType: PrettyUrlTab,
componentProps: { publicDomain, isReadOnly },
},
{
id: ShareSettingsType.CUSTOM_HTML,
type: LinkTabsType.SHARE_SETTING,
label: t("environments.surveys.share.custom_html.nav_title"),
icon: CodeIcon,
title: t("environments.surveys.share.custom_html.nav_title"),
description: t("environments.surveys.share.custom_html.description"),
componentType: CustomHtmlTab,
componentProps: { projectCustomScripts, isReadOnly },
},
];

return isFormbricksCloud ? tabs.filter((tab) => tab.id !== ShareSettingsType.PRETTY_URL) : tabs;
// Filter out tabs that should not be shown on Formbricks Cloud
return isFormbricksCloud
? tabs.filter(
(tab) => tab.id !== ShareSettingsType.PRETTY_URL && tab.id !== ShareSettingsType.CUSTOM_HTML
)
: tabs;
}, [
t,
survey,
@@ -207,6 +226,7 @@ ShareSurveyModal = ({
isFormbricksCloud,
email,
isStorageConfigured,
projectCustomScripts,
]);

const getDefaultActiveId = useCallback(() => {
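The memoized tab list above now hides both the pretty-URL and custom-HTML entries on Formbricks Cloud. The rule is small enough to pull out into a standalone helper; the sketch below mirrors the enum values from this diff and is illustrative rather than part of the actual component.

```typescript
// Sketch: which share tabs are visible depending on the deployment.
type ShareTabId = "link-settings" | "pretty-url" | "custom-html" | string;

interface ShareTab {
  id: ShareTabId;
  label: string;
}

const SELF_HOSTED_ONLY_TAB_IDS: ShareTabId[] = ["pretty-url", "custom-html"];

export const getVisibleShareTabs = (tabs: ShareTab[], isFormbricksCloud: boolean): ShareTab[] =>
  isFormbricksCloud ? tabs.filter((tab) => !SELF_HOSTED_ONLY_TAB_IDS.includes(tab.id)) : tabs;
```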
@@ -0,0 +1,163 @@
|
||||
"use client";
|
||||
|
||||
import { AlertTriangleIcon } from "lucide-react";
|
||||
import { useState } from "react";
|
||||
import { useForm } from "react-hook-form";
|
||||
import toast from "react-hot-toast";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { useSurvey } from "@/app/(app)/environments/[environmentId]/surveys/[surveyId]/context/survey-context";
|
||||
import { cn } from "@/lib/cn";
|
||||
import { updateSurveyAction } from "@/modules/survey/editor/actions";
|
||||
import { Alert, AlertDescription } from "@/modules/ui/components/alert";
|
||||
import { Button } from "@/modules/ui/components/button";
|
||||
import {
|
||||
FormControl,
|
||||
FormDescription,
|
||||
FormField,
|
||||
FormItem,
|
||||
FormLabel,
|
||||
FormProvider,
|
||||
} from "@/modules/ui/components/form";
|
||||
import { TabToggle } from "@/modules/ui/components/tab-toggle";
|
||||
|
||||
interface CustomHtmlTabProps {
|
||||
projectCustomScripts: string | null | undefined;
|
||||
isReadOnly: boolean;
|
||||
}
|
||||
|
||||
interface CustomHtmlFormData {
|
||||
customHeadScripts: string;
|
||||
customHeadScriptsMode: TSurvey["customHeadScriptsMode"];
|
||||
}
|
||||
|
||||
export const CustomHtmlTab = ({ projectCustomScripts, isReadOnly }: CustomHtmlTabProps) => {
|
||||
const { t } = useTranslation();
|
||||
const { survey } = useSurvey();
|
||||
const [isSaving, setIsSaving] = useState(false);
|
||||
|
||||
const form = useForm<CustomHtmlFormData>({
|
||||
defaultValues: {
|
||||
customHeadScripts: survey.customHeadScripts ?? "",
|
||||
customHeadScriptsMode: survey.customHeadScriptsMode ?? "add",
|
||||
},
|
||||
});
|
||||
|
||||
const {
|
||||
handleSubmit,
|
||||
watch,
|
||||
setValue,
|
||||
reset,
|
||||
formState: { isDirty },
|
||||
} = form;
|
||||
|
||||
const scriptsMode = watch("customHeadScriptsMode");
|
||||
|
||||
const onSubmit = async (data: CustomHtmlFormData) => {
|
||||
if (isSaving || isReadOnly) return;
|
||||
|
||||
setIsSaving(true);
|
||||
|
||||
const updatedSurvey: TSurvey = {
|
||||
...survey,
|
||||
customHeadScripts: data.customHeadScripts || null,
|
||||
customHeadScriptsMode: data.customHeadScriptsMode,
|
||||
};
|
||||
|
||||
const result = await updateSurveyAction(updatedSurvey);
|
||||
|
||||
if (result?.data) {
|
||||
toast.success(t("environments.surveys.share.custom_html.saved_successfully"));
|
||||
reset(data);
|
||||
} else {
|
||||
toast.error(t("common.something_went_wrong_please_try_again"));
|
||||
}
|
||||
|
||||
setIsSaving(false);
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="px-1">
|
||||
<FormProvider {...form}>
|
||||
<form onSubmit={handleSubmit(onSubmit)} className="space-y-6">
|
||||
{/* Mode Toggle */}
|
||||
<div className="space-y-2">
|
||||
<FormLabel>{t("environments.surveys.share.custom_html.script_mode")}</FormLabel>
|
||||
<TabToggle
|
||||
id="custom-scripts-mode"
|
||||
options={[
|
||||
{ value: "add", label: t("environments.surveys.share.custom_html.add_to_workspace") },
|
||||
{ value: "replace", label: t("environments.surveys.share.custom_html.replace_workspace") },
|
||||
]}
|
||||
defaultSelected={scriptsMode ?? "add"}
|
||||
onChange={(value) => setValue("customHeadScriptsMode", value, { shouldDirty: true })}
|
||||
disabled={isReadOnly}
|
||||
/>
|
||||
<p className="text-sm text-slate-500">
|
||||
{scriptsMode === "add"
|
||||
? t("environments.surveys.share.custom_html.add_mode_description")
|
||||
: t("environments.surveys.share.custom_html.replace_mode_description")}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Workspace Scripts Preview */}
|
||||
{projectCustomScripts && (
|
||||
<div className={scriptsMode === "replace" ? "opacity-50" : ""}>
|
||||
<FormLabel>{t("environments.surveys.share.custom_html.workspace_scripts_label")}</FormLabel>
|
||||
<div className="mt-2 max-h-32 overflow-auto rounded-md border border-slate-200 bg-slate-50 p-3">
|
||||
<pre className="font-mono text-xs whitespace-pre-wrap text-slate-600">
|
||||
{projectCustomScripts}
|
||||
</pre>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{!projectCustomScripts && (
|
||||
<div className="rounded-md border border-slate-200 bg-slate-50 p-3">
|
||||
<p className="text-sm text-slate-500">
|
||||
{t("environments.surveys.share.custom_html.no_workspace_scripts")}
|
||||
</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Survey Scripts */}
|
||||
<FormField
|
||||
control={form.control}
|
||||
name="customHeadScripts"
|
||||
render={({ field }) => (
|
||||
<FormItem>
|
||||
<FormLabel>{t("environments.surveys.share.custom_html.survey_scripts_label")}</FormLabel>
|
||||
<FormDescription>
|
||||
{t("environments.surveys.share.custom_html.survey_scripts_description")}
|
||||
</FormDescription>
|
||||
<FormControl>
|
||||
<textarea
|
||||
rows={8}
|
||||
placeholder={t("environments.surveys.share.custom_html.placeholder")}
|
||||
className={cn(
|
||||
"focus:border-brand-dark flex w-full rounded-md border border-slate-300 bg-white px-3 py-2 font-mono text-xs text-slate-800 placeholder:text-slate-400 focus:ring-2 focus:ring-slate-400 focus:ring-offset-2 focus:outline-none disabled:cursor-not-allowed disabled:opacity-50"
|
||||
)}
|
||||
{...field}
|
||||
disabled={isReadOnly}
|
||||
/>
|
||||
</FormControl>
|
||||
</FormItem>
|
||||
)}
|
||||
/>
|
||||
|
||||
{/* Save Button */}
|
||||
<Button type="submit" disabled={isSaving || isReadOnly || !isDirty}>
|
||||
{isSaving ? t("common.saving") : t("common.save")}
|
||||
</Button>
|
||||
{/* Security Warning */}
|
||||
<Alert variant="warning" className="flex items-start gap-2">
|
||||
<AlertTriangleIcon className="mt-0.5 h-4 w-4 shrink-0" />
|
||||
<AlertDescription>
|
||||
{t("environments.surveys.share.custom_html.security_warning")}
|
||||
</AlertDescription>
|
||||
</Alert>
|
||||
</form>
|
||||
</FormProvider>
|
||||
</div>
|
||||
);
|
||||
};
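The new `CustomHtmlTab` above stores per-survey head scripts together with a `customHeadScriptsMode` of either `"add"` or `"replace"`. How those scripts are merged at render time is not part of this diff, so the following is only a hedged sketch of the likely resolution: `replace` drops the workspace-level scripts, while `add` appends the survey scripts after them.

```typescript
// Hedged sketch of resolving the effective head scripts for a link survey page;
// the real resolution logic is not shown in this compare.
interface HeadScriptsInput {
  projectCustomScripts?: string | null;
  surveyCustomScripts?: string | null;
  mode?: "add" | "replace" | null;
}

export const resolveHeadScripts = ({ projectCustomScripts, surveyCustomScripts, mode }: HeadScriptsInput): string => {
  const workspaceScripts = projectCustomScripts?.trim() ?? "";
  const surveyScripts = surveyCustomScripts?.trim() ?? "";

  if (mode === "replace") {
    // The survey fully overrides whatever is configured on the workspace.
    return surveyScripts;
  }
  // Default ("add"): workspace scripts first, then the survey-specific ones.
  return [workspaceScripts, surveyScripts].filter(Boolean).join("\n");
};
```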
@@ -13,6 +13,7 @@ export enum ShareViaType {
|
||||
export enum ShareSettingsType {
|
||||
LINK_SETTINGS = "link-settings",
|
||||
PRETTY_URL = "pretty-url",
|
||||
CUSTOM_HTML = "custom-html",
|
||||
}
|
||||
|
||||
export enum LinkTabsType {
|
||||
|
||||
@@ -21,6 +21,7 @@ import {
|
||||
ListOrderedIcon,
|
||||
MessageSquareTextIcon,
|
||||
MousePointerClickIcon,
|
||||
NetworkIcon,
|
||||
PieChartIcon,
|
||||
Rows3Icon,
|
||||
SmartphoneIcon,
|
||||
@@ -99,6 +100,7 @@ const elementIcons = {
|
||||
action: MousePointerClickIcon,
|
||||
country: FlagIcon,
|
||||
url: LinkIcon,
|
||||
ipAddress: NetworkIcon,
|
||||
|
||||
// others
|
||||
Language: LanguagesIcon,
|
||||
@@ -190,7 +192,7 @@ export const ElementsComboBox = ({ options, selected, onChangeValue }: ElementCo
|
||||
value={inputValue}
|
||||
onValueChange={setInputValue}
|
||||
placeholder={open ? `${t("common.search")}...` : t("common.select_filter")}
|
||||
className="max-w-full grow border-none p-0 pl-2 text-sm shadow-none outline-none ring-offset-transparent focus:border-none focus:shadow-none focus:outline-none focus:ring-offset-0"
|
||||
className="max-w-full grow border-none p-0 pl-2 text-sm shadow-none ring-offset-transparent outline-none focus:border-none focus:shadow-none focus:ring-offset-0 focus:outline-none"
|
||||
/>
|
||||
)}
|
||||
<Button
|
||||
|
||||
@@ -21,6 +21,7 @@ import { createOrUpdateIntegrationAction } from "@/app/(app)/environments/[envir
|
||||
import { BaseSelectDropdown } from "@/app/(app)/environments/[environmentId]/workspace/integrations/airtable/components/BaseSelectDropdown";
|
||||
import { fetchTables } from "@/app/(app)/environments/[environmentId]/workspace/integrations/airtable/lib/airtable";
|
||||
import AirtableLogo from "@/images/airtableLogo.svg";
|
||||
import { getFormattedErrorMessage } from "@/lib/utils/helper";
|
||||
import { recallToHeadline } from "@/lib/utils/recall";
|
||||
import { getElementsFromBlocks } from "@/modules/survey/lib/client-utils";
|
||||
import { AdditionalIntegrationSettings } from "@/modules/ui/components/additional-integration-settings";
|
||||
@@ -268,7 +269,14 @@ export const AddIntegrationModal = ({
|
||||
airtableIntegrationData.config?.data.push(integrationData);
|
||||
}
|
||||
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: airtableIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: airtableIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
if (isEditMode) {
|
||||
toast.success(t("environments.integrations.integration_updated_successfully"));
|
||||
} else {
|
||||
@@ -304,7 +312,11 @@ export const AddIntegrationModal = ({
|
||||
const integrationData = structuredClone(airtableIntegrationData);
|
||||
integrationData.config.data.splice(index, 1);
|
||||
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData });
|
||||
const result = await createOrUpdateIntegrationAction({ environmentId, integrationData });
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
handleClose();
|
||||
router.refresh();
|
||||
|
||||
|
||||
@@ -165,7 +165,14 @@ export const AddIntegrationModal = ({
|
||||
// create action
|
||||
googleSheetIntegrationData.config.data.push(integrationData);
|
||||
}
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: googleSheetIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: googleSheetIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
if (selectedIntegration) {
|
||||
toast.success(t("environments.integrations.integration_updated_successfully"));
|
||||
} else {
|
||||
@@ -205,7 +212,14 @@ export const AddIntegrationModal = ({
|
||||
googleSheetIntegrationData.config.data.splice(selectedIntegration!.index, 1);
|
||||
try {
|
||||
setIsDeleting(true);
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: googleSheetIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: googleSheetIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
toast.success(t("environments.integrations.integration_removed_successfully"));
|
||||
setOpen(false);
|
||||
} catch (error) {
|
||||
|
||||
@@ -22,6 +22,7 @@ import {
|
||||
createEmptyMapping,
|
||||
} from "@/app/(app)/environments/[environmentId]/workspace/integrations/notion/components/MappingRow";
|
||||
import NotionLogo from "@/images/notion.png";
|
||||
import { getFormattedErrorMessage } from "@/lib/utils/helper";
|
||||
import { recallToHeadline } from "@/lib/utils/recall";
|
||||
import { getElementsFromBlocks } from "@/modules/survey/lib/client-utils";
|
||||
import { Button } from "@/modules/ui/components/button";
|
||||
@@ -217,7 +218,14 @@ export const AddIntegrationModal = ({
|
||||
notionIntegrationData.config.data.push(integrationData);
|
||||
}
|
||||
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: notionIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: notionIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
if (selectedIntegration) {
|
||||
toast.success(t("environments.integrations.integration_updated_successfully"));
|
||||
} else {
|
||||
@@ -236,7 +244,14 @@ export const AddIntegrationModal = ({
|
||||
notionIntegrationData.config.data.splice(selectedIntegration!.index, 1);
|
||||
try {
|
||||
setIsDeleting(true);
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: notionIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: notionIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
toast.success(t("environments.integrations.integration_removed_successfully"));
|
||||
setOpen(false);
|
||||
} catch (error) {
|
||||
|
||||
@@ -17,6 +17,7 @@ import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { getTextContent } from "@formbricks/types/surveys/validation";
|
||||
import { createOrUpdateIntegrationAction } from "@/app/(app)/environments/[environmentId]/workspace/integrations/actions";
|
||||
import SlackLogo from "@/images/slacklogo.png";
|
||||
import { getFormattedErrorMessage } from "@/lib/utils/helper";
|
||||
import { recallToHeadline } from "@/lib/utils/recall";
|
||||
import { getElementsFromBlocks } from "@/modules/survey/lib/client-utils";
|
||||
import { AdditionalIntegrationSettings } from "@/modules/ui/components/additional-integration-settings";
|
||||
@@ -144,7 +145,14 @@ export const AddChannelMappingModal = ({
|
||||
// create action
|
||||
slackIntegrationData.config.data.push(integrationData);
|
||||
}
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: slackIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: slackIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
if (selectedIntegration) {
|
||||
toast.success(t("environments.integrations.integration_updated_successfully"));
|
||||
} else {
|
||||
@@ -181,7 +189,14 @@ export const AddChannelMappingModal = ({
|
||||
slackIntegrationData.config.data.splice(selectedIntegration!.index, 1);
|
||||
try {
|
||||
setIsDeleting(true);
|
||||
await createOrUpdateIntegrationAction({ environmentId, integrationData: slackIntegrationData });
|
||||
const result = await createOrUpdateIntegrationAction({
|
||||
environmentId,
|
||||
integrationData: slackIntegrationData,
|
||||
});
|
||||
if (result?.serverError) {
|
||||
toast.error(getFormattedErrorMessage(result));
|
||||
return;
|
||||
}
|
||||
toast.success(t("environments.integrations.integration_removed_successfully"));
|
||||
setOpen(false);
|
||||
} catch (error) {
|
||||
|
||||
@@ -82,6 +82,7 @@ const mockPipelineInput = {
|
||||
},
|
||||
country: "USA",
|
||||
action: "Action Name",
|
||||
ipAddress: "203.0.113.7",
|
||||
} as TResponseMeta,
|
||||
personAttributes: {},
|
||||
singleUseId: null,
|
||||
@@ -346,7 +347,7 @@ describe("handleIntegrations", () => {
|
||||
expect(airtableWriteData).toHaveBeenCalledTimes(1);
|
||||
// Adjust expectations for metadata and recalled question
|
||||
const expectedMetadataString =
|
||||
"Source: web\nURL: http://example.com\nBrowser: Chrome\nOS: Mac OS\nDevice: Desktop\nCountry: USA\nAction: Action Name";
|
||||
"Source: web\nURL: http://example.com\nBrowser: Chrome\nOS: Mac OS\nDevice: Desktop\nCountry: USA\nAction: Action Name\nIP Address: 203.0.113.7";
|
||||
expect(airtableWriteData).toHaveBeenCalledWith(
|
||||
mockAirtableIntegration.config.key,
|
||||
mockAirtableIntegration.config.data[0],
|
||||
|
||||
@@ -31,6 +31,7 @@ const convertMetaObjectToString = (metadata: TResponseMeta): string => {
if (metadata.userAgent?.device) result.push(`Device: ${metadata.userAgent.device}`);
if (metadata.country) result.push(`Country: ${metadata.country}`);
if (metadata.action) result.push(`Action: ${metadata.action}`);
if (metadata.ipAddress) result.push(`IP Address: ${metadata.ipAddress}`);

// Join all the elements in the result array with a newline for formatting
return result.join("\n");
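Taken together with the updated test expectation a few hunks above, the helper now serializes response metadata into a newline-joined string that ends with the IP address when one is present. Below is a reconstruction sketch of the whole function, based only on the fields visible in this compare; the real `TResponseMeta` type may carry more fields.

```typescript
// Reconstruction sketch of convertMetaObjectToString after this change; the field
// list is inferred from the hunk and the updated Airtable test expectation.
interface ResponseMetaSketch {
  source?: string;
  url?: string;
  userAgent?: { browser?: string; os?: string; device?: string };
  country?: string;
  action?: string;
  ipAddress?: string;
}

const convertMetaObjectToStringSketch = (metadata: ResponseMetaSketch): string => {
  const result: string[] = [];
  if (metadata.source) result.push(`Source: ${metadata.source}`);
  if (metadata.url) result.push(`URL: ${metadata.url}`);
  if (metadata.userAgent?.browser) result.push(`Browser: ${metadata.userAgent.browser}`);
  if (metadata.userAgent?.os) result.push(`OS: ${metadata.userAgent.os}`);
  if (metadata.userAgent?.device) result.push(`Device: ${metadata.userAgent.device}`);
  if (metadata.country) result.push(`Country: ${metadata.country}`);
  if (metadata.action) result.push(`Action: ${metadata.action}`);
  if (metadata.ipAddress) result.push(`IP Address: ${metadata.ipAddress}`);
  // One "Key: value" pair per line keeps the integration cell readable.
  return result.join("\n");
};
```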
@@ -1,5 +1,6 @@
|
||||
import { PipelineTriggers, Webhook } from "@prisma/client";
|
||||
import { headers } from "next/headers";
|
||||
import { v7 as uuidv7 } from "uuid";
|
||||
import { prisma } from "@formbricks/database";
|
||||
import { logger } from "@formbricks/logger";
|
||||
import { ResourceNotFoundError } from "@formbricks/types/errors";
|
||||
@@ -8,6 +9,7 @@ import { ZPipelineInput } from "@/app/api/(internal)/pipeline/types/pipelines";
|
||||
import { responses } from "@/app/lib/api/response";
|
||||
import { transformErrorToDetails } from "@/app/lib/api/validator";
|
||||
import { CRON_SECRET } from "@/lib/constants";
|
||||
import { generateStandardWebhookSignature } from "@/lib/crypto";
|
||||
import { getIntegrations } from "@/lib/integration/service";
|
||||
import { getOrganizationByEnvironmentId } from "@/lib/organization/service";
|
||||
import { getResponseCountBySurveyId } from "@/lib/response/service";
|
||||
@@ -28,7 +30,10 @@ export const POST = async (request: Request) => {
|
||||
}
|
||||
|
||||
const jsonInput = await request.json();
|
||||
const convertedJsonInput = convertDatesInObject(jsonInput);
|
||||
const convertedJsonInput = convertDatesInObject(
|
||||
jsonInput,
|
||||
new Set(["contactAttributes", "variables", "data", "meta"])
|
||||
);
|
||||
|
||||
const inputValidation = ZPipelineInput.safeParse(convertedJsonInput);
|
||||
|
||||
@@ -90,28 +95,50 @@ export const POST = async (request: Request) => {
|
||||
]);
|
||||
};
|
||||
|
||||
const webhookPromises = webhooks.map((webhook) =>
|
||||
fetchWithTimeout(webhook.url, {
|
||||
method: "POST",
|
||||
headers: { "content-type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
webhookId: webhook.id,
|
||||
event,
|
||||
data: {
|
||||
...response,
|
||||
survey: {
|
||||
title: survey.name,
|
||||
type: survey.type,
|
||||
status: survey.status,
|
||||
createdAt: survey.createdAt,
|
||||
updatedAt: survey.updatedAt,
|
||||
},
|
||||
const webhookPromises = webhooks.map((webhook) => {
|
||||
const body = JSON.stringify({
|
||||
webhookId: webhook.id,
|
||||
event,
|
||||
data: {
|
||||
...response,
|
||||
survey: {
|
||||
title: survey.name,
|
||||
type: survey.type,
|
||||
status: survey.status,
|
||||
createdAt: survey.createdAt,
|
||||
updatedAt: survey.updatedAt,
|
||||
},
|
||||
}),
|
||||
},
|
||||
});
|
||||
|
||||
// Generate Standard Webhooks headers
|
||||
const webhookMessageId = uuidv7();
|
||||
const webhookTimestamp = Math.floor(Date.now() / 1000);
|
||||
|
||||
const requestHeaders: Record<string, string> = {
|
||||
"content-type": "application/json",
|
||||
"webhook-id": webhookMessageId,
|
||||
"webhook-timestamp": webhookTimestamp.toString(),
|
||||
};
|
||||
|
||||
// Add signature if webhook has a secret configured
|
||||
if (webhook.secret) {
|
||||
requestHeaders["webhook-signature"] = generateStandardWebhookSignature(
|
||||
webhookMessageId,
|
||||
webhookTimestamp,
|
||||
body,
|
||||
webhook.secret
|
||||
);
|
||||
}
|
||||
|
||||
return fetchWithTimeout(webhook.url, {
|
||||
method: "POST",
|
||||
headers: requestHeaders,
|
||||
body,
|
||||
}).catch((error) => {
|
||||
logger.error({ error, url: request.url }, `Webhook call to ${webhook.url} failed`);
|
||||
})
|
||||
);
|
||||
});
|
||||
});
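The webhook dispatch above now sends Standard Webhooks style headers: a `webhook-id`, a unix `webhook-timestamp`, and, when the webhook has a secret, a `webhook-signature` produced by `generateStandardWebhookSignature`. That function's internals are not shown here, so the receiver-side sketch below assumes the common Standard Webhooks construction of an HMAC-SHA256 over `id.timestamp.body`, base64-encoded and prefixed with a version tag.

```typescript
import { createHmac, timingSafeEqual } from "node:crypto";

// Hedged sketch of verifying a Standard Webhooks style signature on the receiving
// side. The exact scheme used by generateStandardWebhookSignature is assumed, not
// taken from this diff.
export const verifyWebhookSignature = (
  headers: Record<string, string | undefined>,
  rawBody: string,
  secret: string,
  toleranceSeconds = 300
): boolean => {
  const id = headers["webhook-id"];
  const timestamp = Number(headers["webhook-timestamp"]);
  const signatureHeader = headers["webhook-signature"] ?? "";
  if (!id || !Number.isFinite(timestamp)) return false;

  // Reject stale or future-dated deliveries to limit replay attacks.
  if (Math.abs(Date.now() / 1000 - timestamp) > toleranceSeconds) return false;

  const expected = createHmac("sha256", secret).update(`${id}.${timestamp}.${rawBody}`).digest("base64");

  // The header may contain several space-separated signatures like "v1,<base64>".
  return signatureHeader.split(" ").some((candidate) => {
    const value = candidate.includes(",") ? candidate.split(",")[1] : candidate;
    const a = Buffer.from(value);
    const b = Buffer.from(expected);
    return a.length === b.length && timingSafeEqual(a, b);
  });
};
```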
if (event === "responseFinished") {
|
||||
// Fetch integrations and responseCount in parallel
|
||||
@@ -191,7 +218,14 @@ export const POST = async (request: Request) => {
|
||||
}
|
||||
|
||||
const emailPromises = usersWithNotifications.map((user) =>
|
||||
sendResponseFinishedEmail(user.email, environmentId, survey, response, responseCount).catch((error) => {
|
||||
sendResponseFinishedEmail(
|
||||
user.email,
|
||||
user.locale,
|
||||
environmentId,
|
||||
survey,
|
||||
response,
|
||||
responseCount
|
||||
).catch((error) => {
|
||||
logger.error(
|
||||
{ error, url: request.url, userEmail: user.email },
|
||||
`Failed to send email to ${user.email}`
|
||||
|
||||
@@ -1,4 +1,5 @@
import { google } from "googleapis";
import { getServerSession } from "next-auth";
import { responses } from "@/app/lib/api/response";
import {
GOOGLE_SHEETS_CLIENT_ID,
@@ -6,18 +7,29 @@ import {
GOOGLE_SHEETS_REDIRECT_URL,
WEBAPP_URL,
} from "@/lib/constants";
import { hasUserEnvironmentAccess } from "@/lib/environment/auth";
import { createOrUpdateIntegration } from "@/lib/integration/service";
import { authOptions } from "@/modules/auth/lib/authOptions";

export const GET = async (req: Request) => {
const url = req.url;
const queryParams = new URLSearchParams(url.split("?")[1]); // Split the URL and get the query parameters
const environmentId = queryParams.get("state"); // Get the value of the 'state' parameter
const code = queryParams.get("code");
const url = new URL(req.url);
const environmentId = url.searchParams.get("state");
const code = url.searchParams.get("code");

if (!environmentId) {
return responses.badRequestResponse("Invalid environmentId");
}

const session = await getServerSession(authOptions);
if (!session) {
return responses.notAuthenticatedResponse();
}

const canUserAccessEnvironment = await hasUserEnvironmentAccess(session.user.id, environmentId);
if (!canUserAccessEnvironment) {
return responses.unauthorizedResponse();
}

if (code && typeof code !== "string") {
return responses.badRequestResponse("`code` must be a string");
}
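Besides the new session and environment-access checks, the callback above now reads its query parameters through the WHATWG `URL` API instead of splitting the request URL by hand, which handles percent-encoding and missing parameters correctly. A small illustration with a made-up callback URL:

```typescript
// Illustrative only; the URL below is hypothetical.
const requestUrl = "https://app.example.com/api/google-sheet/callback?state=env_123&code=abc%20def";

const url = new URL(requestUrl);
const environmentId = url.searchParams.get("state"); // "env_123"
const code = url.searchParams.get("code"); // "abc def" (decoded); null if the parameter is absent
```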
@@ -1,180 +0,0 @@
|
||||
// Deprecated: This api route is deprecated now and will be removed in the future.
|
||||
// Deprecated: This is currently only being used for the older react native SDKs. Please upgrade to the latest SDKs.
|
||||
import { NextRequest, userAgent } from "next/server";
|
||||
import { prisma } from "@formbricks/database";
|
||||
import { logger } from "@formbricks/logger";
|
||||
import { TJsPeopleUserIdInput, ZJsPeopleUserIdInput } from "@formbricks/types/js";
|
||||
import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { getContactByUserId } from "@/app/api/v1/client/[environmentId]/app/sync/lib/contact";
|
||||
import { getSyncSurveys } from "@/app/api/v1/client/[environmentId]/app/sync/lib/survey";
|
||||
import { replaceAttributeRecall } from "@/app/api/v1/client/[environmentId]/app/sync/lib/utils";
|
||||
import { responses } from "@/app/lib/api/response";
|
||||
import { transformErrorToDetails } from "@/app/lib/api/validator";
|
||||
import { withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { getActionClasses } from "@/lib/actionClass/service";
|
||||
import { IS_FORMBRICKS_CLOUD } from "@/lib/constants";
|
||||
import { getEnvironment, updateEnvironment } from "@/lib/environment/service";
|
||||
import {
|
||||
getMonthlyOrganizationResponseCount,
|
||||
getOrganizationByEnvironmentId,
|
||||
} from "@/lib/organization/service";
|
||||
import { getProjectByEnvironmentId } from "@/lib/project/service";
|
||||
import { COLOR_DEFAULTS } from "@/lib/styling/constants";
|
||||
|
||||
const validateInput = (
|
||||
environmentId: string,
|
||||
userId: string
|
||||
): { isValid: true; data: TJsPeopleUserIdInput } | { isValid: false; error: Response } => {
|
||||
const inputValidation = ZJsPeopleUserIdInput.safeParse({ environmentId, userId });
|
||||
if (!inputValidation.success) {
|
||||
return {
|
||||
isValid: false,
|
||||
error: responses.badRequestResponse(
|
||||
"Fields are missing or incorrectly formatted",
|
||||
transformErrorToDetails(inputValidation.error),
|
||||
true
|
||||
),
|
||||
};
|
||||
}
|
||||
return { isValid: true, data: inputValidation.data };
|
||||
};
|
||||
|
||||
const checkResponseLimit = async (environmentId: string): Promise<boolean> => {
|
||||
if (!IS_FORMBRICKS_CLOUD) return false;
|
||||
|
||||
const organization = await getOrganizationByEnvironmentId(environmentId);
|
||||
if (!organization) {
|
||||
logger.error({ environmentId }, "Organization does not exist");
|
||||
|
||||
// fail closed if the organization does not exist
|
||||
return true;
|
||||
}
|
||||
|
||||
const currentResponseCount = await getMonthlyOrganizationResponseCount(organization.id);
|
||||
const monthlyResponseLimit = organization.billing.limits.monthly.responses;
|
||||
const isLimitReached = monthlyResponseLimit !== null && currentResponseCount >= monthlyResponseLimit;
|
||||
|
||||
return isLimitReached;
|
||||
};
|
||||
|
||||
export const OPTIONS = async (): Promise<Response> => {
|
||||
return responses.successResponse({}, true);
|
||||
};
|
||||
|
||||
export const GET = withV1ApiWrapper({
|
||||
handler: async ({
|
||||
req,
|
||||
props,
|
||||
}: {
|
||||
req: NextRequest;
|
||||
props: { params: Promise<{ environmentId: string; userId: string }> };
|
||||
}) => {
|
||||
const params = await props.params;
|
||||
try {
|
||||
const { device } = userAgent(req);
|
||||
|
||||
// validate using zod
|
||||
const validation = validateInput(params.environmentId, params.userId);
|
||||
if (!validation.isValid) {
|
||||
return { response: validation.error };
|
||||
}
|
||||
|
||||
const { environmentId, userId } = validation.data;
|
||||
|
||||
const environment = await getEnvironment(environmentId);
|
||||
if (!environment) {
|
||||
throw new Error("Environment does not exist");
|
||||
}
|
||||
|
||||
const project = await getProjectByEnvironmentId(environmentId);
|
||||
|
||||
if (!project) {
|
||||
throw new Error("Project not found");
|
||||
}
|
||||
|
||||
if (!environment.appSetupCompleted) {
|
||||
await updateEnvironment(environment.id, { appSetupCompleted: true });
|
||||
}
|
||||
|
||||
// check organization subscriptions and response limits
|
||||
const isAppSurveyResponseLimitReached = await checkResponseLimit(environmentId);
|
||||
|
||||
let contact = await getContactByUserId(environmentId, userId);
|
||||
if (!contact) {
|
||||
contact = await prisma.contact.create({
|
||||
data: {
|
||||
attributes: {
|
||||
create: {
|
||||
attributeKey: {
|
||||
connect: {
|
||||
key_environmentId: {
|
||||
key: "userId",
|
||||
environmentId,
|
||||
},
|
||||
},
|
||||
},
|
||||
value: userId,
|
||||
},
|
||||
},
|
||||
environment: { connect: { id: environmentId } },
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
attributes: { select: { attributeKey: { select: { key: true } }, value: true } },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
const contactAttributes = contact.attributes.reduce((acc, attribute) => {
|
||||
acc[attribute.attributeKey.key] = attribute.value;
|
||||
return acc;
|
||||
}, {}) as Record<string, string>;
|
||||
|
||||
const [surveys, actionClasses] = await Promise.all([
|
||||
getSyncSurveys(
|
||||
environmentId,
|
||||
contact.id,
|
||||
contactAttributes,
|
||||
device.type === "mobile" ? "phone" : "desktop"
|
||||
),
|
||||
getActionClasses(environmentId),
|
||||
]);
|
||||
|
||||
const updatedProject: any = {
|
||||
...project,
|
||||
brandColor: project.styling.brandColor?.light ?? COLOR_DEFAULTS.brandColor,
|
||||
...(project.styling.highlightBorderColor?.light && {
|
||||
highlightBorderColor: project.styling.highlightBorderColor.light,
|
||||
}),
|
||||
};
|
||||
|
||||
const language = contactAttributes["language"];
|
||||
|
||||
// Scenario 1: Multi language and updated trigger action classes supported.
|
||||
// Use the surveys as they are.
|
||||
let transformedSurveys: TSurvey[] = surveys;
|
||||
|
||||
// creating state object
|
||||
let state = {
|
||||
surveys: !isAppSurveyResponseLimitReached
|
||||
? transformedSurveys.map((survey) => replaceAttributeRecall(survey, contactAttributes))
|
||||
: [],
|
||||
actionClasses,
|
||||
language,
|
||||
project: updatedProject,
|
||||
};
|
||||
|
||||
return {
|
||||
response: responses.successResponse({ ...state }, true),
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error({ error, url: req.url }, "Error in GET /api/v1/client/[environmentId]/app/sync/[userId]");
|
||||
return {
|
||||
response: responses.internalServerErrorResponse(
|
||||
"Unable to handle the request: " + error.message,
|
||||
true
|
||||
),
|
||||
};
|
||||
}
|
||||
},
|
||||
});
|
||||
@@ -1,83 +0,0 @@
|
||||
import { afterEach, describe, expect, test, vi } from "vitest";
|
||||
import { prisma } from "@formbricks/database";
|
||||
import { TContact } from "@/modules/ee/contacts/types/contact";
|
||||
import { getContactByUserId } from "./contact";
|
||||
|
||||
// Mock prisma
|
||||
vi.mock("@formbricks/database", () => ({
|
||||
prisma: {
|
||||
contact: {
|
||||
findFirst: vi.fn(),
|
||||
},
|
||||
},
|
||||
}));
|
||||
|
||||
const environmentId = "test-environment-id";
|
||||
const userId = "test-user-id";
|
||||
const contactId = "test-contact-id";
|
||||
|
||||
const contactMock: Partial<TContact> & {
|
||||
attributes: { value: string; attributeKey: { key: string } }[];
|
||||
} = {
|
||||
id: contactId,
|
||||
attributes: [
|
||||
{ attributeKey: { key: "userId" }, value: userId },
|
||||
{ attributeKey: { key: "email" }, value: "test@example.com" },
|
||||
],
|
||||
};
|
||||
|
||||
describe("getContactByUserId", () => {
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
test("should return contact if found", async () => {
|
||||
vi.mocked(prisma.contact.findFirst).mockResolvedValue(contactMock as any);
|
||||
|
||||
const contact = await getContactByUserId(environmentId, userId);
|
||||
|
||||
expect(prisma.contact.findFirst).toHaveBeenCalledWith({
|
||||
where: {
|
||||
attributes: {
|
||||
some: {
|
||||
attributeKey: {
|
||||
key: "userId",
|
||||
environmentId,
|
||||
},
|
||||
value: userId,
|
||||
},
|
||||
},
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
attributes: { select: { attributeKey: { select: { key: true } }, value: true } },
|
||||
},
|
||||
});
|
||||
expect(contact).toEqual(contactMock);
|
||||
});
|
||||
|
||||
test("should return null if contact not found", async () => {
|
||||
vi.mocked(prisma.contact.findFirst).mockResolvedValue(null);
|
||||
|
||||
const contact = await getContactByUserId(environmentId, userId);
|
||||
|
||||
expect(prisma.contact.findFirst).toHaveBeenCalledWith({
|
||||
where: {
|
||||
attributes: {
|
||||
some: {
|
||||
attributeKey: {
|
||||
key: "userId",
|
||||
environmentId,
|
||||
},
|
||||
value: userId,
|
||||
},
|
||||
},
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
attributes: { select: { attributeKey: { select: { key: true } }, value: true } },
|
||||
},
|
||||
});
|
||||
expect(contact).toBeNull();
|
||||
});
|
||||
});
|
||||
@@ -1,42 +0,0 @@
|
||||
import "server-only";
|
||||
import { cache as reactCache } from "react";
|
||||
import { prisma } from "@formbricks/database";
|
||||
|
||||
export const getContactByUserId = reactCache(
|
||||
async (
|
||||
environmentId: string,
|
||||
userId: string
|
||||
): Promise<{
|
||||
attributes: {
|
||||
value: string;
|
||||
attributeKey: {
|
||||
key: string;
|
||||
};
|
||||
}[];
|
||||
id: string;
|
||||
} | null> => {
|
||||
const contact = await prisma.contact.findFirst({
|
||||
where: {
|
||||
attributes: {
|
||||
some: {
|
||||
attributeKey: {
|
||||
key: "userId",
|
||||
environmentId,
|
||||
},
|
||||
value: userId,
|
||||
},
|
||||
},
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
attributes: { select: { attributeKey: { select: { key: true } }, value: true } },
|
||||
},
|
||||
});
|
||||
|
||||
if (!contact) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return contact;
|
||||
}
|
||||
);
|
||||
@@ -1,323 +0,0 @@
|
||||
import { Prisma } from "@prisma/client";
|
||||
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
|
||||
import { prisma } from "@formbricks/database";
|
||||
import { logger } from "@formbricks/logger";
|
||||
import { DatabaseError } from "@formbricks/types/errors";
|
||||
import { TProject } from "@formbricks/types/project";
|
||||
import { TSegment } from "@formbricks/types/segment";
|
||||
import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { getProjectByEnvironmentId } from "@/lib/project/service";
|
||||
import { getSurveys } from "@/lib/survey/service";
|
||||
import { anySurveyHasFilters } from "@/lib/survey/utils";
|
||||
import { diffInDays } from "@/lib/utils/datetime";
|
||||
import { evaluateSegment } from "@/modules/ee/contacts/segments/lib/segments";
|
||||
import { getSyncSurveys } from "./survey";
|
||||
|
||||
vi.mock("@/lib/project/service", () => ({
|
||||
getProjectByEnvironmentId: vi.fn(),
|
||||
}));
|
||||
vi.mock("@/lib/survey/service", () => ({
|
||||
getSurveys: vi.fn(),
|
||||
}));
|
||||
vi.mock("@/lib/survey/utils", () => ({
|
||||
anySurveyHasFilters: vi.fn(),
|
||||
}));
|
||||
vi.mock("@/lib/utils/datetime", () => ({
|
||||
diffInDays: vi.fn(),
|
||||
}));
|
||||
vi.mock("@/lib/utils/validate", () => ({
|
||||
validateInputs: vi.fn(),
|
||||
}));
|
||||
vi.mock("@/modules/ee/contacts/segments/lib/segments", () => ({
|
||||
evaluateSegment: vi.fn(),
|
||||
}));
|
||||
vi.mock("@formbricks/database", () => ({
|
||||
prisma: {
|
||||
display: {
|
||||
findMany: vi.fn(),
|
||||
},
|
||||
response: {
|
||||
findMany: vi.fn(),
|
||||
},
|
||||
},
|
||||
}));
|
||||
vi.mock("@formbricks/logger", () => ({
|
||||
logger: {
|
||||
error: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
const environmentId = "test-env-id";
|
||||
const contactId = "test-contact-id";
|
||||
const contactAttributes = { userId: "user1", email: "test@example.com" };
|
||||
const deviceType = "desktop";
|
||||
|
||||
const mockProject = {
|
||||
id: "proj1",
|
||||
name: "Test Project",
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
organizationId: "org1",
|
||||
environments: [],
|
||||
recontactDays: 10,
|
||||
inAppSurveyBranding: true,
|
||||
linkSurveyBranding: true,
|
||||
placement: "bottomRight",
|
||||
clickOutsideClose: true,
|
||||
darkOverlay: false,
|
||||
languages: [],
|
||||
} as unknown as TProject;
|
||||
|
||||
const baseSurvey: TSurvey = {
|
||||
id: "survey1",
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
name: "Test Survey 1",
|
||||
environmentId: environmentId,
|
||||
type: "app",
|
||||
status: "inProgress",
|
||||
questions: [],
|
||||
displayOption: "displayOnce",
|
||||
recontactDays: null,
|
||||
autoClose: null,
|
||||
delay: 0,
|
||||
displayPercentage: null,
|
||||
autoComplete: null,
|
||||
segment: null,
|
||||
surveyClosedMessage: null,
|
||||
singleUse: null,
|
||||
styling: null,
|
||||
pin: null,
|
||||
displayLimit: null,
|
||||
welcomeCard: { enabled: false } as TSurvey["welcomeCard"],
|
||||
endings: [],
|
||||
triggers: [],
|
||||
languages: [],
|
||||
variables: [],
|
||||
hiddenFields: { enabled: false },
|
||||
createdBy: null,
|
||||
isSingleResponsePerEmailEnabled: false,
|
||||
isVerifyEmailEnabled: false,
|
||||
projectOverwrites: null,
|
||||
showLanguageSwitch: false,
|
||||
isBackButtonHidden: false,
|
||||
followUps: [],
|
||||
recaptcha: { enabled: false, threshold: 0.5 },
|
||||
};
|
||||
|
||||
// Helper function to create mock display objects
|
||||
const createMockDisplay = (id: string, surveyId: string, contactId: string, createdAt?: Date) => ({
|
||||
id,
|
||||
createdAt: createdAt || new Date(),
|
||||
updatedAt: new Date(),
|
||||
surveyId,
|
||||
contactId,
|
||||
responseId: null,
|
||||
status: null,
|
||||
});
|
||||
|
||||
// Helper function to create mock response objects
|
||||
const createMockResponse = (id: string, surveyId: string, contactId: string, createdAt?: Date) => ({
|
||||
id,
|
||||
createdAt: createdAt || new Date(),
|
||||
updatedAt: new Date(),
|
||||
finished: false,
|
||||
surveyId,
|
||||
contactId,
|
||||
endingId: null,
|
||||
data: {},
|
||||
variables: {},
|
||||
ttc: {},
|
||||
meta: {},
|
||||
contactAttributes: null,
|
||||
singleUseId: null,
|
||||
language: null,
|
||||
displayId: null,
|
||||
});
|
||||
|
||||
describe("getSyncSurveys", () => {
|
||||
beforeEach(() => {
|
||||
vi.mocked(getProjectByEnvironmentId).mockResolvedValue(mockProject);
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([]);
|
||||
vi.mocked(prisma.response.findMany).mockResolvedValue([]);
|
||||
vi.mocked(anySurveyHasFilters).mockReturnValue(false);
|
||||
vi.mocked(evaluateSegment).mockResolvedValue(true);
|
||||
vi.mocked(diffInDays).mockReturnValue(100); // Assume enough days passed
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
test("should throw error if product not found", async () => {
|
||||
vi.mocked(getProjectByEnvironmentId).mockResolvedValue(null);
|
||||
await expect(getSyncSurveys(environmentId, contactId, contactAttributes, deviceType)).rejects.toThrow(
|
||||
"Project not found"
|
||||
);
|
||||
});
|
||||
|
||||
test("should return empty array if no surveys found", async () => {
|
||||
vi.mocked(getSurveys).mockResolvedValue([]);
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
test("should return empty array if no 'app' type surveys in progress", async () => {
|
||||
const surveys: TSurvey[] = [
|
||||
{ ...baseSurvey, id: "s1", type: "link", status: "inProgress" },
|
||||
{ ...baseSurvey, id: "s2", type: "app", status: "paused" },
|
||||
];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]);
|
||||
});
|
||||
|
||||
test("should filter by displayOption 'displayOnce'", async () => {
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "displayOnce" }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([createMockDisplay("d1", "s1", contactId)]); // Already displayed
|
||||
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]);
|
||||
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([]); // Not displayed yet
|
||||
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result2).toEqual(surveys);
|
||||
});
|
||||
|
||||
test("should filter by displayOption 'displayMultiple'", async () => {
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "displayMultiple" }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(prisma.response.findMany).mockResolvedValue([createMockResponse("r1", "s1", contactId)]); // Already responded
|
||||
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]);
|
||||
|
||||
vi.mocked(prisma.response.findMany).mockResolvedValue([]); // Not responded yet
|
||||
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result2).toEqual(surveys);
|
||||
});
|
||||
|
||||
test("should filter by displayOption 'displaySome'", async () => {
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "displaySome", displayLimit: 2 }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([
|
||||
createMockDisplay("d1", "s1", contactId),
|
||||
createMockDisplay("d2", "s1", contactId),
|
||||
]); // Display limit reached
|
||||
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]);
|
||||
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([createMockDisplay("d1", "s1", contactId)]); // Within limit
|
||||
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result2).toEqual(surveys);
|
||||
|
||||
// Test with response already submitted
|
||||
vi.mocked(prisma.response.findMany).mockResolvedValue([createMockResponse("r1", "s1", contactId)]);
|
||||
const result3 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result3).toEqual([]);
|
||||
});
|
||||
|
||||
test("should not filter by displayOption 'respondMultiple'", async () => {
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", displayOption: "respondMultiple" }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([createMockDisplay("d1", "s1", contactId)]);
|
||||
vi.mocked(prisma.response.findMany).mockResolvedValue([createMockResponse("r1", "s1", contactId)]);
|
||||
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual(surveys);
|
||||
});
|
||||
|
||||
test("should filter by product recontactDays if survey recontactDays is null", async () => {
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", recontactDays: null }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
const displayDate = new Date();
|
||||
vi.mocked(prisma.display.findMany).mockResolvedValue([
|
||||
createMockDisplay("d1", "s2", contactId, displayDate), // Display for another survey
|
||||
]);
|
||||
|
||||
vi.mocked(diffInDays).mockReturnValue(5); // Not enough days passed (product.recontactDays = 10)
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]);
|
||||
expect(diffInDays).toHaveBeenCalledWith(expect.any(Date), displayDate);
|
||||
|
||||
vi.mocked(diffInDays).mockReturnValue(15); // Enough days passed
|
||||
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result2).toEqual(surveys);
|
||||
});
|
||||
|
||||
test("should return surveys if no segment filters exist", async () => {
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1" }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(anySurveyHasFilters).mockReturnValue(false);
|
||||
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual(surveys);
|
||||
expect(evaluateSegment).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("should evaluate segment filters if they exist", async () => {
|
||||
const segment = { id: "seg1", filters: [{}] } as TSegment; // Mock filter structure
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", segment }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(anySurveyHasFilters).mockReturnValue(true);
|
||||
|
||||
// Case 1: Segment evaluation matches
|
||||
vi.mocked(evaluateSegment).mockResolvedValue(true);
|
||||
const result1 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result1).toEqual(surveys);
|
||||
expect(evaluateSegment).toHaveBeenCalledWith(
|
||||
{
|
||||
attributes: contactAttributes,
|
||||
deviceType,
|
||||
environmentId,
|
||||
contactId,
|
||||
userId: contactAttributes.userId,
|
||||
},
|
||||
segment.filters
|
||||
);
|
||||
|
||||
// Case 2: Segment evaluation does not match
|
||||
vi.mocked(evaluateSegment).mockResolvedValue(false);
|
||||
const result2 = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result2).toEqual([]);
|
||||
});
|
||||
|
||||
test("should handle Prisma errors", async () => {
|
||||
const prismaError = new Prisma.PrismaClientKnownRequestError("Test Prisma Error", {
|
||||
code: "P2025",
|
||||
clientVersion: "test",
|
||||
});
|
||||
vi.mocked(getSurveys).mockRejectedValue(prismaError);
|
||||
|
||||
await expect(getSyncSurveys(environmentId, contactId, contactAttributes, deviceType)).rejects.toThrow(
|
||||
DatabaseError
|
||||
);
|
||||
expect(logger.error).toHaveBeenCalledWith(prismaError);
|
||||
});
|
||||
|
||||
test("should handle general errors", async () => {
|
||||
const generalError = new Error("Something went wrong");
|
||||
vi.mocked(getSurveys).mockRejectedValue(generalError);
|
||||
|
||||
await expect(getSyncSurveys(environmentId, contactId, contactAttributes, deviceType)).rejects.toThrow(
|
||||
generalError
|
||||
);
|
||||
});
|
||||
|
||||
test("should throw ResourceNotFoundError if resolved surveys are null after filtering", async () => {
|
||||
const segment = { id: "seg1", filters: [{}] } as TSegment; // Mock filter structure
|
||||
const surveys: TSurvey[] = [{ ...baseSurvey, id: "s1", segment }];
|
||||
vi.mocked(getSurveys).mockResolvedValue(surveys);
|
||||
vi.mocked(anySurveyHasFilters).mockReturnValue(true);
|
||||
vi.mocked(evaluateSegment).mockResolvedValue(false); // Ensure all surveys are filtered out
|
||||
|
||||
// This scenario is tricky to force directly as the code checks `if (!surveys)` before returning.
|
||||
// However, if `Promise.all` somehow resolved to null/undefined (highly unlikely), it should throw.
|
||||
// We can simulate this by mocking `Promise.all` if needed, but the current code structure makes this hard to test.
|
||||
// Let's assume the filter logic works correctly and test the intended path.
|
||||
const result = await getSyncSurveys(environmentId, contactId, contactAttributes, deviceType);
|
||||
expect(result).toEqual([]); // Expect empty array, not an error in this case.
|
||||
});
|
||||
});
|
||||
@@ -1,148 +0,0 @@
|
||||
import "server-only";
|
||||
import { Prisma } from "@prisma/client";
|
||||
import { cache as reactCache } from "react";
|
||||
import { prisma } from "@formbricks/database";
|
||||
import { logger } from "@formbricks/logger";
|
||||
import { ZId } from "@formbricks/types/common";
|
||||
import { DatabaseError, ResourceNotFoundError } from "@formbricks/types/errors";
|
||||
import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { getProjectByEnvironmentId } from "@/lib/project/service";
|
||||
import { getSurveys } from "@/lib/survey/service";
|
||||
import { anySurveyHasFilters } from "@/lib/survey/utils";
|
||||
import { diffInDays } from "@/lib/utils/datetime";
|
||||
import { validateInputs } from "@/lib/utils/validate";
|
||||
import { evaluateSegment } from "@/modules/ee/contacts/segments/lib/segments";
|
||||
|
||||
export const getSyncSurveys = reactCache(
|
||||
async (
|
||||
environmentId: string,
|
||||
contactId: string,
|
||||
contactAttributes: Record<string, string | number>,
|
||||
deviceType: "phone" | "desktop" = "desktop"
|
||||
): Promise<TSurvey[]> => {
|
||||
validateInputs([environmentId, ZId]);
|
||||
try {
|
||||
const project = await getProjectByEnvironmentId(environmentId);
|
||||
|
||||
if (!project) {
|
||||
throw new Error("Project not found");
|
||||
}
|
||||
|
||||
let surveys = await getSurveys(environmentId);
|
||||
|
||||
// filtered surveys for running and web
|
||||
surveys = surveys.filter((survey) => survey.status === "inProgress" && survey.type === "app");
|
||||
|
||||
// if no surveys are left, return an empty array
|
||||
if (surveys.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const displays = await prisma.display.findMany({
|
||||
where: {
|
||||
contactId,
|
||||
},
|
||||
});
|
||||
|
||||
const responses = await prisma.response.findMany({
|
||||
where: {
|
||||
contactId,
|
||||
},
|
||||
});
|
||||
|
||||
// filter surveys that meet the displayOption criteria
|
||||
surveys = surveys.filter((survey) => {
|
||||
switch (survey.displayOption) {
|
||||
case "respondMultiple":
|
||||
return true;
|
||||
case "displayOnce":
|
||||
return displays.filter((display) => display.surveyId === survey.id).length === 0;
|
||||
case "displayMultiple":
|
||||
if (!responses) return true;
|
||||
else {
|
||||
return responses.filter((response) => response.surveyId === survey.id).length === 0;
|
||||
}
|
||||
case "displaySome":
|
||||
if (survey.displayLimit === null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (responses && responses.filter((response) => response.surveyId === survey.id).length !== 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return displays.filter((display) => display.surveyId === survey.id).length < survey.displayLimit;
|
||||
default:
|
||||
throw Error("Invalid displayOption");
|
||||
}
|
||||
});
|
||||
|
||||
const latestDisplay = displays[0];
|
||||
|
||||
// filter surveys that meet the recontactDays criteria
|
||||
surveys = surveys.filter((survey) => {
|
||||
if (!latestDisplay) {
|
||||
return true;
|
||||
} else if (survey.recontactDays !== null) {
|
||||
const lastDisplaySurvey = displays.filter((display) => display.surveyId === survey.id)[0];
|
||||
if (!lastDisplaySurvey) {
|
||||
return true;
|
||||
}
|
||||
return diffInDays(new Date(), new Date(lastDisplaySurvey.createdAt)) >= survey.recontactDays;
|
||||
} else if (project.recontactDays !== null) {
|
||||
return diffInDays(new Date(), new Date(latestDisplay.createdAt)) >= project.recontactDays;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
});
|
||||
|
||||
// if no surveys are left, return an empty array
|
||||
if (surveys.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
// if no surveys have segment filters, return the surveys
|
||||
if (!anySurveyHasFilters(surveys)) {
|
||||
return surveys;
|
||||
}
|
||||
|
||||
// the surveys now have segment filters, so we need to evaluate them
|
||||
const surveyPromises = surveys.map(async (survey) => {
|
||||
const { segment } = survey;
|
||||
// if the survey has no segment, or the segment has no filters, we return the survey
|
||||
if (!segment || !segment.filters?.length) {
|
||||
return survey;
|
||||
}
|
||||
|
||||
// Evaluate the segment filters
|
||||
const result = await evaluateSegment(
|
||||
{
|
||||
attributes: contactAttributes ?? {},
|
||||
deviceType,
|
||||
environmentId,
|
||||
contactId,
|
||||
userId: String(contactAttributes.userId),
|
||||
},
|
||||
segment.filters
|
||||
);
|
||||
|
||||
return result ? survey : null;
|
||||
});
|
||||
|
||||
const resolvedSurveys = await Promise.all(surveyPromises);
|
||||
surveys = resolvedSurveys.filter((survey) => !!survey) as TSurvey[];
|
||||
|
||||
if (!surveys) {
|
||||
throw new ResourceNotFoundError("Survey", environmentId);
|
||||
}
|
||||
return surveys;
|
||||
} catch (error) {
|
||||
if (error instanceof Prisma.PrismaClientKnownRequestError) {
|
||||
logger.error(error);
|
||||
throw new DatabaseError(error.message);
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
);
|
||||
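For quick reference, the display-option gate applied by the deleted getSyncSurveys service above boils down to the following standalone sketch. It is illustrative only — the helper name and the per-survey counts are assumptions made for the example, not code from the repository.

```typescript
// Condensed sketch of the displayOption switch above, expressed over per-survey counts.
type DisplayOption = "respondMultiple" | "displayOnce" | "displayMultiple" | "displaySome";

const shouldShowSurvey = (
  option: DisplayOption,
  displayLimit: number | null,
  displayCount: number, // prior displays of this survey for the contact
  responseCount: number // prior responses of this survey for the contact
): boolean => {
  switch (option) {
    case "respondMultiple":
      return true;
    case "displayOnce":
      return displayCount === 0;
    case "displayMultiple":
      return responseCount === 0;
    case "displaySome":
      if (displayLimit === null) return true;
      if (responseCount !== 0) return false;
      return displayCount < displayLimit;
    default:
      throw new Error("Invalid displayOption");
  }
};

// Example: "displaySome" with a limit of 2, one prior display and no responses -> still shown
shouldShowSurvey("displaySome", 2, 1, 0); // true
```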
@@ -1,245 +0,0 @@
|
||||
import { describe, expect, test, vi } from "vitest";
|
||||
import { TAttributes } from "@formbricks/types/attributes";
|
||||
import { TLanguage } from "@formbricks/types/project";
|
||||
import {
|
||||
TSurvey,
|
||||
TSurveyEnding,
|
||||
TSurveyQuestion,
|
||||
TSurveyQuestionTypeEnum,
|
||||
} from "@formbricks/types/surveys/types";
|
||||
import { parseRecallInfo } from "@/lib/utils/recall";
|
||||
import { replaceAttributeRecall } from "./utils";
|
||||
|
||||
vi.mock("@/lib/utils/recall", () => ({
|
||||
parseRecallInfo: vi.fn((text, attributes) => {
|
||||
const recallPattern = /recall:([a-zA-Z0-9_-]+)/;
|
||||
const match = text.match(recallPattern);
|
||||
if (match && match[1]) {
|
||||
const recallKey = match[1];
|
||||
const attributeValue = attributes[recallKey];
|
||||
if (attributeValue !== undefined) {
|
||||
return text.replace(recallPattern, `parsed-${attributeValue}`);
|
||||
}
|
||||
}
|
||||
return text; // Return original text if no match or attribute not found
|
||||
}),
|
||||
}));
|
||||
|
||||
const baseSurvey: TSurvey = {
|
||||
id: "survey1",
|
||||
createdAt: new Date(),
|
||||
updatedAt: new Date(),
|
||||
name: "Test Survey",
|
||||
environmentId: "env1",
|
||||
type: "app",
|
||||
status: "inProgress",
|
||||
questions: [],
|
||||
endings: [],
|
||||
welcomeCard: { enabled: false } as TSurvey["welcomeCard"],
|
||||
languages: [
|
||||
{ language: { id: "lang1", code: "en" } as unknown as TLanguage, default: true, enabled: true },
|
||||
],
|
||||
triggers: [],
|
||||
recontactDays: null,
|
||||
displayLimit: null,
|
||||
singleUse: null,
|
||||
styling: null,
|
||||
surveyClosedMessage: null,
|
||||
hiddenFields: { enabled: false },
|
||||
variables: [],
|
||||
createdBy: null,
|
||||
isSingleResponsePerEmailEnabled: false,
|
||||
isVerifyEmailEnabled: false,
|
||||
projectOverwrites: null,
|
||||
showLanguageSwitch: false,
|
||||
isBackButtonHidden: false,
|
||||
followUps: [],
|
||||
recaptcha: { enabled: false, threshold: 0.5 },
|
||||
displayOption: "displayOnce",
|
||||
autoClose: null,
|
||||
delay: 0,
|
||||
displayPercentage: null,
|
||||
autoComplete: null,
|
||||
segment: null,
|
||||
pin: null,
|
||||
metadata: {},
|
||||
};
|
||||
|
||||
const attributes: TAttributes = {
|
||||
name: "John Doe",
|
||||
email: "john.doe@example.com",
|
||||
plan: "premium",
|
||||
};
|
||||
|
||||
describe("replaceAttributeRecall", () => {
|
||||
test("should replace recall info in question headlines and subheaders", () => {
|
||||
const surveyWithRecall: TSurvey = {
|
||||
...baseSurvey,
|
||||
questions: [
|
||||
{
|
||||
id: "q1",
|
||||
type: TSurveyQuestionTypeEnum.OpenText,
|
||||
headline: { default: "Hello recall:name!" },
|
||||
subheader: { default: "Your email is recall:email" },
|
||||
required: true,
|
||||
buttonLabel: { default: "Next" },
|
||||
placeholder: { default: "Type here..." },
|
||||
longAnswer: false,
|
||||
logic: [],
|
||||
} as unknown as TSurveyQuestion,
|
||||
],
|
||||
};
|
||||
|
||||
const result = replaceAttributeRecall(surveyWithRecall, attributes);
|
||||
expect(result.questions[0].headline.default).toBe("Hello parsed-John Doe!");
|
||||
expect(result.questions[0].subheader?.default).toBe("Your email is parsed-john.doe@example.com");
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Hello recall:name!", attributes);
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Your email is recall:email", attributes);
|
||||
});
|
||||
|
||||
test("should replace recall info in welcome card headline", () => {
|
||||
const surveyWithRecall: TSurvey = {
|
||||
...baseSurvey,
|
||||
welcomeCard: {
|
||||
enabled: true,
|
||||
headline: { default: "Welcome, recall:name!" },
|
||||
subheader: { default: "<p>Some content</p>" },
|
||||
buttonLabel: { default: "Start" },
|
||||
timeToFinish: false,
|
||||
showResponseCount: false,
|
||||
},
|
||||
};
|
||||
|
||||
const result = replaceAttributeRecall(surveyWithRecall, attributes);
|
||||
expect(result.welcomeCard.headline?.default).toBe("Welcome, parsed-John Doe!");
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Welcome, recall:name!", attributes);
|
||||
});
|
||||
|
||||
test("should replace recall info in end screen headlines and subheaders", () => {
|
||||
const surveyWithRecall: TSurvey = {
|
||||
...baseSurvey,
|
||||
endings: [
|
||||
{
|
||||
type: "endScreen",
|
||||
headline: { default: "Thank you, recall:name!" },
|
||||
subheader: { default: "Your plan: recall:plan" },
|
||||
buttonLabel: { default: "Finish" },
|
||||
buttonLink: "https://example.com",
|
||||
} as unknown as TSurveyEnding,
|
||||
],
|
||||
};
|
||||
|
||||
const result = replaceAttributeRecall(surveyWithRecall, attributes);
|
||||
expect(result.endings[0].type).toBe("endScreen");
|
||||
if (result.endings[0].type === "endScreen") {
|
||||
expect(result.endings[0].headline?.default).toBe("Thank you, parsed-John Doe!");
|
||||
expect(result.endings[0].subheader?.default).toBe("Your plan: parsed-premium");
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Thank you, recall:name!", attributes);
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Your plan: recall:plan", attributes);
|
||||
}
|
||||
});
|
||||
|
||||
test("should handle multiple languages", () => {
|
||||
const surveyMultiLang: TSurvey = {
|
||||
...baseSurvey,
|
||||
languages: [
|
||||
{ language: { id: "lang1", code: "en" } as unknown as TLanguage, default: true, enabled: true },
|
||||
{ language: { id: "lang2", code: "es" } as unknown as TLanguage, default: false, enabled: true },
|
||||
],
|
||||
questions: [
|
||||
{
|
||||
id: "q1",
|
||||
type: TSurveyQuestionTypeEnum.OpenText,
|
||||
headline: { default: "Hello recall:name!", es: "Hola recall:name!" },
|
||||
required: true,
|
||||
buttonLabel: { default: "Next", es: "Siguiente" },
|
||||
placeholder: { default: "Type here...", es: "Escribe aquí..." },
|
||||
longAnswer: false,
|
||||
logic: [],
|
||||
} as unknown as TSurveyQuestion,
|
||||
],
|
||||
};
|
||||
|
||||
const result = replaceAttributeRecall(surveyMultiLang, attributes);
|
||||
expect(result.questions[0].headline.default).toBe("Hello parsed-John Doe!");
|
||||
expect(result.questions[0].headline.es).toBe("Hola parsed-John Doe!");
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Hello recall:name!", attributes);
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Hola recall:name!", attributes);
|
||||
});
|
||||
|
||||
test("should not replace if recall key is not in attributes", () => {
|
||||
const surveyWithRecall: TSurvey = {
|
||||
...baseSurvey,
|
||||
questions: [
|
||||
{
|
||||
id: "q1",
|
||||
type: TSurveyQuestionTypeEnum.OpenText,
|
||||
headline: { default: "Your company: recall:company" },
|
||||
required: true,
|
||||
buttonLabel: { default: "Next" },
|
||||
placeholder: { default: "Type here..." },
|
||||
longAnswer: false,
|
||||
logic: [],
|
||||
} as unknown as TSurveyQuestion,
|
||||
],
|
||||
};
|
||||
|
||||
const result = replaceAttributeRecall(surveyWithRecall, attributes);
|
||||
expect(result.questions[0].headline.default).toBe("Your company: recall:company");
|
||||
expect(vi.mocked(parseRecallInfo)).toHaveBeenCalledWith("Your company: recall:company", attributes);
|
||||
});
|
||||
|
||||
test("should handle surveys with no recall information", async () => {
|
||||
const surveyNoRecall: TSurvey = {
|
||||
...baseSurvey,
|
||||
questions: [
|
||||
{
|
||||
id: "q1",
|
||||
type: TSurveyQuestionTypeEnum.OpenText,
|
||||
headline: { default: "Just a regular question" },
|
||||
required: true,
|
||||
buttonLabel: { default: "Next" },
|
||||
placeholder: { default: "Type here..." },
|
||||
longAnswer: false,
|
||||
logic: [],
|
||||
} as unknown as TSurveyQuestion,
|
||||
],
|
||||
welcomeCard: {
|
||||
enabled: true,
|
||||
headline: { default: "Welcome!" },
|
||||
subheader: { default: "<p>Some content</p>" },
|
||||
buttonLabel: { default: "Start" },
|
||||
timeToFinish: false,
|
||||
showResponseCount: false,
|
||||
},
|
||||
endings: [
|
||||
{
|
||||
type: "endScreen",
|
||||
headline: { default: "Thank you!" },
|
||||
buttonLabel: { default: "Finish" },
|
||||
} as unknown as TSurveyEnding,
|
||||
],
|
||||
};
|
||||
const parseRecallInfoSpy = vi.spyOn(await import("@/lib/utils/recall"), "parseRecallInfo");
|
||||
|
||||
const result = replaceAttributeRecall(surveyNoRecall, attributes);
|
||||
expect(result).toEqual(surveyNoRecall); // Should be unchanged
|
||||
expect(parseRecallInfoSpy).not.toHaveBeenCalled();
|
||||
parseRecallInfoSpy.mockRestore();
|
||||
});
|
||||
|
||||
test("should handle surveys with empty questions, endings, or disabled welcome card", async () => {
|
||||
const surveyEmpty: TSurvey = {
|
||||
...baseSurvey,
|
||||
questions: [],
|
||||
endings: [],
|
||||
welcomeCard: { enabled: false } as TSurvey["welcomeCard"],
|
||||
};
|
||||
const parseRecallInfoSpy = vi.spyOn(await import("@/lib/utils/recall"), "parseRecallInfo");
|
||||
|
||||
const result = replaceAttributeRecall(surveyEmpty, attributes);
|
||||
expect(result).toEqual(surveyEmpty);
|
||||
expect(parseRecallInfoSpy).not.toHaveBeenCalled();
|
||||
parseRecallInfoSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
@@ -1,55 +0,0 @@
import { TAttributes } from "@formbricks/types/attributes";
import { TSurvey } from "@formbricks/types/surveys/types";
import { parseRecallInfo } from "@/lib/utils/recall";

export const replaceAttributeRecall = (survey: TSurvey, attributes: TAttributes): TSurvey => {
  const surveyTemp = structuredClone(survey);
  const languages = surveyTemp.languages
    .map((surveyLanguage) => {
      if (surveyLanguage.default) {
        return "default";
      }

      if (surveyLanguage.enabled) {
        return surveyLanguage.language.code;
      }

      return null;
    })
    .filter((language): language is string => language !== null);

  surveyTemp.questions.forEach((question) => {
    languages.forEach((language) => {
      if (question.headline[language]?.includes("recall:")) {
        question.headline[language] = parseRecallInfo(question.headline[language], attributes);
      }
      if (question.subheader && question.subheader[language]?.includes("recall:")) {
        question.subheader[language] = parseRecallInfo(question.subheader[language], attributes);
      }
    });
  });
  if (surveyTemp.welcomeCard.enabled && surveyTemp.welcomeCard.headline) {
    languages.forEach((language) => {
      if (surveyTemp.welcomeCard.headline && surveyTemp.welcomeCard.headline[language]?.includes("recall:")) {
        surveyTemp.welcomeCard.headline[language] = parseRecallInfo(
          surveyTemp.welcomeCard.headline[language],
          attributes
        );
      }
    });
  }
  surveyTemp.endings.forEach((ending) => {
    if (ending.type === "endScreen") {
      languages.forEach((language) => {
        if (ending.headline && ending.headline[language]?.includes("recall:")) {
          ending.headline[language] = parseRecallInfo(ending.headline[language], attributes);
          if (ending.subheader && ending.subheader[language]?.includes("recall:")) {
            ending.subheader[language] = parseRecallInfo(ending.subheader[language], attributes);
          }
        }
      });
    }
  });

  return surveyTemp;
};
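A minimal usage sketch of the deleted helper above (hypothetical calling code, not from the repository): it deep-clones the survey via structuredClone and rewrites any `recall:` placeholders it finds, delegating the actual substitution to parseRecallInfo, which is mocked as `parsed-<value>` in the tests above.

```typescript
import { TAttributes } from "@formbricks/types/attributes";
import { TSurvey } from "@formbricks/types/surveys/types";
import { replaceAttributeRecall } from "./utils";

declare const survey: TSurvey; // e.g. headline { default: "Hello recall:name!" }

const attributes: TAttributes = { name: "John Doe", email: "john.doe@example.com" };

// Returns a clone; the input survey object is left untouched.
const personalizedSurvey = replaceAttributeRecall(survey, attributes);
```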
@@ -1,6 +0,0 @@
import {
  OPTIONS,
  PUT,
} from "@/modules/ee/contacts/api/v1/client/[environmentId]/contacts/[userId]/attributes/route";

export { OPTIONS, PUT };
@@ -0,0 +1,314 @@
|
||||
import { Prisma } from "@prisma/client";
|
||||
import { afterEach, beforeEach, describe, expect, test, vi } from "vitest";
|
||||
import { prisma } from "@formbricks/database";
|
||||
import { logger } from "@formbricks/logger";
|
||||
import { DatabaseError, ResourceNotFoundError } from "@formbricks/types/errors";
|
||||
import { getEnvironmentStateData } from "./data";
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock("@formbricks/database", () => ({
|
||||
prisma: {
|
||||
environment: {
|
||||
findUnique: vi.fn(),
|
||||
},
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("@formbricks/logger", () => ({
|
||||
logger: {
|
||||
error: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("@/modules/survey/lib/utils", () => ({
|
||||
transformPrismaSurvey: vi.fn((survey) => survey),
|
||||
}));
|
||||
|
||||
const environmentId = "cjld2cjxh0000qzrmn831i7rn";
|
||||
|
||||
const mockEnvironmentData = {
|
||||
id: environmentId,
|
||||
type: "production",
|
||||
appSetupCompleted: true,
|
||||
project: {
|
||||
id: "project-123",
|
||||
recontactDays: 30,
|
||||
clickOutsideClose: true,
|
||||
overlay: "none",
|
||||
placement: "bottomRight",
|
||||
inAppSurveyBranding: true,
|
||||
styling: { allowStyleOverwrite: false },
|
||||
organization: {
|
||||
id: "org-123",
|
||||
billing: {
|
||||
plan: "free",
|
||||
limits: { monthly: { responses: 100 } },
|
||||
},
|
||||
},
|
||||
},
|
||||
actionClasses: [
|
||||
{
|
||||
id: "action-1",
|
||||
type: "code",
|
||||
name: "Test Action",
|
||||
key: "test-action",
|
||||
noCodeConfig: null,
|
||||
},
|
||||
],
|
||||
surveys: [
|
||||
{
|
||||
id: "survey-1",
|
||||
name: "Test Survey",
|
||||
type: "app",
|
||||
status: "inProgress",
|
||||
welcomeCard: { enabled: false },
|
||||
questions: [],
|
||||
blocks: null,
|
||||
variables: [],
|
||||
showLanguageSwitch: false,
|
||||
languages: [],
|
||||
endings: [],
|
||||
autoClose: null,
|
||||
styling: null,
|
||||
recaptcha: { enabled: false },
|
||||
segment: null,
|
||||
recontactDays: null,
|
||||
displayLimit: null,
|
||||
displayOption: "displayOnce",
|
||||
hiddenFields: { enabled: false },
|
||||
isBackButtonHidden: false,
|
||||
triggers: [],
|
||||
displayPercentage: null,
|
||||
delay: 0,
|
||||
projectOverwrites: null,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
describe("getEnvironmentStateData", () => {
|
||||
beforeEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.resetAllMocks();
|
||||
});
|
||||
|
||||
test("should return environment state data when environment exists", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue(mockEnvironmentData as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result).toEqual({
|
||||
environment: {
|
||||
id: environmentId,
|
||||
type: "production",
|
||||
appSetupCompleted: true,
|
||||
project: {
|
||||
id: "project-123",
|
||||
recontactDays: 30,
|
||||
clickOutsideClose: true,
|
||||
overlay: "none",
|
||||
placement: "bottomRight",
|
||||
inAppSurveyBranding: true,
|
||||
styling: { allowStyleOverwrite: false },
|
||||
},
|
||||
},
|
||||
organization: {
|
||||
id: "org-123",
|
||||
billing: {
|
||||
plan: "free",
|
||||
limits: { monthly: { responses: 100 } },
|
||||
},
|
||||
},
|
||||
surveys: mockEnvironmentData.surveys,
|
||||
actionClasses: mockEnvironmentData.actionClasses,
|
||||
});
|
||||
|
||||
expect(prisma.environment.findUnique).toHaveBeenCalledWith({
|
||||
where: { id: environmentId },
|
||||
select: expect.objectContaining({
|
||||
id: true,
|
||||
type: true,
|
||||
appSetupCompleted: true,
|
||||
project: expect.any(Object),
|
||||
actionClasses: expect.any(Object),
|
||||
surveys: expect.any(Object),
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
test("should throw ResourceNotFoundError when environment is not found", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue(null);
|
||||
|
||||
await expect(getEnvironmentStateData(environmentId)).rejects.toThrow(ResourceNotFoundError);
|
||||
await expect(getEnvironmentStateData(environmentId)).rejects.toThrow("environment");
|
||||
});
|
||||
|
||||
test("should throw ResourceNotFoundError when project is not found", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
project: null,
|
||||
} as never);
|
||||
|
||||
await expect(getEnvironmentStateData(environmentId)).rejects.toThrow(ResourceNotFoundError);
|
||||
});
|
||||
|
||||
test("should throw ResourceNotFoundError when organization is not found", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
project: {
|
||||
...mockEnvironmentData.project,
|
||||
organization: null,
|
||||
},
|
||||
} as never);
|
||||
|
||||
await expect(getEnvironmentStateData(environmentId)).rejects.toThrow(ResourceNotFoundError);
|
||||
});
|
||||
|
||||
test("should throw DatabaseError on Prisma database errors", async () => {
|
||||
const prismaError = new Prisma.PrismaClientKnownRequestError("Connection failed", {
|
||||
code: "P2024",
|
||||
clientVersion: "5.0.0",
|
||||
});
|
||||
vi.mocked(prisma.environment.findUnique).mockRejectedValue(prismaError);
|
||||
|
||||
await expect(getEnvironmentStateData(environmentId)).rejects.toThrow(DatabaseError);
|
||||
expect(logger.error).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("should rethrow unexpected errors", async () => {
|
||||
const unexpectedError = new Error("Unexpected error");
|
||||
vi.mocked(prisma.environment.findUnique).mockRejectedValue(unexpectedError);
|
||||
|
||||
await expect(getEnvironmentStateData(environmentId)).rejects.toThrow("Unexpected error");
|
||||
expect(logger.error).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("should handle empty surveys array", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
surveys: [],
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.surveys).toEqual([]);
|
||||
});
|
||||
|
||||
test("should handle empty actionClasses array", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
actionClasses: [],
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.actionClasses).toEqual([]);
|
||||
});
|
||||
|
||||
test("should transform surveys using transformPrismaSurvey", async () => {
|
||||
const multipleSurveys = [
|
||||
...mockEnvironmentData.surveys,
|
||||
{
|
||||
...mockEnvironmentData.surveys[0],
|
||||
id: "survey-2",
|
||||
name: "Second Survey",
|
||||
},
|
||||
];
|
||||
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
surveys: multipleSurveys,
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.surveys).toHaveLength(2);
|
||||
});
|
||||
|
||||
test("should correctly map project properties to environment.project", async () => {
|
||||
const customProject = {
|
||||
...mockEnvironmentData.project,
|
||||
recontactDays: 14,
|
||||
clickOutsideClose: false,
|
||||
overlay: "dark",
|
||||
placement: "center",
|
||||
inAppSurveyBranding: false,
|
||||
styling: { allowStyleOverwrite: true, brandColor: "#ff0000" },
|
||||
};
|
||||
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
project: customProject,
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.environment.project).toEqual({
|
||||
id: "project-123",
|
||||
recontactDays: 14,
|
||||
clickOutsideClose: false,
|
||||
overlay: "dark",
|
||||
placement: "center",
|
||||
inAppSurveyBranding: false,
|
||||
styling: { allowStyleOverwrite: true, brandColor: "#ff0000" },
|
||||
});
|
||||
});
|
||||
|
||||
test("should validate environmentId input", async () => {
|
||||
// Invalid CUID should throw validation error
|
||||
await expect(getEnvironmentStateData("invalid-id")).rejects.toThrow();
|
||||
});
|
||||
|
||||
test("should handle different environment types", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
type: "development",
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.environment.type).toBe("development");
|
||||
});
|
||||
|
||||
test("should handle appSetupCompleted false", async () => {
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
appSetupCompleted: false,
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.environment.appSetupCompleted).toBe(false);
|
||||
});
|
||||
|
||||
test("should correctly extract organization billing data", async () => {
|
||||
const customBilling = {
|
||||
plan: "enterprise",
|
||||
stripeCustomerId: "cus_123",
|
||||
limits: {
|
||||
monthly: { responses: 10000, miu: 50000 },
|
||||
projects: 100,
|
||||
},
|
||||
};
|
||||
|
||||
vi.mocked(prisma.environment.findUnique).mockResolvedValue({
|
||||
...mockEnvironmentData,
|
||||
project: {
|
||||
...mockEnvironmentData.project,
|
||||
organization: {
|
||||
id: "org-enterprise",
|
||||
billing: customBilling,
|
||||
},
|
||||
},
|
||||
} as never);
|
||||
|
||||
const result = await getEnvironmentStateData(environmentId);
|
||||
|
||||
expect(result.organization).toEqual({
|
||||
id: "org-enterprise",
|
||||
billing: customBilling,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -54,7 +54,7 @@ export const getEnvironmentStateData = async (environmentId: string): Promise<En
      id: true,
      recontactDays: true,
      clickOutsideClose: true,
      darkOverlay: true,
      overlay: true,
      placement: true,
      inAppSurveyBranding: true,
      styling: true,

@@ -174,7 +174,7 @@ export const getEnvironmentStateData = async (environmentId: string): Promise<En
      id: environmentData.project.id,
      recontactDays: environmentData.project.recontactDays,
      clickOutsideClose: environmentData.project.clickOutsideClose,
      darkOverlay: environmentData.project.darkOverlay,
      overlay: environmentData.project.overlay,
      placement: environmentData.project.placement,
      inAppSurveyBranding: environmentData.project.inAppSurveyBranding,
      styling: environmentData.project.styling,

@@ -58,7 +58,7 @@ const mockProject: TJsEnvironmentStateProject = {
  inAppSurveyBranding: true,
  placement: "bottomRight",
  clickOutsideClose: true,
  darkOverlay: false,
  overlay: "none",
  styling: {
    allowStyleOverwrite: false,
  },
@@ -1,6 +0,0 @@
import {
  GET,
  OPTIONS,
} from "@/modules/ee/contacts/api/v1/client/[environmentId]/identify/contacts/[userId]/route";

export { GET, OPTIONS };
@@ -1,13 +1,15 @@
|
||||
import { NextRequest } from "next/server";
|
||||
import { logger } from "@formbricks/logger";
|
||||
import { DatabaseError, InvalidInputError, ResourceNotFoundError } from "@formbricks/types/errors";
|
||||
import { ZResponseUpdateInput } from "@formbricks/types/responses";
|
||||
import { TResponse, TResponseUpdateInput, ZResponseUpdateInput } from "@formbricks/types/responses";
|
||||
import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { responses } from "@/app/lib/api/response";
|
||||
import { transformErrorToDetails } from "@/app/lib/api/validator";
|
||||
import { withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { sendToPipeline } from "@/app/lib/pipelines";
|
||||
import { getResponse } from "@/lib/response/service";
|
||||
import { getSurvey } from "@/lib/survey/service";
|
||||
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
|
||||
import { validateOtherOptionLengthForMultipleChoice } from "@/modules/api/v2/lib/element";
|
||||
import { createQuotaFullObject } from "@/modules/ee/quotas/lib/helpers";
|
||||
import { validateFileUploads } from "@/modules/storage/utils";
|
||||
@@ -31,6 +33,38 @@ const handleDatabaseError = (error: Error, url: string, endpoint: string, respon
|
||||
return responses.internalServerErrorResponse("Unknown error occurred", true);
|
||||
};
|
||||
|
||||
const validateResponse = (
|
||||
response: TResponse,
|
||||
survey: TSurvey,
|
||||
responseUpdateInput: TResponseUpdateInput
|
||||
) => {
|
||||
// Validate response data against validation rules
|
||||
const mergedData = {
|
||||
...response.data,
|
||||
...responseUpdateInput.data,
|
||||
};
|
||||
|
||||
const isFinished = responseUpdateInput.finished ?? false;
|
||||
|
||||
const validationErrors = validateResponseData(
|
||||
survey.blocks,
|
||||
mergedData,
|
||||
responseUpdateInput.language ?? response.language ?? "en",
|
||||
isFinished,
|
||||
survey.questions
|
||||
);
|
||||
|
||||
if (validationErrors) {
|
||||
return {
|
||||
response: responses.badRequestResponse(
|
||||
"Validation failed",
|
||||
formatValidationErrorsForV1Api(validationErrors),
|
||||
true
|
||||
),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
export const PUT = withV1ApiWrapper({
|
||||
handler: async ({
|
||||
req,
|
||||
@@ -113,6 +147,11 @@ export const PUT = withV1ApiWrapper({
|
||||
};
|
||||
}
|
||||
|
||||
const validationResult = validateResponse(response, survey, inputValidation.data);
|
||||
if (validationResult) {
|
||||
return validationResult;
|
||||
}
|
||||
|
||||
// update response with quota evaluation
|
||||
let updatedResponse;
|
||||
try {
|
||||
|
||||
@@ -6,11 +6,14 @@ import { ZEnvironmentId } from "@formbricks/types/environment";
|
||||
import { InvalidInputError } from "@formbricks/types/errors";
|
||||
import { TResponseWithQuotaFull } from "@formbricks/types/quota";
|
||||
import { TResponseInput, ZResponseInput } from "@formbricks/types/responses";
|
||||
import { TSurvey } from "@formbricks/types/surveys/types";
|
||||
import { responses } from "@/app/lib/api/response";
|
||||
import { transformErrorToDetails } from "@/app/lib/api/validator";
|
||||
import { withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { sendToPipeline } from "@/app/lib/pipelines";
|
||||
import { getSurvey } from "@/lib/survey/service";
|
||||
import { getClientIpFromHeaders } from "@/lib/utils/client-ip";
|
||||
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
|
||||
import { getIsContactsEnabled } from "@/modules/ee/license-check/lib/utils";
|
||||
import { createQuotaFullObject } from "@/modules/ee/quotas/lib/helpers";
|
||||
import { validateFileUploads } from "@/modules/storage/utils";
|
||||
@@ -32,6 +35,27 @@ export const OPTIONS = async (): Promise<Response> => {
|
||||
);
|
||||
};
|
||||
|
||||
const validateResponse = (responseInputData: TResponseInput, survey: TSurvey) => {
|
||||
// Validate response data against validation rules
|
||||
const validationErrors = validateResponseData(
|
||||
survey.blocks,
|
||||
responseInputData.data,
|
||||
responseInputData.language ?? "en",
|
||||
responseInputData.finished,
|
||||
survey.questions
|
||||
);
|
||||
|
||||
if (validationErrors) {
|
||||
return {
|
||||
response: responses.badRequestResponse(
|
||||
"Validation failed",
|
||||
formatValidationErrorsForV1Api(validationErrors),
|
||||
true
|
||||
),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
export const POST = withV1ApiWrapper({
|
||||
handler: async ({ req, props }: { req: NextRequest; props: Context }) => {
|
||||
const params = await props.params;
|
||||
@@ -122,6 +146,11 @@ export const POST = withV1ApiWrapper({
|
||||
};
|
||||
}
|
||||
|
||||
const validationResult = validateResponse(responseInputData, survey);
|
||||
if (validationResult) {
|
||||
return validationResult;
|
||||
}
|
||||
|
||||
let response: TResponseWithQuotaFull;
|
||||
try {
|
||||
const meta: TResponseInput["meta"] = {
|
||||
@@ -136,6 +165,13 @@ export const POST = withV1ApiWrapper({
|
||||
action: responseInputData?.meta?.action,
|
||||
};
|
||||
|
||||
// Capture IP address if the survey has IP capture enabled
|
||||
// Server-derived IP always overwrites any client-provided value
|
||||
if (survey.isCaptureIpEnabled) {
|
||||
const ipAddress = await getClientIpFromHeaders();
|
||||
meta.ipAddress = ipAddress;
|
||||
}
|
||||
|
||||
response = await createResponseWithQuotaEvaluation({
|
||||
...responseInputData,
|
||||
meta,
|
||||
|
||||
@@ -1,4 +1,4 @@
import { ImageResponse } from "@vercel/og";
import { ImageResponse } from "next/og";
import { NextRequest } from "next/server";

export const GET = async (req: NextRequest) => {
@@ -1,7 +1,7 @@
|
||||
import { NextRequest } from "next/server";
|
||||
import { TIntegrationNotionConfigData, TIntegrationNotionInput } from "@formbricks/types/integration/notion";
|
||||
import { responses } from "@/app/lib/api/response";
|
||||
import { withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { TSessionAuthentication, withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import {
|
||||
ENCRYPTION_KEY,
|
||||
NOTION_OAUTH_CLIENT_ID,
|
||||
@@ -10,10 +10,17 @@ import {
|
||||
WEBAPP_URL,
|
||||
} from "@/lib/constants";
|
||||
import { symmetricEncrypt } from "@/lib/crypto";
|
||||
import { hasUserEnvironmentAccess } from "@/lib/environment/auth";
|
||||
import { createOrUpdateIntegration, getIntegrationByType } from "@/lib/integration/service";
|
||||
|
||||
export const GET = withV1ApiWrapper({
|
||||
handler: async ({ req }: { req: NextRequest }) => {
|
||||
handler: async ({
|
||||
req,
|
||||
authentication,
|
||||
}: {
|
||||
req: NextRequest;
|
||||
authentication: NonNullable<TSessionAuthentication>;
|
||||
}) => {
|
||||
const url = req.url;
|
||||
const queryParams = new URLSearchParams(url.split("?")[1]); // Split the URL and get the query parameters
|
||||
const environmentId = queryParams.get("state"); // Get the value of the 'state' parameter
|
||||
@@ -26,6 +33,13 @@ export const GET = withV1ApiWrapper({
|
||||
};
|
||||
}
|
||||
|
||||
const canUserAccessEnvironment = await hasUserEnvironmentAccess(authentication.user.id, environmentId);
|
||||
if (!canUserAccessEnvironment) {
|
||||
return {
|
||||
response: responses.unauthorizedResponse(),
|
||||
};
|
||||
}
|
||||
|
||||
if (code && typeof code !== "string") {
|
||||
return {
|
||||
response: responses.badRequestResponse("`code` must be a string"),
|
||||
|
||||
@@ -5,12 +5,19 @@ import {
|
||||
TIntegrationSlackCredential,
|
||||
} from "@formbricks/types/integration/slack";
|
||||
import { responses } from "@/app/lib/api/response";
|
||||
import { withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { TSessionAuthentication, withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { SLACK_CLIENT_ID, SLACK_CLIENT_SECRET, WEBAPP_URL } from "@/lib/constants";
|
||||
import { hasUserEnvironmentAccess } from "@/lib/environment/auth";
|
||||
import { createOrUpdateIntegration, getIntegrationByType } from "@/lib/integration/service";
|
||||
|
||||
export const GET = withV1ApiWrapper({
|
||||
handler: async ({ req }: { req: NextRequest }) => {
|
||||
handler: async ({
|
||||
req,
|
||||
authentication,
|
||||
}: {
|
||||
req: NextRequest;
|
||||
authentication: NonNullable<TSessionAuthentication>;
|
||||
}) => {
|
||||
const url = req.url;
|
||||
const queryParams = new URLSearchParams(url.split("?")[1]); // Split the URL and get the query parameters
|
||||
const environmentId = queryParams.get("state"); // Get the value of the 'state' parameter
|
||||
@@ -23,6 +30,13 @@ export const GET = withV1ApiWrapper({
|
||||
};
|
||||
}
|
||||
|
||||
const canUserAccessEnvironment = await hasUserEnvironmentAccess(authentication.user.id, environmentId);
|
||||
if (!canUserAccessEnvironment) {
|
||||
return {
|
||||
response: responses.unauthorizedResponse(),
|
||||
};
|
||||
}
|
||||
|
||||
if (code && typeof code !== "string") {
|
||||
return {
|
||||
response: responses.badRequestResponse("`code` must be a string"),
|
||||
|
||||
@@ -8,6 +8,7 @@ import { TApiAuditLog, TApiKeyAuthentication, withV1ApiWrapper } from "@/app/lib
|
||||
import { sendToPipeline } from "@/app/lib/pipelines";
|
||||
import { deleteResponse, getResponse } from "@/lib/response/service";
|
||||
import { getSurvey } from "@/lib/survey/service";
|
||||
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
|
||||
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
|
||||
import { validateFileUploads } from "@/modules/storage/utils";
|
||||
import { updateResponseWithQuotaEvaluation } from "./lib/response";
|
||||
@@ -140,6 +141,25 @@ export const PUT = withV1ApiWrapper({
|
||||
};
|
||||
}
|
||||
|
||||
// Validate response data against validation rules
|
||||
const validationErrors = validateResponseData(
|
||||
result.survey.blocks,
|
||||
responseUpdate.data,
|
||||
responseUpdate.language ?? "en",
|
||||
responseUpdate.finished,
|
||||
result.survey.questions
|
||||
);
|
||||
|
||||
if (validationErrors) {
|
||||
return {
|
||||
response: responses.badRequestResponse(
|
||||
"Validation failed",
|
||||
formatValidationErrorsForV1Api(validationErrors),
|
||||
true
|
||||
),
|
||||
};
|
||||
}
|
||||
|
||||
const inputValidation = ZResponseUpdateInput.safeParse(responseUpdate);
|
||||
if (!inputValidation.success) {
|
||||
return {
|
||||
|
||||
@@ -7,6 +7,7 @@ import { transformErrorToDetails } from "@/app/lib/api/validator";
|
||||
import { TApiAuditLog, TApiKeyAuthentication, withV1ApiWrapper } from "@/app/lib/api/with-api-logging";
|
||||
import { sendToPipeline } from "@/app/lib/pipelines";
|
||||
import { getSurvey } from "@/lib/survey/service";
|
||||
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
|
||||
import { hasPermission } from "@/modules/organization/settings/api-keys/lib/utils";
|
||||
import { validateFileUploads } from "@/modules/storage/utils";
|
||||
import {
|
||||
@@ -149,6 +150,25 @@ export const POST = withV1ApiWrapper({
|
||||
};
|
||||
}
|
||||
|
||||
// Validate response data against validation rules
|
||||
const validationErrors = validateResponseData(
|
||||
surveyResult.survey.blocks,
|
||||
responseInput.data,
|
||||
responseInput.language ?? "en",
|
||||
responseInput.finished,
|
||||
surveyResult.survey.questions
|
||||
);
|
||||
|
||||
if (validationErrors) {
|
||||
return {
|
||||
response: responses.badRequestResponse(
|
||||
"Validation failed",
|
||||
formatValidationErrorsForV1Api(validationErrors),
|
||||
true
|
||||
),
|
||||
};
|
||||
}
|
||||
|
||||
if (responseInput.createdAt && !responseInput.updatedAt) {
|
||||
responseInput.updatedAt = responseInput.createdAt;
|
||||
}
|
||||
|
||||
@@ -19,6 +19,10 @@ vi.mock("@/lib/utils/validate", () => ({
|
||||
validateInputs: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("@/lib/crypto", () => ({
|
||||
generateWebhookSecret: vi.fn(() => "whsec_test_secret_1234567890"),
|
||||
}));
|
||||
|
||||
describe("createWebhook", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
@@ -59,6 +63,7 @@ describe("createWebhook", () => {
|
||||
source: webhookInput.source,
|
||||
surveyIds: webhookInput.surveyIds,
|
||||
triggers: webhookInput.triggers,
|
||||
secret: "whsec_test_secret_1234567890",
|
||||
environment: {
|
||||
connect: {
|
||||
id: webhookInput.environmentId,
|
||||
@@ -144,6 +149,7 @@ describe("createWebhook", () => {
|
||||
source: webhookInput.source,
|
||||
surveyIds: webhookInput.surveyIds,
|
||||
triggers: webhookInput.triggers,
|
||||
secret: "whsec_test_secret_1234567890",
|
||||
environment: {
|
||||
connect: {
|
||||
id: webhookInput.environmentId,
|
||||
|
||||
@@ -4,12 +4,15 @@ import { ZId, ZOptionalNumber } from "@formbricks/types/common";
|
||||
import { DatabaseError, InvalidInputError } from "@formbricks/types/errors";
|
||||
import { TWebhookInput, ZWebhookInput } from "@/app/api/v1/webhooks/types/webhooks";
|
||||
import { ITEMS_PER_PAGE } from "@/lib/constants";
|
||||
import { generateWebhookSecret } from "@/lib/crypto";
|
||||
import { validateInputs } from "@/lib/utils/validate";
|
||||
|
||||
export const createWebhook = async (webhookInput: TWebhookInput): Promise<Webhook> => {
|
||||
validateInputs([webhookInput, ZWebhookInput]);
|
||||
|
||||
try {
|
||||
const secret = generateWebhookSecret();
|
||||
|
||||
const createdWebhook = await prisma.webhook.create({
|
||||
data: {
|
||||
url: webhookInput.url,
|
||||
@@ -17,6 +20,7 @@ export const createWebhook = async (webhookInput: TWebhookInput): Promise<Webhoo
|
||||
source: webhookInput.source,
|
||||
surveyIds: webhookInput.surveyIds || [],
|
||||
triggers: webhookInput.triggers || [],
|
||||
secret,
|
||||
environment: {
|
||||
connect: {
|
||||
id: webhookInput.environmentId,
|
||||
|
||||
@@ -1,6 +0,0 @@
import {
  OPTIONS,
  PUT,
} from "@/modules/ee/contacts/api/v1/client/[environmentId]/contacts/[userId]/attributes/route";

export { OPTIONS, PUT };

@@ -1,6 +0,0 @@
import {
  GET,
  OPTIONS,
} from "@/modules/ee/contacts/api/v1/client/[environmentId]/identify/contacts/[userId]/route";

export { GET, OPTIONS };
@@ -10,6 +10,8 @@ import { transformErrorToDetails } from "@/app/lib/api/validator";
|
||||
import { sendToPipeline } from "@/app/lib/pipelines";
|
||||
import { getSurvey } from "@/lib/survey/service";
|
||||
import { getElementsFromBlocks } from "@/lib/survey/utils";
|
||||
import { getClientIpFromHeaders } from "@/lib/utils/client-ip";
|
||||
import { formatValidationErrorsForV1Api, validateResponseData } from "@/modules/api/lib/validation";
|
||||
import { validateOtherOptionLengthForMultipleChoice } from "@/modules/api/v2/lib/element";
|
||||
import { getIsContactsEnabled } from "@/modules/ee/license-check/lib/utils";
|
||||
import { createQuotaFullObject } from "@/modules/ee/quotas/lib/helpers";
|
||||
@@ -105,6 +107,23 @@ export const POST = async (request: Request, context: Context): Promise<Response
|
||||
);
|
||||
}
|
||||
|
||||
// Validate response data against validation rules
|
||||
const validationErrors = validateResponseData(
|
||||
survey.blocks,
|
||||
responseInputData.data,
|
||||
responseInputData.language ?? "en",
|
||||
responseInputData.finished,
|
||||
survey.questions
|
||||
);
|
||||
|
||||
if (validationErrors) {
|
||||
return responses.badRequestResponse(
|
||||
"Validation failed",
|
||||
formatValidationErrorsForV1Api(validationErrors),
|
||||
true
|
||||
);
|
||||
}
|
||||
|
||||
let response: TResponseWithQuotaFull;
|
||||
try {
|
||||
const meta: TResponseInputV2["meta"] = {
|
||||
@@ -119,6 +138,13 @@ export const POST = async (request: Request, context: Context): Promise<Response
|
||||
action: responseInputData?.meta?.action,
|
||||
};
|
||||
|
||||
// Capture IP address if the survey has IP capture enabled
|
||||
// Server-derived IP always overwrites any client-provided value
|
||||
if (survey.isCaptureIpEnabled) {
|
||||
const ipAddress = await getClientIpFromHeaders();
|
||||
meta.ipAddress = ipAddress;
|
||||
}
|
||||
|
||||
response = await createResponseWithQuotaEvaluation({
|
||||
...responseInputData,
|
||||
meta,
|
||||
|
||||
@@ -3,8 +3,9 @@
// Error components must be Client components
import * as Sentry from "@sentry/nextjs";
import { TFunction } from "i18next";
import { useEffect } from "react";
import { useTranslation } from "react-i18next";
import { type ClientErrorType, getClientErrorData } from "@formbricks/types/errors";
import { type ClientErrorType, getClientErrorData, isExpectedError } from "@formbricks/types/errors";
import { Button } from "@/modules/ui/components/button";
import { ErrorComponent } from "@/modules/ui/components/error-component";

@@ -30,11 +31,13 @@ const ErrorBoundary = ({ error, reset }: { error: Error; reset: () => void }) =>
  const errorData = getClientErrorData(error);
  const { title, description } = getErrorMessages(errorData.type, t);

  if (process.env.NODE_ENV === "development") {
    console.error(error.message);
  } else {
    Sentry.captureException(error);
  }
  useEffect(() => {
    if (process.env.NODE_ENV === "development") {
      console.error(error.message);
    } else if (!isExpectedError(error)) {
      Sentry.captureException(error);
    }
  }, [error]);

  return (
    <div className="flex h-full w-full flex-col items-center justify-center">
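The change above moves error reporting out of the render path into an effect and skips Sentry for errors flagged as expected. A generic sketch of the same pattern, not tied to this component (the hook name is made up for the example):

```typescript
import * as Sentry from "@sentry/nextjs";
import { useEffect } from "react";

// Report only unexpected errors, and only as a side effect after render.
export const useErrorReporting = (error: Error, isExpected: (e: Error) => boolean): void => {
  useEffect(() => {
    if (process.env.NODE_ENV === "development") {
      console.error(error.message);
    } else if (!isExpected(error)) {
      Sentry.captureException(error);
    }
  }, [error, isExpected]);
};
```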
@@ -68,7 +68,6 @@ vi.mock("@/app/middleware/endpoint-validator", async () => {
|
||||
isClientSideApiRoute: vi.fn().mockReturnValue({ isClientSideApi: false, isRateLimited: true }),
|
||||
isManagementApiRoute: vi.fn().mockReturnValue({ isManagementApi: false, authenticationMethod: "apiKey" }),
|
||||
isIntegrationRoute: vi.fn().mockReturnValue(false),
|
||||
isSyncWithUserIdentificationEndpoint: vi.fn().mockReturnValue(null),
|
||||
};
|
||||
});
|
||||
|
||||
@@ -82,7 +81,6 @@ vi.mock("@/modules/core/rate-limit/rate-limit-configs", () => ({
|
||||
api: {
|
||||
client: { windowMs: 60000, max: 100 },
|
||||
v1: { windowMs: 60000, max: 1000 },
|
||||
syncUserIdentification: { windowMs: 60000, max: 50 },
|
||||
},
|
||||
},
|
||||
}));
|
||||
@@ -463,45 +461,6 @@ describe("withV1ApiWrapper", () => {
|
||||
expect(handler).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("handles sync user identification rate limiting", async () => {
|
||||
const { applyRateLimit, applyIPRateLimit } = await import("@/modules/core/rate-limit/helpers");
|
||||
const {
|
||||
isClientSideApiRoute,
|
||||
isManagementApiRoute,
|
||||
isIntegrationRoute,
|
||||
isSyncWithUserIdentificationEndpoint,
|
||||
} = await import("@/app/middleware/endpoint-validator");
|
||||
const { authenticateRequest } = await import("@/app/api/v1/auth");
|
||||
|
||||
vi.mocked(isClientSideApiRoute).mockReturnValue({ isClientSideApi: true, isRateLimited: true });
|
||||
vi.mocked(isManagementApiRoute).mockReturnValue({
|
||||
isManagementApi: false,
|
||||
authenticationMethod: AuthenticationMethod.None,
|
||||
});
|
||||
vi.mocked(isIntegrationRoute).mockReturnValue(false);
|
||||
vi.mocked(isSyncWithUserIdentificationEndpoint).mockReturnValue({
|
||||
userId: "user-123",
|
||||
environmentId: "env-123",
|
||||
});
|
||||
vi.mocked(authenticateRequest).mockResolvedValue(null);
|
||||
vi.mocked(applyIPRateLimit).mockResolvedValue(undefined);
|
||||
const rateLimitError = new Error("Sync rate limit exceeded");
|
||||
rateLimitError.message = "Sync rate limit exceeded";
|
||||
vi.mocked(applyRateLimit).mockRejectedValue(rateLimitError);
|
||||
|
||||
const handler = vi.fn();
|
||||
const req = createMockRequest({ url: "/api/v1/client/env-123/app/sync/user-123" });
|
||||
const { withV1ApiWrapper } = await import("./with-api-logging");
|
||||
const wrapped = withV1ApiWrapper({ handler });
|
||||
const res = await wrapped(req, undefined);
|
||||
|
||||
expect(res.status).toBe(429);
|
||||
expect(applyRateLimit).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ windowMs: 60000, max: 50 }),
|
||||
"user-123"
|
||||
);
|
||||
});
|
||||
|
||||
test("skips audit log creation when no action/targetType provided", async () => {
|
||||
const { queueAuditEvent: mockedQueueAuditEvent } = (await import(
|
||||
"@/modules/ee/audit-logs/lib/handler"
|
||||
|
||||
@@ -10,7 +10,6 @@ import {
|
||||
isClientSideApiRoute,
|
||||
isIntegrationRoute,
|
||||
isManagementApiRoute,
|
||||
isSyncWithUserIdentificationEndpoint,
|
||||
} from "@/app/middleware/endpoint-validator";
|
||||
import { AUDIT_LOG_ENABLED, IS_PRODUCTION, SENTRY_DSN } from "@/lib/constants";
|
||||
import { authOptions } from "@/modules/auth/lib/authOptions";
|
||||
@@ -48,23 +47,16 @@ enum ApiV1RouteTypeEnum {
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply client-side API rate limiting (IP-based or sync-specific)
|
||||
* Apply client-side API rate limiting (IP-based)
|
||||
*/
|
||||
const applyClientRateLimit = async (url: string, customRateLimitConfig?: TRateLimitConfig): Promise<void> => {
|
||||
const syncEndpoint = isSyncWithUserIdentificationEndpoint(url);
|
||||
if (syncEndpoint) {
|
||||
const syncRateLimitConfig = rateLimitConfigs.api.syncUserIdentification;
|
||||
await applyRateLimit(syncRateLimitConfig, syncEndpoint.userId);
|
||||
} else {
|
||||
await applyIPRateLimit(customRateLimitConfig ?? rateLimitConfigs.api.client);
|
||||
}
|
||||
const applyClientRateLimit = async (customRateLimitConfig?: TRateLimitConfig): Promise<void> => {
|
||||
await applyIPRateLimit(customRateLimitConfig ?? rateLimitConfigs.api.client);
|
||||
};
|
||||
|
||||
/**
|
||||
* Handle rate limiting based on authentication and API type
|
||||
*/
|
||||
const handleRateLimiting = async (
|
||||
url: string,
|
||||
authentication: TApiV1Authentication,
|
||||
routeType: ApiV1RouteTypeEnum,
|
||||
customRateLimitConfig?: TRateLimitConfig
|
||||
@@ -84,7 +76,7 @@ const handleRateLimiting = async (
|
||||
}
|
||||
|
||||
if (routeType === ApiV1RouteTypeEnum.Client) {
|
||||
await applyClientRateLimit(url, customRateLimitConfig);
|
||||
await applyClientRateLimit(customRateLimitConfig);
|
||||
}
|
||||
} catch (error) {
|
||||
return responses.tooManyRequestsResponse(error.message);
|
||||
@@ -255,7 +247,6 @@ const getRouteType = (
|
||||
* Features:
|
||||
* - Performs authentication once and passes result to handler
|
||||
* - Applies API key-based rate limiting with differentiated limits for client vs management APIs
|
||||
* - Includes additional sync user identification rate limiting for client-side sync endpoints
|
||||
* - Sets userId and organizationId in audit log automatically when audit logging is enabled
|
||||
* - System and Sentry logs are always called for non-success responses
|
||||
* - Uses function overloads to provide type safety without requiring type guards
|
||||
@@ -328,12 +319,7 @@ export const withV1ApiWrapper: {
|
||||
|
||||
// === Rate Limiting ===
|
||||
if (isRateLimited) {
|
||||
const rateLimitResponse = await handleRateLimiting(
|
||||
req.nextUrl.pathname,
|
||||
authentication,
|
||||
routeType,
|
||||
customRateLimitConfig
|
||||
);
|
||||
const rateLimitResponse = await handleRateLimiting(authentication, routeType, customRateLimitConfig);
|
||||
if (rateLimitResponse) return rateLimitResponse;
|
||||
}
|
||||
|
||||
|
||||
80
apps/web/app/lib/survey-block-builder.test.ts
Normal file
80
apps/web/app/lib/survey-block-builder.test.ts
Normal file
@@ -0,0 +1,80 @@
|
||||
import type { TFunction } from "i18next";
|
||||
import { describe, expect, test, vi } from "vitest";
|
||||
import { TSurveyElementTypeEnum } from "@formbricks/types/surveys/elements";
|
||||
import { createI18nString } from "@/lib/i18n/utils";
|
||||
import { buildBlock } from "./survey-block-builder";
|
||||
|
||||
const mockT = vi.fn((key: string) => {
|
||||
const translations: Record<string, string> = {
|
||||
"common.next": "Next",
|
||||
"common.back": "Back",
|
||||
"": "",
|
||||
};
|
||||
return translations[key] || key;
|
||||
}) as unknown as TFunction;
|
||||
|
||||
describe("survey-block-builder", () => {
|
||||
describe("buildBlock", () => {
|
||||
const mockElements = [
|
||||
{
|
||||
id: "element-1",
|
||||
type: TSurveyElementTypeEnum.OpenText,
|
||||
headline: createI18nString("Test Question", []),
|
||||
required: false,
|
||||
inputType: "text",
|
||||
longAnswer: false,
|
||||
charLimit: { enabled: false },
|
||||
},
|
||||
];
|
||||
|
||||
test("should use getDefaultButtonLabel when buttonLabel is provided", () => {
|
||||
const result = buildBlock({
|
||||
name: "Test Block",
|
||||
elements: mockElements,
|
||||
buttonLabel: "Custom Next",
|
||||
t: mockT,
|
||||
});
|
||||
|
||||
expect(result.buttonLabel).toEqual({
|
||||
default: "Custom Next",
|
||||
});
|
||||
});
|
||||
|
||||
test("should use createI18nString with empty translation when buttonLabel is not provided", () => {
|
||||
const result = buildBlock({
|
||||
name: "Test Block",
|
||||
elements: mockElements,
|
||||
t: mockT,
|
||||
});
|
||||
|
||||
expect(result.buttonLabel).toEqual({
|
||||
default: "",
|
||||
});
|
||||
});
|
||||
|
||||
test("should use getDefaultBackButtonLabel when backButtonLabel is provided", () => {
|
||||
const result = buildBlock({
|
||||
name: "Test Block",
|
||||
elements: mockElements,
|
||||
backButtonLabel: "Custom Back",
|
||||
t: mockT,
|
||||
});
|
||||
|
||||
expect(result.backButtonLabel).toEqual({
|
||||
default: "Custom Back",
|
||||
});
|
||||
});
|
||||
|
||||
test("should use createI18nString with empty translation when backButtonLabel is not provided", () => {
|
||||
const result = buildBlock({
|
||||
name: "Test Block",
|
||||
elements: mockElements,
|
||||
t: mockT,
|
||||
});
|
||||
|
||||
expect(result.backButtonLabel).toEqual({
|
||||
default: "",
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -302,7 +302,9 @@ export const buildBlock = ({
    elements,
    logic,
    logicFallback,
    buttonLabel: buttonLabel ? getDefaultButtonLabel(buttonLabel, t) : undefined,
    backButtonLabel: backButtonLabel ? getDefaultBackButtonLabel(backButtonLabel, t) : undefined,
    buttonLabel: buttonLabel ? getDefaultButtonLabel(buttonLabel, t) : createI18nString(t(""), []),
    backButtonLabel: backButtonLabel
      ? getDefaultBackButtonLabel(backButtonLabel, t)
      : createI18nString(t(""), []),
  };
};
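Per the survey-block-builder tests added earlier in this diff, the practical effect is that omitting the labels no longer yields undefined. A usage sketch under that assumption:

```typescript
import type { TFunction } from "i18next";
import { buildBlock } from "./survey-block-builder";

declare const elements: Parameters<typeof buildBlock>[0]["elements"];
declare const t: TFunction;

// Omitting buttonLabel/backButtonLabel now falls back to an empty i18n string.
const block = buildBlock({ name: "Block 1", elements, t });
// block.buttonLabel     -> { default: "" }
// block.backButtonLabel -> { default: "" }
```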
@@ -4835,7 +4835,7 @@ export const previewSurvey = (projectName: string, t: TFunction): TSurvey => {
|
||||
segment: null,
|
||||
blocks: [
|
||||
{
|
||||
id: createId(),
|
||||
id: "cltxxaa6x0000g8hacxdxeje1",
|
||||
name: "Block 1",
|
||||
elements: [
|
||||
{
|
||||
@@ -4848,16 +4848,18 @@ export const previewSurvey = (projectName: string, t: TFunction): TSurvey => {
|
||||
t("templates.preview_survey_question_2_choice_2_label"),
|
||||
],
|
||||
headline: t("templates.preview_survey_question_2_headline"),
|
||||
subheader: t("templates.preview_survey_question_2_subheader"),
|
||||
required: true,
|
||||
shuffleOption: "none",
|
||||
}),
|
||||
isDraft: true,
|
||||
},
|
||||
],
|
||||
buttonLabel: createI18nString(t("templates.next"), []),
|
||||
backButtonLabel: createI18nString(t("templates.preview_survey_question_2_back_button_label"), []),
|
||||
},
|
||||
{
|
||||
id: createId(),
|
||||
id: "cltxxaa6x0000g8hacxdxeje2",
|
||||
name: "Block 2",
|
||||
elements: [
|
||||
{
|
||||
@@ -4913,6 +4915,7 @@ export const previewSurvey = (projectName: string, t: TFunction): TSurvey => {
|
||||
showLanguageSwitch: false,
|
||||
followUps: [],
|
||||
isBackButtonHidden: false,
|
||||
isCaptureIpEnabled: false,
|
||||
metadata: {},
|
||||
questions: [], // Required for build-time type checking (Zod defaults to [] at runtime)
|
||||
slug: null,
|
||||
|
||||
@@ -8,7 +8,6 @@ import {
isManagementApiRoute,
isPublicDomainRoute,
isRouteAllowedForDomain,
isSyncWithUserIdentificationEndpoint,
} from "./endpoint-validator";

describe("endpoint-validator", () => {
@@ -270,58 +269,6 @@ describe("endpoint-validator", () => {
});
});

describe("isSyncWithUserIdentificationEndpoint", () => {
test("should return environmentId and userId for valid sync URLs", () => {
const result1 = isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/app/sync/user456");
expect(result1).toEqual({
environmentId: "env123",
userId: "user456",
});

const result2 = isSyncWithUserIdentificationEndpoint("/api/v1/client/abc-123/app/sync/xyz-789");
expect(result2).toEqual({
environmentId: "abc-123",
userId: "xyz-789",
});

const result3 = isSyncWithUserIdentificationEndpoint(
"/api/v1/client/env_123_test/app/sync/user_456_test"
);
expect(result3).toEqual({
environmentId: "env_123_test",
userId: "user_456_test",
});
});

test("should handle optional trailing slash", () => {
// Test both with and without trailing slash
const result1 = isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/app/sync/user456");
expect(result1).toEqual({
environmentId: "env123",
userId: "user456",
});

const result2 = isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/app/sync/user456/");
expect(result2).toEqual({
environmentId: "env123",
userId: "user456",
});
});

test("should return false for invalid sync URLs", () => {
expect(isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/app/sync")).toBe(false);
expect(isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/something")).toBe(false);
expect(isSyncWithUserIdentificationEndpoint("/api/something")).toBe(false);
expect(isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/app/other/user456")).toBe(false);
expect(isSyncWithUserIdentificationEndpoint("/api/v2/client/env123/app/sync/user456")).toBe(false); // only v1 supported
});

test("should handle empty or malformed IDs", () => {
expect(isSyncWithUserIdentificationEndpoint("/api/v1/client//app/sync/user456")).toBe(false);
expect(isSyncWithUserIdentificationEndpoint("/api/v1/client/env123/app/sync/")).toBe(false);
});
});

describe("isPublicDomainRoute", () => {
test("should return true for health endpoint", () => {
expect(isPublicDomainRoute("/health")).toBe(true);
@@ -582,12 +529,6 @@ describe("endpoint-validator", () => {
test("should handle special characters in survey IDs", () => {
expect(isPublicDomainRoute("/s/survey-123_test.v2")).toBe(true);
expect(isPublicDomainRoute("/c/jwt.token.with.dots")).toBe(true);
expect(
isSyncWithUserIdentificationEndpoint("/api/v1/client/env-123_test/app/sync/user-456_test")
).toEqual({
environmentId: "env-123_test",
userId: "user-456_test",
});
});
});

@@ -628,15 +569,6 @@ describe("endpoint-validator", () => {
const longSurveyId = "a".repeat(1000);
const longPath = `s/${longSurveyId}`;
expect(isPublicDomainRoute(`/${longPath}`)).toBe(true);

const longEnvironmentId = "env" + "a".repeat(1000);
const longUserId = "user" + "b".repeat(1000);
expect(
isSyncWithUserIdentificationEndpoint(`/api/v1/client/${longEnvironmentId}/app/sync/${longUserId}`)
).toEqual({
environmentId: longEnvironmentId,
userId: longUserId,
});
});

test("should handle empty and minimal inputs", () => {
@@ -651,7 +583,6 @@ describe("endpoint-validator", () => {
});
expect(isIntegrationRoute("")).toBe(false);
expect(isAuthProtectedRoute("")).toBe(false);
expect(isSyncWithUserIdentificationEndpoint("")).toBe(false);
});
});

@@ -43,14 +43,6 @@ export const isAuthProtectedRoute = (url: string): boolean => {
return protectedRoutes.some((route) => url.startsWith(route));
};

export const isSyncWithUserIdentificationEndpoint = (
url: string
): { environmentId: string; userId: string } | false => {
const regex = /\/api\/v1\/client\/(?<environmentId>[^/]+)\/app\/sync\/(?<userId>[^/]+)/;
const match = url.match(regex);
return match ? { environmentId: match.groups!.environmentId, userId: match.groups!.userId } : false;
};

/**
* Check if the route should be accessible on the public domain (PUBLIC_URL)
* Uses whitelist approach - only explicitly allowed routes are accessible

@@ -8,7 +8,7 @@ import { authorizePrivateDownload } from "@/app/storage/[environmentId]/[accessT
import { authOptions } from "@/modules/auth/lib/authOptions";
import { applyRateLimit } from "@/modules/core/rate-limit/helpers";
import { rateLimitConfigs } from "@/modules/core/rate-limit/rate-limit-configs";
import { deleteFile, getSignedUrlForDownload } from "@/modules/storage/service";
import { deleteFile, getFileStreamForDownload } from "@/modules/storage/service";
import { getErrorResponseFromStorageError } from "@/modules/storage/utils";
import { logFileDeletion } from "./lib/audit-logs";

@@ -39,21 +39,25 @@ export const GET = async (
}
}

const signedUrlResult = await getSignedUrlForDownload(fileName, environmentId, accessType);
// Stream the file directly
const streamResult = await getFileStreamForDownload(fileName, environmentId, accessType);

if (!signedUrlResult.ok) {
const errorResponse = getErrorResponseFromStorageError(signedUrlResult.error, { fileName });
if (!streamResult.ok) {
const errorResponse = getErrorResponseFromStorageError(streamResult.error, { fileName });
return errorResponse;
}

return new Response(null, {
status: 302,
const { body, contentType, contentLength } = streamResult.data;

return new Response(body, {
status: 200,
headers: {
Location: signedUrlResult.data,
"Content-Type": contentType,
...(contentLength > 0 && { "Content-Length": String(contentLength) }),
"Cache-Control":
accessType === "private"
? "no-store, no-cache, must-revalidate"
: "public, max-age=300, s-maxage=300, stale-while-revalidate=300",
: "public, max-age=31536000, immutable",
},
});
};

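The new route destructures `streamResult.data` into `body`, `contentType`, and `contentLength`. As a reading aid, a sketch of the result shape this implies; this type is inferred from the destructuring above, not taken from `@/modules/storage/service`, so the real definition may differ:

```typescript
// Assumed shape only, inferred from the new route code above; the actual type
// lives in @/modules/storage/service and may differ in detail.
type TGetFileStreamResult =
  | { ok: true; data: { body: ReadableStream<Uint8Array>; contentType: string; contentLength: number } }
  | { ok: false; error: unknown };
```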
@@ -9,17 +9,18 @@
"source": "en-US",
"targets": [
"de-DE",
"es-ES",
"fr-FR",
"hu-HU",
"ja-JP",
"nl-NL",
"pt-BR",
"pt-PT",
"ro-RO",
"zh-Hans-CN",
"zh-Hant-TW",
"nl-NL",
"es-ES",
"ru-RU",
"sv-SE",
"ru-RU"
"zh-Hans-CN",
"zh-Hant-TW"
]
},
"version": 1.8

File diff suppressed because it is too large
@@ -1,59 +1,205 @@
// instrumentation-node.ts
// OpenTelemetry instrumentation for Next.js - loaded via instrumentation.ts hook
// Pattern based on: ee/src/opentelemetry.ts (license server)
import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
import { PrometheusExporter } from "@opentelemetry/exporter-prometheus";
import { HostMetrics } from "@opentelemetry/host-metrics";
import { registerInstrumentations } from "@opentelemetry/instrumentation";
import { HttpInstrumentation } from "@opentelemetry/instrumentation-http";
import { RuntimeNodeInstrumentation } from "@opentelemetry/instrumentation-runtime-node";
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
import { resourceFromAttributes } from "@opentelemetry/resources";
import { PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
import { NodeSDK } from "@opentelemetry/sdk-node";
import {
detectResources,
envDetector,
hostDetector,
processDetector,
resourceFromAttributes,
} from "@opentelemetry/resources";
import { MeterProvider } from "@opentelemetry/sdk-metrics";
AlwaysOffSampler,
AlwaysOnSampler,
BatchSpanProcessor,
ParentBasedSampler,
type Sampler,
TraceIdRatioBasedSampler,
} from "@opentelemetry/sdk-trace-base";
import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION } from "@opentelemetry/semantic-conventions";
import { PrismaInstrumentation } from "@prisma/instrumentation";
import { logger } from "@formbricks/logger";
import { env } from "@/lib/env";

const exporter = new PrometheusExporter({
port: env.PROMETHEUS_EXPORTER_PORT ? parseInt(env.PROMETHEUS_EXPORTER_PORT) : 9464,
endpoint: "/metrics",
host: "0.0.0.0", // Listen on all network interfaces
});
// --- Configuration from environment ---
const serviceName = process.env.OTEL_SERVICE_NAME || "formbricks";
const serviceVersion = process.env.npm_package_version || "0.0.0";
const environment = process.env.ENVIRONMENT || process.env.NODE_ENV || "development";
const otlpEndpoint = process.env.OTEL_EXPORTER_OTLP_ENDPOINT;
const prometheusEnabled = process.env.PROMETHEUS_ENABLED === "1";
const prometheusPort = process.env.PROMETHEUS_EXPORTER_PORT
? Number.parseInt(process.env.PROMETHEUS_EXPORTER_PORT)
: 9464;

const detectedResources = detectResources({
detectors: [envDetector, processDetector, hostDetector],
});
// --- Configure OTLP exporters (conditional on endpoint being set) ---
let traceExporter: OTLPTraceExporter | undefined;
let otlpMetricExporter: OTLPMetricExporter | undefined;

const customResources = resourceFromAttributes({});

const resources = detectedResources.merge(customResources);

const meterProvider = new MeterProvider({
readers: [exporter],
resource: resources,
});

const hostMetrics = new HostMetrics({
name: `otel-metrics`,
meterProvider,
});

registerInstrumentations({
meterProvider,
instrumentations: [new HttpInstrumentation(), new RuntimeNodeInstrumentation()],
});

hostMetrics.start();

process.on("SIGTERM", async () => {
if (otlpEndpoint) {
try {
// Stop collecting metrics or flush them if needed
await meterProvider.shutdown();
// Possibly close other instrumentation resources
// OTLPTraceExporter reads OTEL_EXPORTER_OTLP_ENDPOINT from env
// and appends /v1/traces for HTTP transport
// Uses OTEL_EXPORTER_OTLP_HEADERS from env natively (W3C OTel format: key=value,key2=value2)
traceExporter = new OTLPTraceExporter();

// OTLPMetricExporter reads OTEL_EXPORTER_OTLP_ENDPOINT from env
// and appends /v1/metrics for HTTP transport
// Uses OTEL_EXPORTER_OTLP_HEADERS from env natively
otlpMetricExporter = new OTLPMetricExporter();
} catch (error) {
logger.error(error, "Failed to create OTLP exporters. Telemetry will not be exported.");
}
}

// --- Configure Prometheus exporter (pull-based metrics for ServiceMonitor) ---
let prometheusExporter: PrometheusExporter | undefined;
if (prometheusEnabled) {
prometheusExporter = new PrometheusExporter({
port: prometheusPort,
endpoint: "/metrics",
host: "0.0.0.0",
});
}

// --- Build metric readers array ---
const metricReaders: (PeriodicExportingMetricReader | PrometheusExporter)[] = [];

if (otlpMetricExporter) {
metricReaders.push(
new PeriodicExportingMetricReader({
exporter: otlpMetricExporter,
exportIntervalMillis: 60000, // Export every 60 seconds
})
);
}

if (prometheusExporter) {
metricReaders.push(prometheusExporter);
}

// --- Resource attributes ---
const resourceAttributes: Record<string, string> = {
[ATTR_SERVICE_NAME]: serviceName,
[ATTR_SERVICE_VERSION]: serviceVersion,
"deployment.environment": environment,
};

// --- Configure sampler ---
const samplerType = process.env.OTEL_TRACES_SAMPLER || "always_on";
const parsedSamplerArg = process.env.OTEL_TRACES_SAMPLER_ARG
? Number.parseFloat(process.env.OTEL_TRACES_SAMPLER_ARG)
: undefined;
const samplerArg =
parsedSamplerArg !== undefined && !Number.isNaN(parsedSamplerArg) ? parsedSamplerArg : undefined;

let sampler: Sampler;
switch (samplerType) {
case "always_on":
sampler = new AlwaysOnSampler();
break;
case "always_off":
sampler = new AlwaysOffSampler();
break;
case "traceidratio":
sampler = new TraceIdRatioBasedSampler(samplerArg ?? 1);
break;
case "parentbased_traceidratio":
sampler = new ParentBasedSampler({
root: new TraceIdRatioBasedSampler(samplerArg ?? 1),
});
break;
case "parentbased_always_on":
sampler = new ParentBasedSampler({
root: new AlwaysOnSampler(),
});
break;
case "parentbased_always_off":
sampler = new ParentBasedSampler({
root: new AlwaysOffSampler(),
});
break;
default:
logger.warn(`Unknown sampler type: ${samplerType}. Using always_on.`);
sampler = new AlwaysOnSampler();
}

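For reference when deploying, a minimal sketch of the environment variables the instrumentation above reads at startup. The variable names come from the code; every value shown is illustrative only (including the collector endpoint), not part of the diff:

```typescript
// Illustrative values only; set these in the runtime environment before the app starts.
process.env.OTEL_SERVICE_NAME = "formbricks"; // service.name resource attribute
process.env.OTEL_EXPORTER_OTLP_ENDPOINT = "http://otel-collector:4318"; // hypothetical endpoint; enables OTLP traces + metrics
process.env.PROMETHEUS_ENABLED = "1"; // enables the pull-based /metrics endpoint
process.env.PROMETHEUS_EXPORTER_PORT = "9464";
process.env.OTEL_TRACES_SAMPLER = "parentbased_traceidratio"; // one of the switch cases above
process.env.OTEL_TRACES_SAMPLER_ARG = "0.1"; // sample roughly 10% of root traces
```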
// --- Initialize NodeSDK ---
const sdk = new NodeSDK({
sampler,
resource: resourceFromAttributes(resourceAttributes),
// When no OTLP endpoint is configured (e.g. Prometheus-only setups), pass an empty
// spanProcessors array to prevent the SDK from falling back to its default OTLP exporter
// which would attempt connections to localhost:4318 and cause noisy errors.
spanProcessors: traceExporter
? [
new BatchSpanProcessor(traceExporter, {
maxQueueSize: 2048,
maxExportBatchSize: 512,
scheduledDelayMillis: 5000,
exportTimeoutMillis: 30000,
}),
]
: [],
metricReaders: metricReaders.length > 0 ? metricReaders : undefined,
instrumentations: [
getNodeAutoInstrumentations({
// Disable noisy/unnecessary instrumentations
"@opentelemetry/instrumentation-fs": {
enabled: false,
},
"@opentelemetry/instrumentation-dns": {
enabled: false,
},
"@opentelemetry/instrumentation-net": {
enabled: false,
},
// Disable pg instrumentation - PrismaInstrumentation handles DB tracing
"@opentelemetry/instrumentation-pg": {
enabled: false,
},
"@opentelemetry/instrumentation-http": {
// Ignore health/metrics endpoints to reduce noise
ignoreIncomingRequestHook: (req) => {
const url = req.url || "";
return url === "/health" || url.startsWith("/metrics") || url === "/api/v2/health";
},
},
// Enable runtime metrics for Node.js process monitoring
"@opentelemetry/instrumentation-runtime-node": {
enabled: true,
},
}),
// Prisma instrumentation for database query tracing
new PrismaInstrumentation(),
],
});

// Start the SDK
sdk.start();

// --- Log initialization status ---
const enabledFeatures: string[] = [];
if (traceExporter) enabledFeatures.push("traces");
if (otlpMetricExporter) enabledFeatures.push("otlp-metrics");
if (prometheusExporter) enabledFeatures.push("prometheus-metrics");

const samplerArgStr = process.env.OTEL_TRACES_SAMPLER_ARG || "";
const samplerArgMsg = samplerArgStr ? `, samplerArg=${samplerArgStr}` : "";

if (enabledFeatures.length > 0) {
logger.info(
`OpenTelemetry initialized: service=${serviceName}, version=${serviceVersion}, environment=${environment}, exporters=${enabledFeatures.join("+")}, sampler=${samplerType}${samplerArgMsg}`
);
} else {
logger.info(
`OpenTelemetry initialized (no exporters): service=${serviceName}, version=${serviceVersion}, environment=${environment}`
);
}

// --- Graceful shutdown ---
// Run before other SIGTERM listeners (logger flush, etc.) so spans are drained first.
process.prependListener("SIGTERM", async () => {
try {
await sdk.shutdown();
} catch (e) {
logger.error(e, "Error during graceful shutdown");
} finally {
process.exit(0);
logger.error(e, "Error during OpenTelemetry shutdown");
}
});

@@ -5,10 +5,13 @@ export const onRequestError = Sentry.captureRequestError;

export const register = async () => {
if (process.env.NEXT_RUNTIME === "nodejs") {
if (PROMETHEUS_ENABLED) {
// Load OpenTelemetry instrumentation when Prometheus metrics or OTLP export is enabled
if (PROMETHEUS_ENABLED || process.env.OTEL_EXPORTER_OTLP_ENDPOINT) {
await import("./instrumentation-node");
}
}
// Sentry init loads after OTEL to avoid TracerProvider conflicts
// Sentry tracing is disabled (tracesSampleRate: 0) -- SigNoz handles distributed tracing
if (process.env.NEXT_RUNTIME === "nodejs" && IS_PRODUCTION && SENTRY_DSN) {
await import("./sentry.server.config");
}

@@ -165,19 +165,20 @@ export const MAX_ATTRIBUTE_CLASSES_PER_ENVIRONMENT = 150;

export const DEFAULT_LOCALE = "en-US";
export const AVAILABLE_LOCALES: TUserLocale[] = [
"en-US",
"de-DE",
"pt-BR",
"en-US",
"es-ES",
"fr-FR",
"hu-HU",
"ja-JP",
"nl-NL",
"zh-Hant-TW",
"pt-BR",
"pt-PT",
"ro-RO",
"ja-JP",
"zh-Hans-CN",
"es-ES",
"sv-SE",
"ru-RU",
"sv-SE",
"zh-Hans-CN",
"zh-Hant-TW",
];

// Billing constants

@@ -1,8 +1,11 @@
import * as crypto from "crypto";
import * as crypto from "node:crypto";
import { beforeEach, describe, expect, test, vi } from "vitest";
import { logger } from "@formbricks/logger";
// Import after unmocking
import {
generateStandardWebhookSignature,
generateWebhookSecret,
getWebhookSecretBytes,
hashSecret,
hashSha256,
parseApiKeyV2,
@@ -283,6 +286,133 @@ describe("Crypto Utils", () => {
});
});

describe("Webhook Signature Functions", () => {
describe("generateWebhookSecret", () => {
test("should generate a secret with whsec_ prefix", () => {
const secret = generateWebhookSecret();
expect(secret.startsWith("whsec_")).toBe(true);
});

test("should generate base64-encoded content after prefix", () => {
const secret = generateWebhookSecret();
const base64Part = secret.slice(6); // Remove "whsec_"

// Should be valid base64
expect(() => Buffer.from(base64Part, "base64")).not.toThrow();

// Should decode to 32 bytes (256 bits)
const decoded = Buffer.from(base64Part, "base64");
expect(decoded.length).toBe(32);
});

test("should generate unique secrets each time", () => {
const secret1 = generateWebhookSecret();
const secret2 = generateWebhookSecret();
expect(secret1).not.toBe(secret2);
});
});

describe("getWebhookSecretBytes", () => {
test("should decode whsec_ prefixed secret to bytes", () => {
const secret = generateWebhookSecret();
const bytes = getWebhookSecretBytes(secret);

expect(Buffer.isBuffer(bytes)).toBe(true);
expect(bytes.length).toBe(32);
});

test("should handle secret without whsec_ prefix", () => {
const base64Secret = Buffer.from("test-secret-bytes-32-characters!").toString("base64");
const bytes = getWebhookSecretBytes(base64Secret);

expect(Buffer.isBuffer(bytes)).toBe(true);
expect(bytes.toString()).toBe("test-secret-bytes-32-characters!");
});

test("should correctly decode a known secret", () => {
// Create a known secret
const knownBytes = Buffer.from("known-test-secret-for-testing!!");
const secret = `whsec_${knownBytes.toString("base64")}`;

const decoded = getWebhookSecretBytes(secret);
expect(decoded.toString()).toBe("known-test-secret-for-testing!!");
});
});

describe("generateStandardWebhookSignature", () => {
test("should generate signature in v1,{base64} format", () => {
const secret = generateWebhookSecret();
const signature = generateStandardWebhookSignature("msg_123", 1704547200, '{"test":"data"}', secret);

expect(signature.startsWith("v1,")).toBe(true);
const base64Part = signature.slice(3);
expect(() => Buffer.from(base64Part, "base64")).not.toThrow();
});

test("should generate deterministic signatures for same inputs", () => {
const secret = "whsec_" + Buffer.from("test-secret-32-bytes-exactly!!!").toString("base64");
const webhookId = "msg_test123";
const timestamp = 1704547200;
const payload = '{"event":"test"}';

const sig1 = generateStandardWebhookSignature(webhookId, timestamp, payload, secret);
const sig2 = generateStandardWebhookSignature(webhookId, timestamp, payload, secret);

expect(sig1).toBe(sig2);
});

test("should generate different signatures for different payloads", () => {
const secret = "whsec_" + Buffer.from("test-secret-32-bytes-exactly!!!").toString("base64");
const webhookId = "msg_test123";
const timestamp = 1704547200;

const sig1 = generateStandardWebhookSignature(webhookId, timestamp, '{"event":"a"}', secret);
const sig2 = generateStandardWebhookSignature(webhookId, timestamp, '{"event":"b"}', secret);

expect(sig1).not.toBe(sig2);
});

test("should generate different signatures for different timestamps", () => {
const secret = "whsec_" + Buffer.from("test-secret-32-bytes-exactly!!!").toString("base64");
const webhookId = "msg_test123";
const payload = '{"event":"test"}';

const sig1 = generateStandardWebhookSignature(webhookId, 1704547200, payload, secret);
const sig2 = generateStandardWebhookSignature(webhookId, 1704547201, payload, secret);

expect(sig1).not.toBe(sig2);
});

test("should generate different signatures for different webhook IDs", () => {
const secret = "whsec_" + Buffer.from("test-secret-32-bytes-exactly!!!").toString("base64");
const timestamp = 1704547200;
const payload = '{"event":"test"}';

const sig1 = generateStandardWebhookSignature("msg_1", timestamp, payload, secret);
const sig2 = generateStandardWebhookSignature("msg_2", timestamp, payload, secret);

expect(sig1).not.toBe(sig2);
});

test("should produce verifiable signatures", () => {
// This test verifies the signature can be verified using the same algorithm
const secretBytes = Buffer.from("test-secret-32-bytes-exactly!!!");
const secret = `whsec_${secretBytes.toString("base64")}`;
const webhookId = "msg_verify";
const timestamp = 1704547200;
const payload = '{"event":"verify"}';

const signature = generateStandardWebhookSignature(webhookId, timestamp, payload, secret);

// Manually compute the expected signature
const signedContent = `${webhookId}.${timestamp}.${payload}`;
const expectedSig = crypto.createHmac("sha256", secretBytes).update(signedContent).digest("base64");

expect(signature).toBe(`v1,${expectedSig}`);
});
});
});

describe("GCM decryption failure logging", () => {
// Test key - 32 bytes for AES-256
const testKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
@@ -314,11 +444,11 @@ describe("Crypto Utils", () => {
expect(() => symmetricDecrypt(corruptedPayload, testKey)).toThrow();

// Verify logger.warn was called with the correct format (object first, message second)
expect(logger.warn).toHaveBeenCalledWith(
{ err: expect.any(Error) },
"AES-GCM decryption failed; refusing to fall back to insecure CBC"
);
expect(logger.warn).toHaveBeenCalledTimes(1);
const [firstArg, secondArg] = vi.mocked(logger.warn).mock.calls[0];
expect(firstArg).toHaveProperty("err");
expect(firstArg.err).toHaveProperty("message");
expect(secondArg).toBe("AES-GCM decryption failed; refusing to fall back to insecure CBC");
});

test("logs warning and throws when GCM decryption fails with corrupted encrypted data", () => {
@@ -342,11 +472,11 @@ describe("Crypto Utils", () => {
expect(() => symmetricDecrypt(corruptedPayload, testKey)).toThrow();

// Verify logger.warn was called
expect(logger.warn).toHaveBeenCalledWith(
{ err: expect.any(Error) },
"AES-GCM decryption failed; refusing to fall back to insecure CBC"
);
expect(logger.warn).toHaveBeenCalledTimes(1);
const [firstArg, secondArg] = vi.mocked(logger.warn).mock.calls[0];
expect(firstArg).toHaveProperty("err");
expect(firstArg.err).toHaveProperty("message");
expect(secondArg).toBe("AES-GCM decryption failed; refusing to fall back to insecure CBC");
});

test("logs warning and throws when GCM decryption fails with wrong key", () => {
@@ -366,11 +496,11 @@ describe("Crypto Utils", () => {
expect(() => symmetricDecrypt(payload, wrongKey)).toThrow();

// Verify logger.warn was called
expect(logger.warn).toHaveBeenCalledWith(
{ err: expect.any(Error) },
"AES-GCM decryption failed; refusing to fall back to insecure CBC"
);
expect(logger.warn).toHaveBeenCalledTimes(1);
const [firstArg, secondArg] = vi.mocked(logger.warn).mock.calls[0];
expect(firstArg).toHaveProperty("err");
expect(firstArg.err).toHaveProperty("message");
expect(secondArg).toBe("AES-GCM decryption failed; refusing to fall back to insecure CBC");
});
});
});

@@ -1,5 +1,5 @@
import { compare, hash } from "bcryptjs";
import { createCipheriv, createDecipheriv, createHash, randomBytes } from "crypto";
import { createCipheriv, createDecipheriv, createHash, createHmac, randomBytes } from "node:crypto";
import { logger } from "@formbricks/logger";
import { ENCRYPTION_KEY } from "@/lib/constants";

@@ -141,3 +141,54 @@ export const parseApiKeyV2 = (key: string): { secret: string } | null => {

return { secret };
};

// Standard Webhooks secret prefix
const WEBHOOK_SECRET_PREFIX = "whsec_";

/**
* Generate a Standard Webhooks compliant secret
* Following: https://github.com/standard-webhooks/standard-webhooks/blob/main/spec/standard-webhooks.md
*
* Format: whsec_ + base64(32 random bytes)
* @returns A webhook secret in format "whsec_{base64_encoded_random_bytes}"
*/
export const generateWebhookSecret = (): string => {
const secretBytes = randomBytes(32); // 256 bits of entropy
return `${WEBHOOK_SECRET_PREFIX}${secretBytes.toString("base64")}`;
};

/**
* Decode a Standard Webhooks secret to get the raw bytes
* Strips the whsec_ prefix and base64 decodes the rest
*
* @param secret The webhook secret (with or without whsec_ prefix)
* @returns Buffer containing the raw secret bytes
*/
export const getWebhookSecretBytes = (secret: string): Buffer => {
const base64Part = secret.startsWith(WEBHOOK_SECRET_PREFIX)
? secret.slice(WEBHOOK_SECRET_PREFIX.length)
: secret;
return Buffer.from(base64Part, "base64");
};

/**
* Generate Standard Webhooks compliant signature
* Following: https://github.com/standard-webhooks/standard-webhooks/blob/main/spec/standard-webhooks.md
*
* @param webhookId Unique message identifier
* @param timestamp Unix timestamp in seconds
* @param payload The request body as a string
* @param secret The shared secret (whsec_ prefixed)
* @returns The signature in format "v1,{base64_signature}"
*/
export const generateStandardWebhookSignature = (
webhookId: string,
timestamp: number,
payload: string,
secret: string
): string => {
const signedContent = `${webhookId}.${timestamp}.${payload}`;
const secretBytes = getWebhookSecretBytes(secret);
const signature = createHmac("sha256", secretBytes).update(signedContent).digest("base64");
return `v1,${signature}`;
};

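For completeness, a minimal sketch of the receiver-side check that pairs with the signing helper above. The verification helper itself is not part of the diff; the header names (webhook-id, webhook-timestamp, webhook-signature) are those used by the Standard Webhooks spec referenced in the doc comments:

```typescript
import { createHmac, timingSafeEqual } from "node:crypto";

// Illustrative receiver-side verification, assuming the "v1,{base64}" scheme above:
// recompute the HMAC over `${id}.${timestamp}.${payload}` and compare in constant time.
const verifyStandardWebhookSignature = (
  webhookId: string,
  timestamp: number,
  payload: string,
  secretBytes: Buffer,
  receivedSignature: string // e.g. the value of the webhook-signature header
): boolean => {
  const expected = `v1,${createHmac("sha256", secretBytes)
    .update(`${webhookId}.${timestamp}.${payload}`)
    .digest("base64")}`;
  const a = Buffer.from(expected);
  const b = Buffer.from(receivedSignature);
  return a.length === b.length && timingSafeEqual(a, b);
};
```

A receiver would also typically reject timestamps outside a small tolerance window to limit replay, which is a spec recommendation rather than something shown in this diff.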
@@ -23,6 +23,7 @@ export const env = createEnv({
EMAIL_VERIFICATION_DISABLED: z.enum(["1", "0"]).optional(),
ENCRYPTION_KEY: z.string(),
ENTERPRISE_LICENSE_KEY: z.string().optional(),
ENVIRONMENT: z.enum(["production", "staging"]).default("production"),
GITHUB_ID: z.string().optional(),
GITHUB_SECRET: z.string().optional(),
GOOGLE_CLIENT_ID: z.string().optional(),
@@ -54,7 +55,6 @@ export const env = createEnv({
OIDC_DISPLAY_NAME: z.string().optional(),
OIDC_ISSUER: z.string().optional(),
OIDC_SIGNING_ALGORITHM: z.string().optional(),
OPENTELEMETRY_LISTENER_URL: z.string().optional(),
REDIS_URL:
process.env.NODE_ENV === "test"
? z.string().optional()
@@ -151,6 +151,7 @@ export const env = createEnv({
EMAIL_VERIFICATION_DISABLED: process.env.EMAIL_VERIFICATION_DISABLED,
ENCRYPTION_KEY: process.env.ENCRYPTION_KEY,
ENTERPRISE_LICENSE_KEY: process.env.ENTERPRISE_LICENSE_KEY,
ENVIRONMENT: process.env.ENVIRONMENT,
GITHUB_ID: process.env.GITHUB_ID,
GITHUB_SECRET: process.env.GITHUB_SECRET,
GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID,
@@ -172,7 +173,6 @@ export const env = createEnv({
NEXTAUTH_URL: process.env.NEXTAUTH_URL,
NEXTAUTH_SECRET: process.env.NEXTAUTH_SECRET,
SENTRY_DSN: process.env.SENTRY_DSN,
OPENTELEMETRY_LISTENER_URL: process.env.OPENTELEMETRY_LISTENER_URL,
NOTION_OAUTH_CLIENT_ID: process.env.NOTION_OAUTH_CLIENT_ID,
NOTION_OAUTH_CLIENT_SECRET: process.env.NOTION_OAUTH_CLIENT_SECRET,
OIDC_CLIENT_ID: process.env.OIDC_CLIENT_ID,

@@ -167,6 +167,12 @@ export const createEnvironment = async (
description: "Your contact's last name",
type: "default",
},
{
key: "language",
name: "Language",
description: "The language preference of a contact",
type: "default",
},
],
},
},

@@ -88,7 +88,7 @@ export const getLanguageCode = (surveyLanguages: TSurveyLanguage[], languageCode
return language?.default ? "default" : language?.language.code || "default";
};

export const iso639Identifiers = iso639Languages.map((language) => language.alpha2);
export const iso639Identifiers = iso639Languages.map((language) => language.code);

// Helper function to add language keys to a multi-language object (e.g. survey or question)
// Iterates over the object recursively and adds empty strings for new language keys
@@ -126,221 +126,88 @@ export const addMultiLanguageLabels = (object: unknown, languageSymbols: string[
};

export const appLanguages = [
{
code: "en-US",
label: {
"en-US": "English (US)",
"de-DE": "Englisch (US)",
"pt-BR": "Inglês (EUA)",
"fr-FR": "Anglais (États-Unis)",
"zh-Hant-TW": "英文 (美國)",
"pt-PT": "Inglês (EUA)",
"ro-RO": "Engleză (SUA)",
"ja-JP": "英語(米国)",
"zh-Hans-CN": "英语(美国)",
"nl-NL": "Engels (VS)",
"es-ES": "Inglés (EE.UU.)",
"sv-SE": "Engelska (USA)",
"ru-RU": "Английский (США)",
},
},
{
code: "de-DE",
label: {
"en-US": "German",
"de-DE": "Deutsch",
"pt-BR": "Alemão",
"fr-FR": "Allemand",
"zh-Hant-TW": "德語",
"pt-PT": "Alemão",
"ro-RO": "Germană",
"ja-JP": "ドイツ語",
"zh-Hans-CN": "德语",
"nl-NL": "Duits",
"es-ES": "Alemán",
"sv-SE": "Tyska",
"ru-RU": "Немецкий",
},
},
{
code: "pt-BR",
code: "en-US",
label: {
"en-US": "Portuguese (Brazil)",
"de-DE": "Portugiesisch (Brasilien)",
"pt-BR": "Português (Brasil)",
"fr-FR": "Portugais (Brésil)",
"zh-Hant-TW": "葡萄牙語 (巴西)",
"pt-PT": "Português (Brasil)",
"ro-RO": "Portugheză (Brazilia)",
"ja-JP": "ポルトガル語(ブラジル)",
"zh-Hans-CN": "葡萄牙语(巴西)",
"nl-NL": "Portugees (Brazilië)",
"es-ES": "Portugués (Brasil)",
"sv-SE": "Portugisiska (Brasilien)",
"ru-RU": "Португальский (Бразилия)",
},
},
{
code: "fr-FR",
label: {
"en-US": "French",
"de-DE": "Französisch",
"pt-BR": "Francês",
"fr-FR": "Français",
"zh-Hant-TW": "法語",
"pt-PT": "Francês",
"ro-RO": "Franceză",
"ja-JP": "フランス語",
"zh-Hans-CN": "法语",
"nl-NL": "Frans",
"es-ES": "Francés",
"sv-SE": "Franska",
"ru-RU": "Французский",
},
},
{
code: "zh-Hant-TW",
label: {
"en-US": "Chinese (Traditional)",
"de-DE": "Chinesisch (Traditionell)",
"pt-BR": "Chinês (Tradicional)",
"fr-FR": "Chinois (Traditionnel)",
"zh-Hant-TW": "繁體中文",
"pt-PT": "Chinês (Tradicional)",
"ro-RO": "Chineza (Tradițională)",
"ja-JP": "中国語(繁体字)",
"zh-Hans-CN": "繁体中文",
"nl-NL": "Chinees (Traditioneel)",
"es-ES": "Chino (Tradicional)",
"sv-SE": "Kinesiska (traditionell)",
"ru-RU": "Китайский (традиционный)",
},
},
{
code: "pt-PT",
label: {
"en-US": "Portuguese (Portugal)",
"de-DE": "Portugiesisch (Portugal)",
"pt-BR": "Português (Portugal)",
"fr-FR": "Portugais (Portugal)",
"zh-Hant-TW": "葡萄牙語 (葡萄牙)",
"pt-PT": "Português (Portugal)",
"ro-RO": "Portugheză (Portugalia)",
"ja-JP": "ポルトガル語(ポルトガル)",
"zh-Hans-CN": "葡萄牙语(葡萄牙)",
"nl-NL": "Portugees (Portugal)",
"es-ES": "Portugués (Portugal)",
"sv-SE": "Portugisiska (Portugal)",
"ru-RU": "Португальский (Португалия)",
},
},
{
code: "ro-RO",
label: {
"en-US": "Romanian",
"de-DE": "Rumänisch",
"pt-BR": "Romeno",
"fr-FR": "Roumain",
"zh-Hant-TW": "羅馬尼亞語",
"pt-PT": "Romeno",
"ro-RO": "Română",
"ja-JP": "ルーマニア語",
"zh-Hans-CN": "罗马尼亚语",
"nl-NL": "Roemeens",
"es-ES": "Rumano",
"sv-SE": "Rumänska",
"ru-RU": "Румынский",
},
},
{
code: "ja-JP",
label: {
"en-US": "Japanese",
"de-DE": "Japanisch",
"pt-BR": "Japonês",
"fr-FR": "Japonais",
"zh-Hant-TW": "日語",
"pt-PT": "Japonês",
"ro-RO": "Japoneză",
"ja-JP": "日本語",
"zh-Hans-CN": "日语",
"nl-NL": "Japans",
"es-ES": "Japonés",
"sv-SE": "Japanska",
"ru-RU": "Японский",
},
},
{
code: "zh-Hans-CN",
label: {
"en-US": "Chinese (Simplified)",
"de-DE": "Chinesisch (Vereinfacht)",
"pt-BR": "Chinês (Simplificado)",
"fr-FR": "Chinois (Simplifié)",
"zh-Hant-TW": "簡體中文",
"pt-PT": "Chinês (Simplificado)",
"ro-RO": "Chineza (Simplificată)",
"ja-JP": "中国語(簡体字)",
"zh-Hans-CN": "简体中文",
"nl-NL": "Chinees (Vereenvoudigd)",
"es-ES": "Chino (Simplificado)",
"sv-SE": "Kinesiska (förenklad)",
"ru-RU": "Китайский (упрощенный)",
},
},
{
code: "nl-NL",
label: {
"en-US": "Dutch",
"de-DE": "Niederländisch",
"pt-BR": "Holandês",
"fr-FR": "Néerlandais",
"zh-Hant-TW": "荷蘭語",
"pt-PT": "Holandês",
"ro-RO": "Olandeza",
"ja-JP": "オランダ語",
"zh-Hans-CN": "荷兰语",
"nl-NL": "Nederlands",
"es-ES": "Neerlandés",
"sv-SE": "Nederländska",
"ru-RU": "Голландский",
"en-US": "English (US)",
},
},
{
code: "es-ES",
label: {
"en-US": "Spanish",
"de-DE": "Spanisch",
"pt-BR": "Espanhol",
"fr-FR": "Espagnol",
"zh-Hant-TW": "西班牙語",
"pt-PT": "Espanhol",
"ro-RO": "Spaniol",
"ja-JP": "スペイン語",
"zh-Hans-CN": "西班牙语",
"nl-NL": "Spaans",
"es-ES": "Español",
"sv-SE": "Spanska",
"ru-RU": "Испанский",
},
},
{
code: "fr-FR",
label: {
"en-US": "French",
},
},
{
code: "hu-HU",
label: {
"en-US": "Hungarian",
},
},
{
code: "ja-JP",
label: {
"en-US": "Japanese",
},
},
{
code: "nl-NL",
label: {
"en-US": "Dutch",
},
},
{
code: "pt-BR",
label: {
"en-US": "Portuguese (Brazil)",
},
},
{
code: "pt-PT",
label: {
"en-US": "Portuguese (Portugal)",
},
},
{
code: "ro-RO",
label: {
"en-US": "Romanian",
},
},
{
code: "ru-RU",
label: {
"en-US": "Russian",
},
},
{
code: "sv-SE",
label: {
"en-US": "Swedish",
"de-DE": "Schwedisch",
"pt-BR": "Sueco",
"fr-FR": "Suédois",
"zh-Hant-TW": "瑞典語",
"pt-PT": "Sueco",
"ro-RO": "Suedeză",
"ja-JP": "スウェーデン語",
"zh-Hans-CN": "瑞典语",
"nl-NL": "Zweeds",
"es-ES": "Sueco",
"sv-SE": "Svenska",
"ru-RU": "Шведский",
},
},
{
code: "zh-Hans-CN",
label: {
"en-US": "Chinese (Simplified)",
},
},
{
code: "zh-Hant-TW",
label: {
"en-US": "Chinese (Traditional)",
},
},
];
export { iso639Languages };

@@ -21,7 +21,7 @@ export type TInstanceInfo = {
export const getInstanceInfo = reactCache(async (): Promise<TInstanceInfo | null> => {
try {
const oldestOrg = await prisma.organization.findFirst({
orderBy: { createdAt: "asc" },
orderBy: [{ createdAt: "asc" }, { id: "asc" }],
select: { id: true, createdAt: true },
});

@@ -48,7 +48,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -106,7 +106,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -171,7 +171,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -196,7 +196,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -250,7 +250,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -324,7 +324,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -378,7 +378,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -403,7 +403,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,
@@ -448,7 +448,7 @@ describe("Project Service", () => {
},
placement: WidgetPlacement.bottomRight,
clickOutsideClose: true,
darkOverlay: false,
overlay: "none",
environments: [],
styling: {
allowStyleOverwrite: true,

@@ -22,10 +22,11 @@ const selectProject = {
config: true,
placement: true,
clickOutsideClose: true,
darkOverlay: true,
overlay: true,
environments: true,
styling: true,
logo: true,
customHeadScripts: true,
};

export const getUserProjects = reactCache(

@@ -1,5 +1,6 @@
// https://github.com/airbnb/javascript/#naming--uppercase
import { TProjectStyling } from "@formbricks/types/project";
import { isLight, mixColor } from "@/lib/utils/colors";

export const COLOR_DEFAULTS = {
brandColor: "#64748b",
@@ -11,32 +12,174 @@ export const COLOR_DEFAULTS = {
highlightBorderColor: "#64748b",
} as const;

export const defaultStyling: TProjectStyling = {
const DEFAULT_BRAND_COLOR = "#64748b";

/**
* Derives a complete set of suggested color values from a single brand color.
*
* Used by the project-level "Suggest Colors" button **and** to build
* `STYLE_DEFAULTS` so that a fresh install always has colours that are
* visually cohesive with the default brand.
*
* The returned object is a flat map of form-field paths to values so it
* can be spread directly into form defaults or applied via `form.setValue`.
*/
export const getSuggestedColors = (brandColor: string = DEFAULT_BRAND_COLOR) => {
// Question / dark text: brand darkened with black (visible brand tint)
const questionColor = mixColor(brandColor, "#000000", 0.35);
// Input / option background: white with noticeable brand tint
const inputBg = mixColor(brandColor, "#ffffff", 0.92);
// Input border: visible brand-tinted border
const inputBorder = mixColor(brandColor, "#ffffff", 0.6);
// Card tones
const cardBg = mixColor(brandColor, "#ffffff", 0.97);
const cardBorder = mixColor(brandColor, "#ffffff", 0.8);
// Page background
const pageBg = mixColor(brandColor, "#ffffff", 0.855);

return {
// General
"brandColor.light": brandColor,
"questionColor.light": questionColor,

// Headlines & Descriptions — use questionColor to match the legacy behaviour
// where all text elements derived their color from questionColor.
"elementHeadlineColor.light": questionColor,
"elementDescriptionColor.light": questionColor,
"elementUpperLabelColor.light": questionColor,

// Buttons — use the brand color so the button matches the user's intent.
"buttonBgColor.light": brandColor,
"buttonTextColor.light": isLight(brandColor) ? "#0f172a" : "#ffffff",

// Inputs
"inputColor.light": inputBg,
"inputBorderColor.light": inputBorder,
"inputTextColor.light": questionColor,

// Options (Radio / Checkbox)
"optionBgColor.light": inputBg,
"optionLabelColor.light": questionColor,

// Card
"cardBackgroundColor.light": cardBg,
"cardBorderColor.light": cardBorder,

// Highlight / Focus
"highlightBorderColor.light": mixColor(brandColor, "#ffffff", 0.25),

// Progress Bar — indicator uses the brand color; track is a lighter tint.
"progressIndicatorBgColor.light": brandColor,
"progressTrackBgColor.light": mixColor(brandColor, "#ffffff", 0.8),

// Background
background: { bg: pageBg, bgType: "color" as const, brightness: 100 },
};
};

// Pre-compute colors derived from the default brand color.
const _colors = getSuggestedColors(DEFAULT_BRAND_COLOR);

/**
* Single source of truth for every styling default.
*
* Color values are derived from the default brand color (#64748b) via
* `getSuggestedColors()`. Non-color values (dimensions, weights, sizes)
* are hardcoded here and must be kept in sync with globals.css.
*
* Used everywhere: form defaults, preview rendering, email templates,
* and as the reset target for "Restore defaults".
*/
export const STYLE_DEFAULTS: TProjectStyling = {
allowStyleOverwrite: true,
brandColor: {
light: COLOR_DEFAULTS.brandColor,
},
questionColor: {
light: COLOR_DEFAULTS.questionColor,
},
inputColor: {
light: COLOR_DEFAULTS.inputColor,
},
inputBorderColor: {
light: COLOR_DEFAULTS.inputBorderColor,
},
cardBackgroundColor: {
light: COLOR_DEFAULTS.cardBackgroundColor,
},
cardBorderColor: {
light: COLOR_DEFAULTS.cardBorderColor,
},
brandColor: { light: _colors["brandColor.light"] },
questionColor: { light: _colors["questionColor.light"] },
inputColor: { light: _colors["inputColor.light"] },
inputBorderColor: { light: _colors["inputBorderColor.light"] },
cardBackgroundColor: { light: _colors["cardBackgroundColor.light"] },
cardBorderColor: { light: _colors["cardBorderColor.light"] },
isLogoHidden: false,
highlightBorderColor: undefined,
highlightBorderColor: { light: _colors["highlightBorderColor.light"] },
isDarkModeEnabled: false,
roundness: 8,
cardArrangement: {
linkSurveys: "straight",
appSurveys: "straight",
},
cardArrangement: { linkSurveys: "simple", appSurveys: "simple" },

// Headlines & Descriptions
elementHeadlineColor: { light: _colors["elementHeadlineColor.light"] },
elementHeadlineFontSize: 16,
elementHeadlineFontWeight: 600,
elementDescriptionColor: { light: _colors["elementDescriptionColor.light"] },
elementDescriptionFontSize: 14,
elementDescriptionFontWeight: 400,
elementUpperLabelColor: { light: _colors["elementUpperLabelColor.light"] },
elementUpperLabelFontSize: 12,
elementUpperLabelFontWeight: 400,

// Inputs
inputTextColor: { light: _colors["inputTextColor.light"] },
inputBorderRadius: 8,
inputHeight: 40,
inputFontSize: 14,
inputPaddingX: 16,
inputPaddingY: 16,
inputPlaceholderOpacity: 0.5,
inputShadow: "0 1px 2px 0 rgb(0 0 0 / 0.05)",

// Buttons
buttonBgColor: { light: _colors["buttonBgColor.light"] },
buttonTextColor: { light: _colors["buttonTextColor.light"] },
buttonBorderRadius: 8,
buttonHeight: "auto",
buttonFontSize: 16,
buttonFontWeight: 500,
buttonPaddingX: 12,
buttonPaddingY: 12,

// Options
optionBgColor: { light: _colors["optionBgColor.light"] },
optionLabelColor: { light: _colors["optionLabelColor.light"] },
optionBorderRadius: 8,
optionPaddingX: 16,
optionPaddingY: 16,
optionFontSize: 14,

// Progress Bar
progressTrackHeight: 8,
progressTrackBgColor: { light: _colors["progressTrackBgColor.light"] },
progressIndicatorBgColor: { light: _colors["progressIndicatorBgColor.light"] },
};

/**
* Builds a complete TProjectStyling object from a single brand color.
*
* Uses STYLE_DEFAULTS for all non-color properties (dimensions, weights, etc.)
* and derives every color from the given brand color via getSuggestedColors().
*
* Useful when only a brand color is known (e.g. onboarding) and a fully
* coherent styling object is needed for both preview rendering and persistence.
*/
export const buildStylingFromBrandColor = (brandColor: string = DEFAULT_BRAND_COLOR): TProjectStyling => {
const colors = getSuggestedColors(brandColor);

return {
...STYLE_DEFAULTS,
brandColor: { light: colors["brandColor.light"] },
questionColor: { light: colors["questionColor.light"] },
elementHeadlineColor: { light: colors["elementHeadlineColor.light"] },
elementDescriptionColor: { light: colors["elementDescriptionColor.light"] },
elementUpperLabelColor: { light: colors["elementUpperLabelColor.light"] },
buttonBgColor: { light: colors["buttonBgColor.light"] },
buttonTextColor: { light: colors["buttonTextColor.light"] },
inputColor: { light: colors["inputColor.light"] },
inputBorderColor: { light: colors["inputBorderColor.light"] },
inputTextColor: { light: colors["inputTextColor.light"] },
optionBgColor: { light: colors["optionBgColor.light"] },
optionLabelColor: { light: colors["optionLabelColor.light"] },
cardBackgroundColor: { light: colors["cardBackgroundColor.light"] },
cardBorderColor: { light: colors["cardBorderColor.light"] },
highlightBorderColor: { light: colors["highlightBorderColor.light"] },
progressIndicatorBgColor: { light: colors["progressIndicatorBgColor.light"] },
progressTrackBgColor: { light: colors["progressTrackBgColor.light"] },
background: colors.background,
};
};

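A brief usage sketch for the two helpers above; this is illustrative only and not part of the diff, and the import path is hypothetical (the functions are the exports defined in the file shown above):

```typescript
// Hypothetical import path; buildStylingFromBrandColor and getSuggestedColors
// are the exports defined in the styling-defaults module above.
import { buildStylingFromBrandColor, getSuggestedColors } from "./styling-defaults";

const styling = buildStylingFromBrandColor("#0ea5e9");
// => a complete TProjectStyling: the button background is the brand color itself,
//    while inputs, cards, and the page background are progressively lighter tints of it.

const suggested = getSuggestedColors("#0ea5e9");
// => a flat map keyed by form-field path, e.g. suggested["cardBackgroundColor.light"],
//    ready to spread into form defaults or apply via form.setValue.
```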
Some files were not shown because too many files have changed in this diff