Compare commits

...

2 Commits

Author             SHA1         Message                               Date
Piotr Gaczkowski   4c9b936f4b   feat: Create app-credentials for DB   2025-08-15 14:21:43 +02:00
Piotr Gaczkowski   dceda3f6f2   feat: Create Database credentials     2025-08-13 17:53:47 +02:00
18 changed files with 1244 additions and 37 deletions

infra/README.md (new file, +15)

@@ -0,0 +1,15 @@
### Nix Flakes
This project uses Nix Flakes via direnv.
Ensure your `~/.config/nix/nix.conf` (or `/etc/nix/nix.conf`) contains:
```ini
experimental-features = nix-command flakes
```
If your environment does not use direnv, you can still enter the development shell manually with:
```bash
nix develop
```
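
If you use direnv, the dev shell can also load automatically. A minimal `.envrc` at the repository root (a sketch; it assumes nix-direnv is installed, which provides `use flake`):
```bash
# .envrc — illustrative; requires direnv plus nix-direnv
use flake
```
Run `direnv allow` once to trust the file.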


@@ -16,12 +16,28 @@
        inherit system;
        config.allowUnfree = true;
      };
      helm-with-plugins = (
        pkgs.wrapHelm pkgs.kubernetes-helm {
          plugins = with pkgs.kubernetes-helmPlugins; [
            helm-secrets
            helm-diff
            helm-s3
            helm-git
          ];
        }
      );
      helmfile-with-plugins = pkgs.helmfile-wrapped.override {
        inherit (helm-with-plugins) pluginsDir;
      };
    in
    with pkgs;
    {
      devShells.default = mkShell {
        buildInputs = [
          awscli
          kubectl
          helm-with-plugins
          helmfile-with-plugins
          terraform
        ];
      };


@@ -70,6 +70,9 @@ deployment:
  app-env:
    nameSuffix: app-env
    type: secret
  db-secrets:
    nameSuffix: db-secrets
    type: secret
  nodeSelector:
    karpenter.sh/capacity-type: spot
  reloadOnChange: true
@@ -103,6 +106,9 @@ externalSecret:
  app-secrets:
    dataFrom:
      key: stage/formbricks/secrets
  db-secrets:
    dataFrom:
      key: stage/formbricks/terraform/rds/credentials
  refreshInterval: 1m
  secretStore:
    kind: ClusterSecretStore


@@ -0,0 +1,121 @@
resource "aws_sns_topic" "this" {
name = "lambda-metrics-alarm"
}
module "alarm" {
source = "terraform-aws-modules/cloudwatch/aws//modules/metric-alarm"
version = "~> 3.0"
alarm_name = "lambda-duration-lbda-rotate-db-secret"
alarm_description = "Lambda duration is too high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = 1
threshold = 10
period = 60
unit = "Milliseconds"
namespace = "AWS/Lambda"
metric_name = "Duration"
statistic = "Maximum"
dimensions = {
FunctionName = module.lambda_rotate_db_secret.lambda_function_name
}
alarm_actions = [aws_sns_topic.this.arn]
}
module "alarm_metric_query" {
source = "terraform-aws-modules/cloudwatch/aws//modules/metric-alarm"
version = "~> 3.0"
alarm_name = "mq-lambda-duration-lbda-rotate-db-secret"
alarm_description = "Lambda error rate is too high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = 1
threshold = 10
metric_query = [{
id = "e1"
return_data = true
expression = "m2/m1*100"
label = "Error Rate"
}, {
id = "m1"
metric = [{
namespace = "AWS/Lambda"
metric_name = "Invocations"
period = 60
stat = "Sum"
unit = "Count"
dimensions = {
FunctionName = module.lambda_rotate_db_secret.lambda_function_name
}
}]
}, {
id = "m2"
metric = [{
namespace = "AWS/Lambda"
metric_name = "Errors"
period = 60
stat = "Sum"
unit = "Count"
dimensions = {
FunctionName = module.lambda_rotate_db_secret.lambda_function_name
}
}]
}]
alarm_actions = [aws_sns_topic.this.arn]
tags = {
Secure = "maybe"
}
}
module "alarm_anomaly" {
source = "terraform-aws-modules/cloudwatch/aws//modules/metric-alarm"
version = "~> 3.0"
alarm_name = "lambda-invocations-anomaly-lbda-rotate-db-secret"
alarm_description = "Lambda invocations anomaly"
comparison_operator = "LessThanLowerOrGreaterThanUpperThreshold"
evaluation_periods = 1
threshold_metric_id = "ad1"
metric_query = [{
id = "ad1"
return_data = true
expression = "ANOMALY_DETECTION_BAND(m1, 2)"
label = "Invocations (expected)"
return_data = "true"
},
{
id = "m1"
metric = [{
namespace = "AWS/Lambda"
metric_name = "Invocations"
period = 60
stat = "Sum"
unit = "Count"
dimensions = {
FunctionName = module.lambda_rotate_db_secret.lambda_function_name
}
}]
return_data = "true"
}]
alarm_actions = [aws_sns_topic.this.arn]
tags = {
Secure = "maybe"
}
}


@@ -0,0 +1,20 @@
data "aws_region" "selected" {}
data "aws_secretsmanager_secret" "rds_credentials" {
arn = data.terraform_remote_state.main.outputs.rds_secret_staging_arn
}
# Default KMS key for Secrets Manager
data "aws_kms_key" "secretsmanager" {
key_id = "alias/aws/secretsmanager"
}
data "terraform_remote_state" "main" {
backend = "s3"
config = {
bucket = "715841356175-terraform"
key = "terraform.tfstate"
region = "eu-central-1"
}
}


@@ -0,0 +1,71 @@
resource "aws_lambda_layer_version" "psycopg2_layer" {
layer_name = "psycopg2-layer"
description = "Psycopg2 PostgreSQL driver for AWS Lambda"
compatible_runtimes = ["python3.9"]
filename = "./lambda/deps/psycopg2-layer.zip"
}
module "lambda_rotate_db_secret" {
source = "terraform-aws-modules/lambda/aws"
version = "7.20.1"
function_name = "lbda-rotate-db-secret"
description = "Rotate Aurora Serverless PostgreSQL DB secret"
handler = "lambda_function.lambda_handler"
source_path = "./lambda/src/lambda_function.py"
create_package = true
package_type = "Zip"
runtime = "python3.9"
timeout = 30
memory_size = 128
layers = [aws_lambda_layer_version.psycopg2_layer.arn]
create_role = true
role_name = "iamr-lbda-rotate-db-secret-role"
policy_name = "iamp-lbda-rotate-db-secret-policy"
attach_policy_json = true
policy_json = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = [
"kms:GenerateDataKey",
"kms:Encrypt",
"kms:DescribeKey",
"kms:Decrypt"
]
Effect = "Allow"
Resource = "*"
Sid = "AllowKMS"
},
{
Action = [
"secretsmanager:UpdateSecretVersionStage",
"secretsmanager:PutSecretValue",
"secretsmanager:GetSecretValue",
"secretsmanager:DescribeSecret"
]
Effect = "Allow"
Resource = "*"
Sid = "AllowSecretsManager"
},
{
Action = "secretsmanager:GetRandomPassword"
Effect = "Allow"
Resource = "*"
Sid = "AllowSecretsManagerRandomPassword"
}
]
})
tags = {
Environment = "dev"
Project = "aurora-serverless"
Zone = "db-zone"
}
}
resource "aws_lambda_permission" "AllowSecretsManager" {
statement_id = "AllowSecretsManager"
action = "lambda:InvokeFunction"
function_name = module.lambda_rotate_db_secret.lambda_function_name
principal = "secretsmanager.amazonaws.com"
}


@@ -0,0 +1,589 @@
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# https://github.com/aws-samples/aws-secrets-manager-rotation-lambdas/blob/master/SecretsManagerRDSPostgreSQLRotationSingleUser/lambda_function.py
# Updated from the pg and pgdb libraries to psycopg2 to support Python 3.9
import re
import boto3
import json
import logging
import os
import psycopg2
from psycopg2 import sql
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
"""Secrets Manager RDS PostgreSQL Handler
This handler uses the single-user rotation scheme to rotate an RDS PostgreSQL user credential. This rotation
scheme logs into the database as the user and rotates the user's own password, immediately invalidating the
user's previous password.
The Secret SecretString is expected to be a JSON string with the following format:
{
'engine': <required: must be set to 'postgres'>,
'host': <required: instance host name>,
'username': <required: username>,
'password': <required: password>,
'dbname': <optional: database name, default to 'postgres'>,
'port': <optional: if not specified, default port 5432 will be used>
}
Args:
event (dict): Lambda dictionary of event parameters. These keys must include the following:
- SecretId: The secret ARN or identifier
- ClientRequestToken: The ClientRequestToken of the secret version
- Step: The rotation step (one of createSecret, setSecret, testSecret, or finishSecret)
context (LambdaContext): The Lambda runtime information
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not properly configured for rotation
KeyError: If the secret json does not contain the expected keys
"""
arn = event["SecretId"]
token = event["ClientRequestToken"]
step = event["Step"]
# Setup the client
service_client = boto3.client(
"secretsmanager", endpoint_url=os.environ["SECRETS_MANAGER_ENDPOINT"]
)
# Make sure the version is staged correctly
metadata = service_client.describe_secret(SecretId=arn)
if "RotationEnabled" in metadata and not metadata["RotationEnabled"]:
logger.error("Secret %s is not enabled for rotation" % arn)
raise ValueError("Secret %s is not enabled for rotation" % arn)
versions = metadata["VersionIdsToStages"]
if token not in versions:
logger.error(
"Secret version %s has no stage for rotation of secret %s." % (token, arn)
)
raise ValueError(
"Secret version %s has no stage for rotation of secret %s." % (token, arn)
)
if "AWSCURRENT" in versions[token]:
logger.info(
"Secret version %s already set as AWSCURRENT for secret %s." % (token, arn)
)
return
elif "AWSPENDING" not in versions[token]:
logger.error(
"Secret version %s not set as AWSPENDING for rotation of secret %s."
% (token, arn)
)
raise ValueError(
"Secret version %s not set as AWSPENDING for rotation of secret %s."
% (token, arn)
)
# Call the appropriate step
if step == "createSecret":
create_secret(service_client, arn, token)
elif step == "setSecret":
set_secret(service_client, arn, token)
elif step == "testSecret":
test_secret(service_client, arn, token)
elif step == "finishSecret":
finish_secret(service_client, arn, token)
else:
logger.error(
"lambda_handler: Invalid step parameter %s for secret %s" % (step, arn)
)
raise ValueError("Invalid step parameter %s for secret %s" % (step, arn))
def create_secret(service_client, arn, token):
"""Generate a new secret
This method first checks for the existence of a secret for the passed in token. If one does not exist, it will generate a
new secret and put it with the passed in token.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ValueError: If the current secret is not valid JSON
KeyError: If the secret json does not contain the expected keys
"""
# Make sure the current secret exists
current_dict = get_secret_dict(service_client, arn, "AWSCURRENT")
# Now try to get the secret version, if that fails, put a new secret
try:
get_secret_dict(service_client, arn, "AWSPENDING", token)
logger.info("createSecret: Successfully retrieved secret for %s." % arn)
except service_client.exceptions.ResourceNotFoundException:
# Generate a random password
current_dict["password"] = get_random_password(service_client)
# Put the secret
service_client.put_secret_value(
SecretId=arn,
ClientRequestToken=token,
SecretString=json.dumps(current_dict),
VersionStages=["AWSPENDING"],
)
logger.info(
"createSecret: Successfully put secret for ARN %s and version %s."
% (arn, token)
)
def set_secret(service_client, arn, token):
"""Set the pending secret in the database
This method tries to login to the database with the AWSPENDING secret and returns on success. If that fails, it
tries to login with the AWSCURRENT and AWSPREVIOUS secrets. If either one succeeds, it sets the AWSPENDING password
as the user password in the database. Else, it throws a ValueError.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON or valid credentials are found to login to the database
KeyError: If the secret json does not contain the expected keys
"""
try:
previous_dict = get_secret_dict(service_client, arn, "AWSPREVIOUS")
except (service_client.exceptions.ResourceNotFoundException, KeyError):
previous_dict = None
current_dict = get_secret_dict(service_client, arn, "AWSCURRENT")
pending_dict = get_secret_dict(service_client, arn, "AWSPENDING", token)
# First try to login with the pending secret, if it succeeds, return
conn = get_connection(pending_dict)
if conn:
conn.close()
logger.info(
"setSecret: AWSPENDING secret is already set as password in PostgreSQL DB for secret arn %s."
% arn
)
return
# Make sure the user from current and pending match
if current_dict["username"] != pending_dict["username"]:
logger.error(
"setSecret: Attempting to modify user %s other than current user %s"
% (pending_dict["username"], current_dict["username"])
)
raise ValueError(
"Attempting to modify user %s other than current user %s"
% (pending_dict["username"], current_dict["username"])
)
# Make sure the host from current and pending match
if current_dict["host"] != pending_dict["host"]:
logger.error(
"setSecret: Attempting to modify user for host %s other than current host %s"
% (pending_dict["host"], current_dict["host"])
)
raise ValueError(
"Attempting to modify user for host %s other than current host %s"
% (pending_dict["host"], current_dict["host"])
)
# Now try the current password
conn = get_connection(current_dict)
# If both current and pending do not work, try previous
if not conn and previous_dict:
# Update previous_dict to leverage current SSL settings
previous_dict.pop("ssl", None)
if "ssl" in current_dict:
previous_dict["ssl"] = current_dict["ssl"]
conn = get_connection(previous_dict)
# Make sure the user/host from previous and pending match
if previous_dict["username"] != pending_dict["username"]:
logger.error(
"setSecret: Attempting to modify user %s other than previous valid user %s"
% (pending_dict["username"], previous_dict["username"])
)
raise ValueError(
"Attempting to modify user %s other than previous valid user %s"
% (pending_dict["username"], previous_dict["username"])
)
if previous_dict["host"] != pending_dict["host"]:
logger.error(
"setSecret: Attempting to modify user for host %s other than previous valid host %s"
% (pending_dict["host"], previous_dict["host"])
)
raise ValueError(
"Attempting to modify user for host %s other than current previous valid %s"
% (pending_dict["host"], previous_dict["host"])
)
# If we still don't have a connection, raise a ValueError
if not conn:
logger.error(
"setSecret: Unable to log into database with previous, current, or pending secret of secret arn %s"
% arn
)
raise ValueError(
"Unable to log into database with previous, current, or pending secret of secret arn %s"
% arn
)
# Now set the password to the pending password
try:
with conn.cursor() as cur:
# Get escaped username via quote_ident
cur.execute("SELECT quote_ident(%s)", (pending_dict["username"],))
escaped_username = cur.fetchone()[0]
alter_role = "ALTER USER %s" % escaped_username
cur.execute(alter_role + " WITH PASSWORD %s", (pending_dict["password"],))
conn.commit()
logger.info(
"setSecret: Successfully set password for user %s in PostgreSQL DB for secret arn %s."
% (pending_dict["username"], arn)
)
finally:
conn.close()
def test_secret(service_client, arn, token):
"""Test the pending secret against the database
This method tries to log into the database with the secrets staged with AWSPENDING and runs
a permissions check to ensure the user has the correct permissions.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON or valid credentials are found to login to the database
KeyError: If the secret json does not contain the expected keys
"""
# Try to login with the pending secret, if it succeeds, return
conn = get_connection(get_secret_dict(service_client, arn, "AWSPENDING", token))
if conn:
# This is where the lambda will validate the user's permissions. Uncomment/modify the below lines to
# tailor these validations to your needs
try:
with conn.cursor() as cur:
cur.execute("SELECT NOW()")
conn.commit()
finally:
conn.close()
logger.info(
"testSecret: Successfully signed into PostgreSQL DB with AWSPENDING secret in %s."
% arn
)
return
else:
logger.error(
"testSecret: Unable to log into database with pending secret of secret ARN %s"
% arn
)
raise ValueError(
"Unable to log into database with pending secret of secret ARN %s" % arn
)
def finish_secret(service_client, arn, token):
"""Finish the rotation by marking the pending secret as current
This method finishes the secret rotation by staging the secret staged AWSPENDING with the AWSCURRENT stage.
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version
"""
# First describe the secret to get the current version
metadata = service_client.describe_secret(SecretId=arn)
current_version = None
for version in metadata["VersionIdsToStages"]:
if "AWSCURRENT" in metadata["VersionIdsToStages"][version]:
if version == token:
# The correct version is already marked as current, return
logger.info(
"finishSecret: Version %s already marked as AWSCURRENT for %s"
% (version, arn)
)
return
current_version = version
break
# Finalize by staging the secret version current
service_client.update_secret_version_stage(
SecretId=arn,
VersionStage="AWSCURRENT",
MoveToVersionId=token,
RemoveFromVersionId=current_version,
)
logger.info(
"finishSecret: Successfully set AWSCURRENT stage to version %s for secret %s."
% (token, arn)
)
def get_connection(secret_dict):
"""Gets a connection to PostgreSQL DB from a secret dictionary
This helper function uses connectivity information from the secret dictionary to initiate
connection attempt(s) to the database. Will attempt a fallback, non-SSL connection when
initial connection fails using SSL and fall_back is True.
Args:
secret_dict (dict): The Secret Dictionary
Returns:
Connection: The psycopg2 connection object if successful. None otherwise
Raises:
KeyError: If the secret json does not contain the expected keys
"""
# Parse and validate the secret JSON string
port = int(secret_dict.get("port", 5432))
dbname = secret_dict.get("dbname", "postgres")
# Get SSL connectivity configuration
use_ssl, fall_back = get_ssl_config(secret_dict)
# Attempt initial connection
conn = connect_and_authenticate(secret_dict, port, dbname, use_ssl)
if conn or not fall_back:
return conn
# Attempt fallback connection without SSL
return connect_and_authenticate(secret_dict, port, dbname, False)
def get_ssl_config(secret_dict):
"""Gets the desired SSL and fall back behavior using a secret dictionary
This helper function uses the existence and value of the 'ssl' key in a secret dictionary
to determine desired SSL connectivity configuration. Its behavior is as follows:
- 'ssl' key DNE or invalid type/value: return True, True
- 'ssl' key is bool: return secret_dict['ssl'], False
- 'ssl' key equals "true" ignoring case: return True, False
- 'ssl' key equals "false" ignoring case: return False, False
Args:
secret_dict (dict): The Secret Dictionary
Returns:
Tuple(use_ssl, fall_back): SSL configuration
- use_ssl (bool): Flag indicating if an SSL connection should be attempted
- fall_back (bool): Flag indicating if non-SSL connection should be attempted if SSL connection fails
"""
# Default to True for SSL and fall_back mode if 'ssl' key DNE
if "ssl" not in secret_dict:
return True, True
# Handle type bool
if isinstance(secret_dict["ssl"], bool):
return secret_dict["ssl"], False
# Handle type string
if isinstance(secret_dict["ssl"], str):
ssl = secret_dict["ssl"].lower()
if ssl == "true":
return True, False
elif ssl == "false":
return False, False
else:
# Invalid string value, default to True for both SSL and fall_back mode
return True, True
# Invalid type, default to True for both SSL and fall_back mode
return True, True
def connect_and_authenticate(secret_dict, port, dbname, use_ssl):
"""Attempt to connect and authenticate to a PostgreSQL instance using psycopg2
Args:
secret_dict (dict): The Secret Dictionary
port (int): The database port to connect to
dbname (str): Name of the database
use_ssl (bool): Flag indicating whether connection should use SSL/TLS
Returns:
Connection: The psycopg2 connection object if successful. None otherwise
"""
try:
conn_params = {
"host": secret_dict["host"],
"user": secret_dict["username"],
"password": secret_dict["password"],
"dbname": dbname,
"port": port,
"connect_timeout": 5,
}
if use_ssl:
conn_params.update(
{"sslmode": "verify-full", "sslrootcert": "/etc/pki/tls/cert.pem"}
)
else:
conn_params["sslmode"] = "disable"
conn = psycopg2.connect(**conn_params)
logging.info(
"Successfully established %s connection as user '%s' with host: '%s'",
"SSL/TLS" if use_ssl else "non SSL/TLS",
secret_dict["username"],
secret_dict["host"],
)
return conn
except psycopg2.OperationalError as e:
error_message = str(e)
if "server does not support SSL, but SSL was required" in error_message:
logging.error(
"Unable to establish SSL/TLS handshake, SSL/TLS is not enabled on the host: %s",
secret_dict["host"],
)
elif re.search(
r'server common name ".+" does not match host name ".+"', error_message
):
logging.error(
"Hostname verification failed when establishing SSL/TLS Handshake with host: %s",
secret_dict["host"],
)
elif re.search(r'no pg_hba.conf entry for host ".+", SSL off', error_message):
logging.error(
"Unable to establish SSL/TLS handshake, SSL/TLS is enforced on the host: %s",
secret_dict["host"],
)
return None
def get_secret_dict(service_client, arn, stage, token=None):
"""Gets the secret dictionary corresponding for the secret arn, stage, and token
This helper function gets credentials for the arn and stage passed in and returns the dictionary by parsing the JSON string
Args:
service_client (client): The secrets manager service client
arn (string): The secret ARN or other identifier
token (string): The ClientRequestToken associated with the secret version, or None if no validation is desired
stage (string): The stage identifying the secret version
Returns:
SecretDictionary: Secret dictionary
Raises:
ResourceNotFoundException: If the secret with the specified arn and stage does not exist
ValueError: If the secret is not valid JSON
"""
required_fields = ["host", "username", "password"]
# Only do VersionId validation against the stage if a token is passed in
if token:
secret = service_client.get_secret_value(
SecretId=arn, VersionId=token, VersionStage=stage
)
else:
secret = service_client.get_secret_value(SecretId=arn, VersionStage=stage)
plaintext = secret["SecretString"]
secret_dict = json.loads(plaintext)
# Run validations against the secret
supported_engines = ["postgres", "aurora-postgresql"]
if "engine" not in secret_dict or secret_dict["engine"] not in supported_engines:
raise KeyError(
"Database engine must be set to 'postgres' in order to use this rotation lambda"
)
for field in required_fields:
if field not in secret_dict:
raise KeyError("%s key is missing from secret JSON" % field)
# Parse and return the secret JSON string
return secret_dict
def get_environment_bool(variable_name, default_value):
"""Loads the environment variable and converts it to the boolean.
Args:
variable_name (string): Name of environment variable
default_value (bool): The result will fallback to the default_value when the environment variable with the given name doesn't exist.
Returns:
bool: True when the content of environment variable contains either 'true', '1', 'y' or 'yes'
"""
variable = os.environ.get(variable_name, str(default_value))
return variable.lower() in ["true", "1", "y", "yes"]
def get_random_password(service_client):
"""Generates a random new password. Generator loads parameters that affects the content of the resulting password from the environment
variables. When environment variable is missing sensible defaults are chosen.
Supported environment variables:
- EXCLUDE_CHARACTERS
- PASSWORD_LENGTH
- EXCLUDE_NUMBERS
- EXCLUDE_PUNCTUATION
- EXCLUDE_UPPERCASE
- EXCLUDE_LOWERCASE
- REQUIRE_EACH_INCLUDED_TYPE
Args:
service_client (client): The secrets manager service client
Returns:
string: The randomly generated password.
"""
passwd = service_client.get_random_password(
ExcludeCharacters=os.environ.get("EXCLUDE_CHARACTERS", ":/@\"'\\"),
PasswordLength=int(os.environ.get("PASSWORD_LENGTH", 32)),
ExcludeNumbers=get_environment_bool("EXCLUDE_NUMBERS", False),
ExcludePunctuation=get_environment_bool("EXCLUDE_PUNCTUATION", True),
ExcludeUppercase=get_environment_bool("EXCLUDE_UPPERCASE", False),
ExcludeLowercase=get_environment_bool("EXCLUDE_LOWERCASE", False),
RequireEachIncludedType=get_environment_bool(
"REQUIRE_EACH_INCLUDED_TYPE", True
),
)
return passwd["RandomPassword"]
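
For reference, Secrets Manager drives this handler through the four rotation steps with events of the shape described in the `lambda_handler` docstring above. A minimal sketch of that flow (the ARN and token are placeholders, not values from this change; the function also expects `SECRETS_MANAGER_ENDPOINT` in its environment):
```python
# Illustrative only: shows the event shape and step order Secrets Manager uses.
import json

SECRET_ARN = "arn:aws:secretsmanager:eu-central-1:123456789012:secret:example"  # placeholder
TOKEN = "11111111-2222-3333-4444-555555555555"  # ClientRequestToken of the AWSPENDING version

for step in ("createSecret", "setSecret", "testSecret", "finishSecret"):
    event = {"SecretId": SECRET_ARN, "ClientRequestToken": TOKEN, "Step": step}
    print(json.dumps(event))
    # In a real rotation, lambda_handler(event, context) is invoked once per step.
```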


@@ -0,0 +1,173 @@
locals {
env_roles = {
staging = { dev_users = "ro", ops_users = "rw", sa_rw_users = "rw", sa_ro_users = "ro", admin_users = "admin" }
production = { dev_users = "ro", ops_users = "ro", sa_rw_users = "rw", sa_ro_users = "ro", admin_users = "admin" }
}
# List of application user identities
app_users = {
dev_users = [
"harsh",
]
ops_users = [
"piotr",
]
admin_users = [
"johannes",
"matti",
]
sa_rw_users = [
"formbricks-app",
]
}
# Flatten users across all teams, creating a map of username => role
db_users = merge([
for team, users in local.app_users : {
for user in users : user => {
role = local.env_roles[var.env_name][team]
}
}
]...)
# FIXME: this shouldn't be hardcoded here
rds_database_name = "formbricks-cloud"
role_prefix = replace(local.rds_database_name, "-", "_")
# Map of username => role
sql_users_map = merge([
for team, users in local.app_users : {
for user in users : user => {
role = "${local.role_prefix}_user_${local.env_roles[var.env_name][team]}"
}
}
]...)
# SQL to create read-only role
sql_create_read_only_role = {
sql = <<EOF
DO
\$\$
DECLARE
schema_name TEXT;
BEGIN
-- Create the read-only role if it doesn't exist
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${local.role_prefix}_user_ro') THEN
CREATE ROLE ${local.role_prefix}_user_ro;
END IF;
-- Loop through all schemas in the database, excluding system schemas
FOR schema_name IN
SELECT schemata.schema_name
FROM information_schema.schemata AS schemata
WHERE schemata.catalog_name = '${local.rds_database_name}'
AND schemata.schema_name NOT IN ('pg_catalog', 'information_schema')
LOOP
-- Grant USAGE on the schema
EXECUTE format('GRANT USAGE ON SCHEMA %I TO ${local.role_prefix}_user_ro;', schema_name);
-- Grant SELECT on all tables in the schema
EXECUTE format('GRANT SELECT ON ALL TABLES IN SCHEMA %I TO ${local.role_prefix}_user_ro;', schema_name);
END LOOP;
END
\$\$;
EOF
}
# SQL to create read-write role
sql_create_read_write_role = {
sql = <<EOF
DO
\$\$
DECLARE
schema_name TEXT;
BEGIN
-- Create the read-write role if it doesn't exist
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${local.role_prefix}_user_rw') THEN
CREATE ROLE ${local.role_prefix}_user_rw;
END IF;
-- Loop through all schemas in the database, excluding system schemas
FOR schema_name IN
SELECT schemata.schema_name
FROM information_schema.schemata AS schemata
WHERE schemata.catalog_name = '${local.rds_database_name}'
AND schemata.schema_name NOT IN ('pg_catalog', 'information_schema')
LOOP
-- Grant USAGE and CREATE on the schema
EXECUTE format('GRANT USAGE, CREATE ON SCHEMA %I TO ${local.role_prefix}_user_rw;', schema_name);
-- Grant CRUD permissions on all existing tables
EXECUTE format('GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA %I TO ${local.role_prefix}_user_rw;', schema_name);
END LOOP;
END
\$\$;
EOF
}
# SQL to create admin role
sql_create_admin_role = {
sql = <<EOF
DO
\$\$
DECLARE
schema_name TEXT;
BEGIN
-- Create the admin role if it doesn't exist
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${local.role_prefix}_user_admin') THEN
CREATE ROLE ${local.role_prefix}_user_admin;
END IF;
-- Loop through all schemas in the database, excluding system schemas
FOR schema_name IN
SELECT schemata.schema_name
FROM information_schema.schemata AS schemata
WHERE schemata.catalog_name = '${local.rds_database_name}'
AND schemata.schema_name NOT IN ('pg_catalog', 'information_schema')
LOOP
-- Grant USAGE and CREATE on the schema (allowing schema usage and object creation)
EXECUTE format('GRANT USAGE, CREATE ON SCHEMA %I TO ${local.role_prefix}_user_admin;', schema_name);
-- Grant INSERT, UPDATE, DELETE on existing tables in the schema
EXECUTE format('GRANT INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA %I TO ${local.role_prefix}_user_admin;', schema_name);
-- Grant full privileges on schema (implicitly includes ability to alter the schema)
EXECUTE format('GRANT ALL PRIVILEGES ON SCHEMA %I TO ${local.role_prefix}_user_admin;', schema_name);
-- Grant full privileges on all existing tables in the schema
EXECUTE format('GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA %I TO ${local.role_prefix}_user_admin;', schema_name);
END LOOP;
END
\$\$;
EOF
}
# Generate SQL statements to create users and set passwords
sql_create_user = {
for user, user_info in local.sql_users_map : user => {
sql = <<EOF
DO
\$\$
BEGIN
-- Create user if it does not exist
IF NOT EXISTS (SELECT FROM pg_user WHERE usename = '${user}') THEN
EXECUTE format('CREATE USER %I WITH PASSWORD %L;', '${user}', '${random_password.db_user_secrets[user].result}');
ELSE
-- Update password if the user already exists
EXECUTE format('ALTER USER %I WITH PASSWORD %L;', '${user}', '${random_password.db_user_secrets[user].result}');
END IF;
-- Ensure role exists
IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = '${user_info.role}') THEN
RAISE EXCEPTION 'Role ${user_info.role} does not exist';
END IF;
-- Assign role to the user
EXECUTE format('GRANT %I TO %I;', '${user_info.role}', '${user}');
END
\$\$;
EOF
}
}
}
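
The `merge([for team, users in local.app_users : {...}]...)` expressions above flatten the per-team user lists into a single username-to-role map. The following Python sketch mirrors that logic for the staging environment (illustrative only; it reuses the values defined in these locals):
```python
# Mirrors the Terraform flattening for env_name = "staging".
env_roles = {"dev_users": "ro", "ops_users": "rw", "sa_rw_users": "rw",
             "sa_ro_users": "ro", "admin_users": "admin"}
app_users = {
    "dev_users": ["harsh"],
    "ops_users": ["piotr"],
    "admin_users": ["johannes", "matti"],
    "sa_rw_users": ["formbricks-app"],
}
role_prefix = "formbricks-cloud".replace("-", "_")

# username -> SQL role, e.g. "piotr" -> "formbricks_cloud_user_rw"
sql_users_map = {
    user: f"{role_prefix}_user_{env_roles[team]}"
    for team, users in app_users.items()
    for user in users
}
print(sql_users_map)
```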


@@ -0,0 +1,12 @@
provider "aws" {
region = "eu-central-1"
}
terraform {
backend "s3" {
bucket = "715841356175-terraform"
key = "formbricks/db_users/terraform.tfstate"
region = "eu-central-1"
dynamodb_table = "terraform-lock"
}
}


@@ -0,0 +1,77 @@
module "create_postgres_user_read_only_role" {
source = "digitickets/cli/aws"
version = "7.0.0"
role_session_name = "CreatePostgresUserRoles"
aws_cli_commands = [
"rds-data", "execute-statement",
format("--resource-arn=%s", data.terraform_remote_state.main.outputs.rds["stage"].cluster_arn),
format("--secret-arn=%s", data.aws_secretsmanager_secret.rds_credentials.arn),
format("--region=%s", data.aws_region.selected.name),
format("--database=%s", local.rds_database_name),
format("--sql=\"%s\"", local.sql_create_read_only_role.sql)
]
}
module "create_postgres_user_read_write_role" {
source = "digitickets/cli/aws"
version = "7.0.0"
role_session_name = "CreatePostgresUserRoles"
aws_cli_commands = [
"rds-data", "execute-statement",
format("--resource-arn=%s", data.terraform_remote_state.main.outputs.rds["stage"].cluster_arn),
format("--secret-arn=%s", data.aws_secretsmanager_secret.rds_credentials.arn),
format("--region=%s", data.aws_region.selected.name),
format("--database=%s", local.rds_database_name),
format("--sql=\"%s\"", local.sql_create_read_write_role.sql)
]
depends_on = [
module.create_postgres_user_read_only_role
]
}
module "create_postgres_user_admin_role" {
source = "digitickets/cli/aws"
version = "7.0.0"
role_session_name = "CreatePostgresUserRoles"
aws_cli_commands = [
"rds-data", "execute-statement",
format("--resource-arn=%s", data.terraform_remote_state.main.outputs.rds["stage"].cluster_arn),
format("--secret-arn=%s", data.aws_secretsmanager_secret.rds_credentials.arn),
format("--region=%s", data.aws_region.selected.name),
format("--database=%s", local.rds_database_name),
format("--sql=\"%s\"", local.sql_create_admin_role.sql)
]
depends_on = [
module.create_postgres_user_read_write_role
]
}
# Create SQL users
module "create_postgres_user" {
for_each = {
for user, user_info in local.sql_users_map :
user => user_info
if var.env_name != "localstack"
}
source = "digitickets/cli/aws"
version = "7.0.0"
role_session_name = "CreatePostgresUser"
aws_cli_commands = [
"rds-data", "execute-statement",
format("--resource-arn=%s", data.terraform_remote_state.main.outputs.rds["stage"].cluster_arn),
format("--secret-arn=%s", data.aws_secretsmanager_secret.rds_credentials.arn),
format("--region=%s", data.aws_region.selected.name),
format("--database=%s", local.rds_database_name),
format("--sql=\"%s\"", local.sql_create_user[each.key].sql)
]
}
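
Each of these module calls wraps the AWS CLI, so the effect is one RDS Data API `ExecuteStatement` call per SQL block, authenticated with the cluster admin secret. A rough boto3 equivalent (a sketch; the ARNs are placeholders, and the real module also manages the role session):
```python
# Illustrative boto3 equivalent of one aws_cli_commands entry above.
import boto3

rds_data = boto3.client("rds-data", region_name="eu-central-1")
rds_data.execute_statement(
    resourceArn="arn:aws:rds:eu-central-1:123456789012:cluster:example",          # placeholder
    secretArn="arn:aws:secretsmanager:eu-central-1:123456789012:secret:example",  # placeholder
    database="formbricks-cloud",
    sql="SELECT 1;",  # stands in for local.sql_create_read_only_role.sql etc.
)
```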


@@ -0,0 +1,63 @@
resource "random_password" "db_user_secrets" {
for_each = local.db_users
length = 32
numeric = true
upper = true
special = false
}
resource "aws_secretsmanager_secret" "db_user_secrets" {
for_each = local.db_users
name = "rds-db-credentials/${data.terraform_remote_state.main.outputs.rds["stage"].cluster_resource_id}/${each.key}"
description = "RDS database ${data.terraform_remote_state.main.outputs.rds["stage"].cluster_id} credentials for ${each.key}"
kms_key_id = data.aws_kms_key.secretsmanager.id
}
resource "aws_secretsmanager_secret_version" "db_user_secrets" {
for_each = aws_secretsmanager_secret.db_user_secrets
secret_id = each.value.id
secret_string = jsonencode({
engine = "postgres"
host = data.terraform_remote_state.main.outputs.rds["stage"].cluster_endpoint
username = each.key
password = random_password.db_user_secrets[each.key].result
dbname = local.rds_database_name
port = data.terraform_remote_state.main.outputs.rds["stage"].cluster_port
})
}
resource "aws_secretsmanager_secret_policy" "db_user_secrets" {
for_each = aws_secretsmanager_secret.db_user_secrets
secret_arn = each.value.arn
policy = jsonencode({
Version = "2012-10-17",
Statement = [
{
Effect = "Deny",
Principal = "*",
Action = ["secretsmanager:GetSecretValue"],
Resource = each.value.arn,
Condition = {
StringNotLike = {
"aws:userId" = flatten(concat([
"*:${each.key}@formbricks.com", "*:piotr@formbricks.com"
]))
},
ArnNotEquals = {
"aws:PrincipalArn" = module.lambda_rotate_db_secret.lambda_function_arn
}
}
}
]
})
}
resource "aws_secretsmanager_secret_rotation" "db_user_secrets" {
for_each = aws_secretsmanager_secret.db_user_secrets
secret_id = each.value.id
rotation_lambda_arn = module.lambda_rotate_db_secret.lambda_function_arn
rotation_rules {
automatically_after_days = 1
}
}
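
Each secret version stores the connection details as JSON (engine, host, username, password, dbname, port), so an application can resolve its database credentials at runtime rather than from static configuration. A minimal consumer sketch, assuming boto3 and psycopg2 are available; the secret name follows the `rds-db-credentials/<cluster-resource-id>/<username>` pattern used above, and the concrete values here are placeholders:
```python
# Illustrative consumer of one rotated per-user secret.
import json

import boto3
import psycopg2

SECRET_NAME = "rds-db-credentials/cluster-EXAMPLE/formbricks-app"  # placeholder

secrets = boto3.client("secretsmanager", region_name="eu-central-1")
creds = json.loads(secrets.get_secret_value(SecretId=SECRET_NAME)["SecretString"])

conn = psycopg2.connect(
    host=creds["host"],
    port=creds.get("port", 5432),
    dbname=creds.get("dbname", "postgres"),
    user=creds["username"],
    password=creds["password"],
    connect_timeout=5,
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT NOW()")
    print(cur.fetchone()[0])
conn.close()
```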


@@ -0,0 +1,6 @@
#
variable "env_name" {
description = "env_name"
type = string
default = "staging"
}


@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 5.46"
}
}
}

infra/terraform/locals.tf (new file, +34)

@@ -0,0 +1,34 @@
locals {
project = "formbricks"
environment = "prod"
name = "${local.project}-${local.environment}"
envs = {
prod = "${local.project}-prod"
stage = "${local.project}-stage"
}
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Project = local.project
Environment = local.environment
ManagedBy = "Terraform"
Blueprint = local.name
}
tags_map = {
prod = {
Project = local.project
Environment = "prod"
ManagedBy = "Terraform"
Blueprint = "${local.project}-prod"
}
stage = {
Project = local.project
Environment = "stage"
ManagedBy = "Terraform"
Blueprint = "${local.project}-stage"
}
}
domain = "k8s.formbricks.com"
karpetner_helm_version = "1.3.1"
karpenter_namespace = "karpenter"
}


@@ -1,38 +1,3 @@
locals {
project = "formbricks"
environment = "prod"
name = "${local.project}-${local.environment}"
envs = {
prod = "${local.project}-prod"
stage = "${local.project}-stage"
}
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Project = local.project
Environment = local.environment
ManagedBy = "Terraform"
Blueprint = local.name
}
tags_map = {
prod = {
Project = local.project
Environment = "prod"
ManagedBy = "Terraform"
Blueprint = "${local.project}-prod"
}
stage = {
Project = local.project
Environment = "stage"
ManagedBy = "Terraform"
Blueprint = "${local.project}-stage"
}
}
domain = "k8s.formbricks.com"
karpetner_helm_version = "1.3.1"
karpenter_namespace = "karpenter"
}
################################################################################
# Route53 Hosted Zone
################################################################################
@@ -156,7 +121,7 @@ module "eks" {
most_recent = true
}
vpc-cni = {
most_recent = true
addon_version = "v1.20.0-eksbuild.1"
}
}


@@ -0,0 +1,10 @@
output "rds" {
description = "RDS created for cluster"
value = module.rds-aurora
sensitive = true
}
output "rds_secret_staging_arn" {
description = "RDS secret created for cluster"
value = aws_secretsmanager_secret.rds_credentials["stage"].arn
}


@@ -75,5 +75,4 @@ module "rds-aurora" {
}
tags = local.tags_map[each.key]
}


@@ -22,3 +22,23 @@ resource "aws_secretsmanager_secret_version" "formbricks_app_secrets" {
})
}
resource "aws_secretsmanager_secret" "rds_credentials" {
for_each = local.envs
name = "${each.key}/formbricks/terraform/rds/credentials"
}
resource "aws_secretsmanager_secret_version" "rds_credentials" {
for_each = local.envs
secret_id = aws_secretsmanager_secret.rds_credentials[each.key].id
secret_string = <<EOF
{
"username": "${module.rds-aurora[each.key].cluster_master_username}",
"password": "${random_password.postgres[each.key].result}",
"engine": data.aws_rds_engine_version.postgresql.engine,
"host": "${module.rds-aurora[each.key].cluster_endpoint}",
"port": ${module.rds-aurora[each.key].cluster_port},
"dbClusterIdentifier": "${module.rds-aurora[each.key].cluster_id}"
}
EOF
}