Add Multi-AZ Aurora DB to CI store-integration-tests
Closes #26730

Signed-off-by: Ryan Emerson <remerson@redhat.com>
@@ -0,0 +1,7 @@
# Ansible
###########
*_inventory.yml
*.pem
ansible.log
files/
env.yml
@@ -0,0 +1,8 @@
[defaults]
#log_path = ./ansible.log
host_key_checking=False
transport = ssh
forks = 50

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ForwardAgent=yes -o StrictHostKeyChecking=no -o IdentitiesOnly=yes
Executable
+26
@@ -0,0 +1,26 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ "$RUNNER_DEBUG" == "1" ]]; then
  set -x
fi

OPERATION=$1
REGION=$2

case $OPERATION in
  requirements)
    ansible-galaxy collection install -r requirements.yml
    pip3 install --user ansible boto3 botocore
    ;;
  create|delete|start|stop)
    if [ -f "env.yml" ]; then ANSIBLE_CUSTOM_VARS_ARG="-e @env.yml"; fi
    CLUSTER_NAME=${CLUSTER_NAME:-"keycloak_$(whoami)"}
    ansible-playbook aws_ec2.yml -v -e "region=$REGION" -e "operation=$OPERATION" -e "cluster_name=$CLUSTER_NAME" $ANSIBLE_CUSTOM_VARS_ARG "${@:3}"
    ;;
  *)
    echo "Invalid option!"
    echo "Available operations: requirements, create, delete, start, stop."
    ;;
esac
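The diff does not show this wrapper's filename; assuming it is saved as `ec2.sh` (name illustrative), a typical lifecycle might be:
```
./ec2.sh requirements                        # one-time: install the Ansible collection and Python deps
CLUSTER_NAME=kc_ci ./ec2.sh create us-west-1
CLUSTER_NAME=kc_ci ./ec2.sh stop us-west-1
CLUSTER_NAME=kc_ci ./ec2.sh delete us-west-1
```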
@@ -0,0 +1,3 @@
- hosts: localhost
  connection: local
  roles: [aws_ec2]
@@ -0,0 +1,2 @@
- hosts: keycloak
  roles: [keycloak_ec2_installer]
+15
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ "$RUNNER_DEBUG" == "1" ]]; then
  set -x
fi

REGION=$1
KEYCLOAK_SRC=$2

CLUSTER_NAME=${CLUSTER_NAME:-"keycloak_$(whoami)"}

ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml keycloak.yml \
  -e "keycloak_src=\"${KEYCLOAK_SRC}\""
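Assuming this wrapper is saved as `keycloak.sh` (the filename is not shown in the diff), installing a source archive on a previously created cluster might look like:
```
CLUSTER_NAME=kc_ci ./keycloak.sh us-west-1 /tmp/keycloak.zip
```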
@@ -0,0 +1,2 @@
- hosts: keycloak
  roles: [mvn_ec2_runner]
Executable
+15
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ "$RUNNER_DEBUG" == "1" ]]; then
  set -x
fi

REGION=$1
MVN_PARAMS="${*:2}"

CLUSTER_NAME=${CLUSTER_NAME:-"keycloak_$(whoami)"}

ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml mvn.yml \
  -e "mvn_params=\"${MVN_PARAMS}\""
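Since the `mvn.yml` playbook ultimately runs `./mvnw {{ mvn_params }}` on each host, everything after the region is passed through as Maven goals and flags; for example (wrapper filename illustrative):
```
CLUSTER_NAME=kc_ci ./mvn.sh us-west-1 clean install -DskipTests
```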
@@ -0,0 +1,3 @@
collections:
  - name: amazon.aws
    version: 6.0.0
@@ -0,0 +1,91 @@
# Ansible Role `aws_ec2`

Ansible role for creating, deleting, stopping, and starting AWS EC2 instances
for running Keycloak tests.

## Prerequisites

The role requires Ansible Collection `amazon.aws` version `6.0.0` or higher.

The role assumes that the user is authenticated to use the AWS CLI, i.e. that the authentication
variables `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` are set in the environment.

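For example, a one-time setup of the control machine could look like this (the credential values are placeholders; the install commands mirror the `requirements` operation of the wrapper script above):
```
export AWS_ACCESS_KEY=<access-key>
export AWS_SECRET_KEY=<secret-key>
ansible-galaxy collection install -r requirements.yml
pip3 install --user ansible boto3 botocore
```
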
## Parameters

- `region`: AWS region for the resources to be created in.
- `cluster_name`: Unique name of the instance cluster within the region. Defaults to `keycloak_{{ cluster_identifier }}`.
- `cluster_identifier`: Identifier to distinguish multiple clusters within the region. Defaults to `${USER}`.
- `cluster_size`: Number of EC2 instances to be created.
- `ami_name`: Name of the AMI image to be used for spawning instances.
- `instance_type`: [AWS instance type](https://aws.amazon.com/ec2/instance-types/).
- `instance_volume_size`: Size of the instance storage device, in GiB.
- `instance_device`: Path to the Linux storage device.

For defaults see `defaults/main.yml`.

## Example Playbook

Example playbook `aws_ec2.yml`:
```
- hosts: localhost
  connection: local
  roles: [aws_ec2]
```

## Create Instances

Using the example playbook run:
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=create
```

Replace `<REGION>` with the actual value, e.g. `us-west-1`.

Optionally, you can override other parameters with `-e PARAMETER=VALUE` or `-e @PARAMS.yml`, as shown below.

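For instance, to create a three-node cluster of larger instances (the values are illustrative):
```
ansible-playbook aws_ec2.yml -e region=us-west-1 -e operation=create \
  -e cluster_size=3 -e instance_type=t3.xlarge
```
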
This operation will create the following two files:
- `{{ cluster_name }}_{{ region }}.pem` - the private SSH key.
- `{{ cluster_name }}_{{ region }}_inventory.yml` - an Ansible host inventory file:

```
keycloak:
  children:
    "{{ cluster_name }}_{{ region }}":
      vars:
        ansible_user: ec2-user
        ansible_become: yes
        ansible_ssh_private_key_file: "{{ cluster_name }}_{{ region }}.pem"
      hosts:
        host-1-ip-address:
        host-2-ip-address:
        ...
```

Notice that the created hosts will be included in the Ansible group `keycloak`
and the subgroup `{{ cluster_name }}_{{ region }}`.

## Stop and Start Instances

Using the example playbook run:
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=stop
```

After the instances are stopped, their public IP addresses will be de-allocated.

```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=start
```

After the instances are started again, the role will re-create the host inventory file with the updated public IP addresses.

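One way to confirm that the regenerated inventory is usable is an ad-hoc ping against the `keycloak` group (the cluster name and region here are illustrative; the defaults produce this filename):
```
ansible -i keycloak_$(whoami)_us-west-1_inventory.yml keycloak -m ping
```
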
## Delete Instances

Using the example playbook run:
```
ansible-playbook aws_ec2.yml -e region=<REGION> -e operation=delete
```

This will remove created AWS resources and delete the host inventory file and private key.
@@ -0,0 +1,13 @@
cluster_identifier: "{{ lookup('env', 'USER') }}"
cluster_name: "keycloak_{{ cluster_identifier }}"
cluster_size: 1

cidr_ip: "{{ control_host_ip.stdout }}/32"

ami_name: RHEL-8.8.0_HVM-20230503-x86_64-54-Hourly2-GP2

instance_type: t3.large
instance_volume_size: 20
instance_device: /dev/sda1

no_log_sensitive: true
@@ -0,0 +1,68 @@
- name: Get Ansible Control Host's public IP
  shell: curl -ks --ipv4 https://ifconfig.me
  register: control_host_ip
  no_log: "{{ no_log_sensitive }}"

- debug: var=cidr_ip

- name: Create Security Group
  amazon.aws.ec2_group:
    state: present
    region: '{{ region }}'
    name: '{{ cluster_name }}'
    description: '{{ cluster_name }}'
    rules:
      - proto: tcp
        from_port: 22
        to_port: 22
        cidr_ip: '{{ cidr_ip }}'
  register: group
  no_log: "{{ no_log_sensitive }}"

- name: Create Key
  amazon.aws.ec2_key:
    state: present
    region: '{{ region }}'
    name: '{{ cluster_name }}'
  register: key
  no_log: "{{ no_log_sensitive }}"

- name: Save Private Key on Ansible Control Machine
  when: key.changed
  copy:
    content: '{{ key.key.private_key }}'
    dest: '{{ cluster_name }}_{{ region }}.pem'
    mode: 0600
  no_log: "{{ no_log_sensitive }}"

- name: Look up AMI '{{ ami_name }}'
  amazon.aws.ec2_ami_info:
    region: '{{ region }}'
    filters:
      name: '{{ ami_name }}'
  register: ami_info

- name: Create {{ cluster_size }} EC2 Instances
  amazon.aws.ec2_instance:
    state: started
    region: '{{ region }}'
    name: "{{ cluster_name }}"
    exact_count: "{{ cluster_size }}"
    instance_type: '{{ instance_type }}'
    image_id: '{{ ami_info.images[0].image_id }}'
    key_name: '{{ cluster_name }}'
    security_group: '{{ group.group_id }}'
    network:
      assign_public_ip: yes
    volumes:
      - device_name: '{{ instance_device }}'
        ebs:
          volume_size: '{{ instance_volume_size }}'
          delete_on_termination: true
  register: instances
  no_log: "{{ no_log_sensitive }}"

- name: Create Inventory File
  template:
    src: inventory.yml.j2
    dest: '{{ cluster_name }}_{{ region }}_inventory.yml'
@@ -0,0 +1,26 @@
- name: 'Delete EC2 instances'
  amazon.aws.ec2_instance:
    state: absent
    region: '{{ region }}'
    filters:
      "tag:Name": '{{ cluster_name }}*'

- name: 'Delete EC2 security group'
  amazon.aws.ec2_group:
    state: absent
    region: '{{ region }}'
    name: '{{ cluster_name }}'

- name: 'Delete Key'
  amazon.aws.ec2_key:
    state: absent
    region: '{{ region }}'
    name: '{{ cluster_name }}'

- name: 'Delete inventory, key, and log'
  file:
    state: absent
    path: '{{ item }}'
  with_items:
    - '{{ cluster_name }}_{{ region }}_inventory.yml'
    - '{{ cluster_name }}_{{ region }}.pem'
@@ -0,0 +1,12 @@
- debug: var=cluster_identifier
- debug: var=region
- debug: var=cluster_name

- include_tasks: create-resources.yml
  when: operation == "create"

- include_tasks: manage-instances.yml
  when: operation == "start" or operation == "stop"

- include_tasks: delete-resources.yml
  when: operation == "delete"
@@ -0,0 +1,26 @@
# Start or Stop Instances
- name: "{{ operation[0]|upper }}{{ operation[1:] }} Instances"
  amazon.aws.ec2_instance:
    state: '{{ "stopped" if operation == "stop" else "started" }}'
    region: '{{ region }}'
    filters:
      "tag:Name": '{{ cluster_name }}*'
      instance-state-name: ['running', 'stopped', 'stopping']
  no_log: "{{ no_log_sensitive }}"

- when: operation == "start"
  block:
    # When starting instances via the `ec2_instance` module, the `public_ip_address` is sometimes missing from the result.
    # An additional `ec2_instance_info` step works around the issue.
    - name: Get Instance Information
      amazon.aws.ec2_instance_info:
        region: '{{ region }}'
        filters:
          "tag:Name": '{{ cluster_name }}*'
          instance-state-name: ['running']
      register: instances
      no_log: "{{ no_log_sensitive }}"
    - name: Recreate Inventory File
      template:
        src: inventory.yml.j2
        dest: '{{ cluster_name }}_{{ region }}_inventory.yml'
@@ -0,0 +1,11 @@
keycloak:
  children:
    {{ cluster_name }}_{{ region | replace('-','_') }}:
      vars:
        ansible_user: ec2-user
        ansible_become: yes
        ansible_ssh_private_key_file: {{ cluster_name }}_{{ region }}.pem
      hosts:
{% for instance in instances.instances %}
        {{ instance.public_ip_address }}:
{% endfor %}
@@ -0,0 +1,35 @@
# Ansible Role `keycloak_ec2_installer`

Ansible role for installing the Keycloak sources and build dependencies on remote nodes.

The role assumes the presence of a host inventory file and a matching SSH key providing "sudoer" access to the hosts.
The hosts are expected to be included in the `keycloak` group.

## Parameters

See `defaults/main.yml` for default values.

### Execution
- `keycloak_src`: Path to a local `*.zip` file containing the Keycloak sources (see the sketch after this list).

### Other
- `update_system_packages`: Whether to update the system packages. Defaults to `no`.
- `install_java`: Whether to install OpenJDK on the system. Defaults to `yes`.
- `java_version`: Version of OpenJDK to be installed. Defaults to `17`.

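A sketch of producing such an archive from a local checkout (the paths are hypothetical; any zip of the source tree that unpacks under `kc_home` should work):
```
# Hypothetical example: package a local Keycloak checkout as keycloak_src
cd ~/git
zip -qr /tmp/keycloak.zip keycloak/
```
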
## Example Playbook

An example playbook `keycloak.yml` that applies the role to hosts in the `keycloak` group:
```
- hosts: keycloak
  roles: [keycloak_ec2_installer]
```

## Run the Playbook

Run:
```
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml keycloak.yml \
  -e "keycloak_src=\"/tmp/keycloak.zip\""
```
@@ -0,0 +1,7 @@
# This should match the user in the *_inventory.yml
ansible_ssh_user: ec2-user
# Workspace on the remote hosts
kc_home: /opt/keycloak
update_system_packages: no
install_java: yes
java_version: 17
@@ -0,0 +1,29 @@
- name: Update system packages on the remote hosts
  when: update_system_packages
  package:
    name: "*"
    state: latest

- name: Install Java {{ java_version }} packages on the remote hosts
  when: install_java
  package:
    name:
      - "java-{{ java_version }}-openjdk"
      - "java-{{ java_version }}-openjdk-devel"
    state: present

- name: Install dependencies on the remote hosts
  package: name={{ item }} state=present
  with_items:
    - unzip

- name: Create Keycloak src dir
  file:
    path: "{{ kc_home }}"
    state: directory

- name: Install Keycloak src on the remote hosts
  unarchive:
    src: "{{ keycloak_src }}"
    dest: "{{ kc_home }}"
    owner: "{{ ansible_ssh_user }}"
@@ -0,0 +1,3 @@
- include_tasks: install.yml
  vars:
    ansible_become: yes
@@ -0,0 +1,33 @@
# Ansible Role `mvn_ec2_runner`

Ansible role for executing `mvn` commands against the Keycloak sources on remote nodes.

The role assumes the presence of a host inventory file and a matching SSH key providing "sudoer" access to the hosts.
The hosts are expected to be included in the `keycloak` group.

## Parameters

See `defaults/main.yml` for default values.

### Execution
- `mvn_params`: Arguments passed to `./mvnw` on the remote nodes.

### Other
- `kc_home`: Location of the Keycloak sources on the remote node.

## Example Playbook

An example playbook `mvn.yml` that applies the role to hosts in the `keycloak` group:
```
- hosts: keycloak
  roles: [mvn_ec2_runner]
```

## Run the Playbook

Run:
```
ansible-playbook -i ${CLUSTER_NAME}_${REGION}_inventory.yml mvn.yml \
  -e "mvn_params=\"clean install\""
```
@@ -0,0 +1,5 @@
# Workspace on the localhost
local_workspace: files/keycloak

# Workspace on the remote hosts
kc_home: /opt/keycloak
@@ -0,0 +1 @@
- include_tasks: run.yml
@@ -0,0 +1,25 @@
- name: Initialization
  run_once: yes
  block:
    - debug: msg="Variable `mvn_params` must be set."
      failed_when: mvn_params == ""
    - set_fact: local_results_dir="{{ local_workspace }}/results/{{ '%Y%m%d%H%M%S' | strftime }}"
    - debug: var=local_results_dir

- name: Cleanup Previous Runs
  # Kill any currently running Java process from a previous (possibly aborted) run before starting the next.
  shell: |
    killall java
  ignore_errors: yes

- name: Run mvn command on the remote hosts
  shell: |
    cd {{ kc_home }}
    ./mvnw {{ mvn_params }}
  # A load run can take hours. To prevent the test from failing when the SSH connection breaks, use asynchronous polling.
  async: 86400
  poll: 10
  register: result

- debug: var=result
Executable
+27
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -e

function requiredEnv() {
  for ENV in $@; do
    if [ -z "${!ENV}" ]; then
      echo "${ENV} variable must be set"
      exit 1
    fi
  done
}

requiredEnv AURORA_CLUSTER AURORA_REGION

export AURORA_ENGINE=${AURORA_ENGINE:-"aurora-postgresql"}
export AURORA_ENGINE_VERSION=${AURORA_ENGINE_VERSION:-"15.3"}
export AURORA_INSTANCES=${AURORA_INSTANCES:-"2"}
export AURORA_INSTANCE_CLASS=${AURORA_INSTANCE_CLASS:-"db.t4g.large"}
export AURORA_PASSWORD=${AURORA_PASSWORD:-"secret99"}
export AURORA_SECURITY_GROUP_NAME=${AURORA_SECURITY_GROUP_NAME:-"${AURORA_CLUSTER}-security-group"}
export AURORA_SUBNET_A_CIDR=${AURORA_SUBNET_A_CIDR:-"192.168.0.0/19"}
export AURORA_SUBNET_B_CIDR=${AURORA_SUBNET_B_CIDR:-"192.168.32.0/19"}
export AURORA_SUBNET_GROUP_NAME=${AURORA_SUBNET_GROUP_NAME:-"${AURORA_CLUSTER}-subnet-group"}
export AURORA_VPC_CIDR=${AURORA_VPC_CIDR:-"192.168.0.0/16"}
export AURORA_USERNAME=${AURORA_USERNAME:-"keycloak"}
export AWS_REGION=${AWS_REGION:-${AURORA_REGION}}
export AWS_PAGER=""
Executable
+135
@@ -0,0 +1,135 @@
#!/usr/bin/env bash
set -e

if [[ "$RUNNER_DEBUG" == "1" ]]; then
  set -x
fi

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source ${SCRIPT_DIR}/aurora_common.sh

EXISTING_INSTANCES=$(aws rds describe-db-instances \
  --query "DBInstances[?starts_with(DBInstanceIdentifier, '${AURORA_CLUSTER}')].DBInstanceIdentifier" \
  --output text
)
if [ -n "${EXISTING_INSTANCES}" ]; then
  echo "Aurora instances '${EXISTING_INSTANCES}' already exist in the '${AWS_REGION}' region"
  exit 1
fi

# Create the Aurora VPC
AURORA_VPC=$(aws ec2 create-vpc \
  --cidr-block ${AURORA_VPC_CIDR} \
  --tag-specifications "ResourceType=vpc, Tags=[{Key=AuroraCluster,Value=${AURORA_CLUSTER}},{Key=Name,Value=Aurora Cluster ${AURORA_CLUSTER}}]" \
  --query "Vpc.VpcId" \
  --output text
)

# Each region may have different availability zones, so ensure that we use AZs that exist
IFS=' ' read -a AZS <<< "$(aws ec2 describe-availability-zones --region ${AURORA_REGION} --query "AvailabilityZones[].ZoneName" --output text)"

# Create the Aurora Subnets
SUBNET_A=$(aws ec2 create-subnet \
  --availability-zone "${AZS[0]}" \
  --vpc-id ${AURORA_VPC} \
  --cidr-block ${AURORA_SUBNET_A_CIDR} \
  --query "Subnet.SubnetId" \
  --output text
)

SUBNET_B=$(aws ec2 create-subnet \
  --availability-zone "${AZS[1]}" \
  --vpc-id ${AURORA_VPC} \
  --cidr-block ${AURORA_SUBNET_B_CIDR} \
  --query "Subnet.SubnetId" \
  --output text
)

AURORA_PUBLIC_ROUTE_TABLE_ID=$(aws ec2 describe-route-tables \
  --filters Name=vpc-id,Values=${AURORA_VPC} \
  --query "RouteTables[0].RouteTableId" \
  --output text
)

aws ec2 associate-route-table \
  --route-table-id ${AURORA_PUBLIC_ROUTE_TABLE_ID} \
  --subnet-id ${SUBNET_A}

aws ec2 associate-route-table \
  --route-table-id ${AURORA_PUBLIC_ROUTE_TABLE_ID} \
  --subnet-id ${SUBNET_B}

# Create the Aurora Subnet Group
aws rds create-db-subnet-group \
  --db-subnet-group-name ${AURORA_SUBNET_GROUP_NAME} \
  --db-subnet-group-description "Aurora DB Subnet Group" \
  --subnet-ids ${SUBNET_A} ${SUBNET_B}

# Create an Aurora VPC Security Group
AURORA_SECURITY_GROUP_ID=$(aws ec2 create-security-group \
  --group-name ${AURORA_SECURITY_GROUP_NAME} \
  --description "Aurora DB Security Group" \
  --vpc-id ${AURORA_VPC} \
  --query "GroupId" \
  --output text
)

# Make the Aurora endpoint accessible outside the VPC
## Create an Internet gateway
INTERNET_GATEWAY=$(aws ec2 create-internet-gateway \
  --tag-specifications "ResourceType=internet-gateway, Tags=[{Key=AuroraCluster,Value=${AURORA_CLUSTER}},{Key=Name,Value=Aurora Cluster ${AURORA_CLUSTER}}]" \
  --query "InternetGateway.InternetGatewayId" \
  --output text
)

aws ec2 attach-internet-gateway \
  --internet-gateway-id ${INTERNET_GATEWAY} \
  --vpc-id ${AURORA_VPC}

aws ec2 create-route \
  --route-table-id ${AURORA_PUBLIC_ROUTE_TABLE_ID} \
  --destination-cidr-block 0.0.0.0/0 \
  --gateway-id ${INTERNET_GATEWAY}

## Enable DNS hostnames, required for publicly accessible Aurora instances
aws ec2 modify-vpc-attribute \
  --vpc-id ${AURORA_VPC} \
  --enable-dns-hostnames

## Ensure the Postgres port is accessible outside the VPC
aws ec2 authorize-security-group-ingress \
  --group-id ${AURORA_SECURITY_GROUP_ID} \
  --ip-permissions "FromPort=5432,ToPort=5432,IpProtocol=tcp,IpRanges=[{CidrIp=0.0.0.0/0}]"

# Create the Aurora DB cluster and instances
aws rds create-db-cluster \
  --db-cluster-identifier ${AURORA_CLUSTER} \
  --database-name keycloak \
  --engine ${AURORA_ENGINE} \
  --engine-version ${AURORA_ENGINE_VERSION} \
  --master-username ${AURORA_USERNAME} \
  --master-user-password ${AURORA_PASSWORD} \
  --vpc-security-group-ids ${AURORA_SECURITY_GROUP_ID} \
  --db-subnet-group-name ${AURORA_SUBNET_GROUP_NAME} \
  --tags "Key=keepalive" # Add keepalive tag to prevent the keycloak-benchmark reaper from removing the DB during nightly runs

# For now only two AZs in each region are supported, due to the two subnets created above
for i in $( seq ${AURORA_INSTANCES} ); do
  aws rds create-db-instance \
    --db-cluster-identifier ${AURORA_CLUSTER} \
    --db-instance-identifier "${AURORA_CLUSTER}-instance-${i}" \
    --db-instance-class ${AURORA_INSTANCE_CLASS} \
    --engine ${AURORA_ENGINE} \
    --availability-zone "${AZS[$(((i - 1) % ${#AZS[@]}))]}" \
    --publicly-accessible
done

for i in $( seq ${AURORA_INSTANCES} ); do
  aws rds wait db-instance-available --db-instance-identifier "${AURORA_CLUSTER}-instance-${i}"
done

export AURORA_ENDPOINT=$(aws rds describe-db-clusters \
  --db-cluster-identifier ${AURORA_CLUSTER} \
  --query "DBClusters[*].Endpoint" \
  --output text
)
Executable
+79
@@ -0,0 +1,79 @@
#!/usr/bin/env bash
set -e

if [[ "$RUNNER_DEBUG" == "1" ]]; then
  set -x
fi

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source ${SCRIPT_DIR}/aurora_common.sh

AURORA_VPC=$(aws ec2 describe-vpcs \
  --filters "Name=tag:AuroraCluster,Values=${AURORA_CLUSTER}" \
  --query "Vpcs[*].VpcId" \
  --output text
)

# Delete the Aurora DB cluster and instances
for i in $( aws rds describe-db-clusters --db-cluster-identifier ${AURORA_CLUSTER} --query "DBClusters[0].DBClusterMembers[].DBInstanceIdentifier" --output text ); do
  echo "Deleting Aurora DB instance ${i}"
  aws rds delete-db-instance --db-instance-identifier "${i}" --skip-final-snapshot || true
done

aws rds delete-db-cluster \
  --db-cluster-identifier ${AURORA_CLUSTER} \
  --skip-final-snapshot \
  || true

for i in $( aws rds describe-db-clusters --db-cluster-identifier ${AURORA_CLUSTER} --query "DBClusters[0].DBClusterMembers[].DBInstanceIdentifier" --output text ); do
  aws rds wait db-instance-deleted --db-instance-identifier "${i}"
done

aws rds wait db-cluster-deleted --db-cluster-identifier ${AURORA_CLUSTER} || true

# Delete the Aurora subnet group
aws rds delete-db-subnet-group --db-subnet-group-name ${AURORA_SUBNET_GROUP_NAME} || true

# Delete the Aurora subnets
AURORA_SUBNETS=$(aws ec2 describe-subnets \
  --filters "Name=vpc-id,Values=${AURORA_VPC}" \
  --query "Subnets[*].SubnetId" \
  --output text
)
for AURORA_SUBNET in ${AURORA_SUBNETS}; do
  aws ec2 delete-subnet --subnet-id ${AURORA_SUBNET}
done

# Delete the Aurora VPC Security Group
AURORA_SECURITY_GROUP_ID=$(aws ec2 describe-security-groups \
  --filters "Name=vpc-id,Values=${AURORA_VPC}" "Name=group-name,Values=${AURORA_SECURITY_GROUP_NAME}" \
  --query "SecurityGroups[*].GroupId" \
  --output text
)
if [ -n "${AURORA_SECURITY_GROUP_ID}" ]; then
  aws ec2 delete-security-group --group-id ${AURORA_SECURITY_GROUP_ID} --region ${AURORA_REGION}
fi

# Detach the Internet gateway from the VPC and remove it
INTERNET_GATEWAY=$(aws ec2 describe-internet-gateways \
  --filters "Name=tag:AuroraCluster,Values=${AURORA_CLUSTER}" \
  --query "InternetGateways[*].InternetGatewayId" \
  --output text
)

aws ec2 detach-internet-gateway \
  --internet-gateway-id ${INTERNET_GATEWAY} \
  --vpc-id ${AURORA_VPC} \
  || true

aws ec2 delete-internet-gateway --internet-gateway-id ${INTERNET_GATEWAY} || true

# Delete the Aurora VPC, retrying up to 20 times in case dependencies are not removed instantly
n=0
until [ "$n" -ge 20 ]
do
  aws ec2 delete-vpc --vpc-id ${AURORA_VPC} && break
  n=$((n+1))
  echo "Unable to remove VPC ${AURORA_VPC}. Attempt ${n}"
  sleep 10
done
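Putting the two Aurora scripts together, a CI job might provision and tear down the database like this (the script filenames are illustrative, as the diff does not show them; only `AURORA_CLUSTER` and `AURORA_REGION` are required, the rest fall back to the defaults in the common script):
```
export AURORA_CLUSTER=kc-ci-multi-az
export AURORA_REGION=us-east-1
./rds_create_aurora.sh   # provisions the VPC, subnets, and the Multi-AZ cluster
# ... run the store-integration-tests against ${AURORA_ENDPOINT} ...
./rds_delete_aurora.sh   # removes the cluster and all supporting resources
```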