Mirror of https://github.com/biersoeckli/QuickStack.git
added redirect for ingress
prisma/migrations/20241107155300_migration/migration.sql (new file, 33 lines)
@@ -0,0 +1,33 @@
+-- RedefineTables
+PRAGMA defer_foreign_keys=ON;
+PRAGMA foreign_keys=OFF;
+CREATE TABLE "new_AppDomain" (
+    "id" TEXT NOT NULL PRIMARY KEY,
+    "hostname" TEXT NOT NULL,
+    "port" INTEGER NOT NULL,
+    "useSsl" BOOLEAN NOT NULL DEFAULT true,
+    "redirectHttps" BOOLEAN NOT NULL DEFAULT true,
+    "appId" TEXT NOT NULL,
+    "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    "updatedAt" DATETIME NOT NULL,
+    CONSTRAINT "AppDomain_appId_fkey" FOREIGN KEY ("appId") REFERENCES "App" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
+);
+INSERT INTO "new_AppDomain" ("appId", "createdAt", "hostname", "id", "port", "updatedAt", "useSsl") SELECT "appId", "createdAt", "hostname", "id", "port", "updatedAt", "useSsl" FROM "AppDomain";
+DROP TABLE "AppDomain";
+ALTER TABLE "new_AppDomain" RENAME TO "AppDomain";
+CREATE UNIQUE INDEX "AppDomain_hostname_key" ON "AppDomain"("hostname");
+CREATE TABLE "new_AppVolume" (
+    "id" TEXT NOT NULL PRIMARY KEY,
+    "containerMountPath" TEXT NOT NULL,
+    "size" INTEGER NOT NULL,
+    "accessMode" TEXT NOT NULL DEFAULT 'rwo',
+    "appId" TEXT NOT NULL,
+    "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    "updatedAt" DATETIME NOT NULL,
+    CONSTRAINT "AppVolume_appId_fkey" FOREIGN KEY ("appId") REFERENCES "App" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
+);
+INSERT INTO "new_AppVolume" ("accessMode", "appId", "containerMountPath", "createdAt", "id", "size", "updatedAt") SELECT "accessMode", "appId", "containerMountPath", "createdAt", "id", "size", "updatedAt" FROM "AppVolume";
+DROP TABLE "AppVolume";
+ALTER TABLE "new_AppVolume" RENAME TO "AppVolume";
+PRAGMA foreign_keys=ON;
+PRAGMA defer_foreign_keys=OFF;
@@ -155,12 +155,13 @@ model App {
 }
 
 model AppDomain {
-  id       String  @id @default(uuid())
-  hostname String  @unique
-  port     Int
-  useSsl   Boolean @default(true)
-  appId    String
-  app      App     @relation(fields: [appId], references: [id])
+  id            String  @id @default(uuid())
+  hostname      String  @unique
+  port          Int
+  useSsl        Boolean @default(true)
+  redirectHttps Boolean @default(true)
+  appId         String
+  app           App     @relation(fields: [appId], references: [id])
 
   createdAt DateTime @default(now())
   updatedAt DateTime @updatedAt
setup.sh (new file, 113 lines)
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+wait_until_all_pods_running() {
+
+    # Wait another 5 seconds to make sure all pods have been registered for the first time.
+    sleep 5
+
+    while true; do
+        OUTPUT=$(sudo k3s kubectl get pods -A --no-headers 2>&1)
+
+        # If there are no resources found, Kubernetes is still starting up.
+        if echo "$OUTPUT" | grep -q "No resources found"; then
+            echo "Kubernetes is still starting up..."
+        else
+            # Extract the STATUS column (4th field of `kubectl get pods -A`) and filter out "Running" and "Completed".
+            STATUS=$(echo "$OUTPUT" | awk '{print $4}' | grep -vE '^(Running|Completed)$')
+
+            # If STATUS is empty, all pods are running and the loop can be exited.
+            if [ -z "$STATUS" ]; then
+                echo "Pods started successfully."
+                break
+            else
+                echo "Waiting for all pods to come online..."
+            fi
+        fi
+
+        # Wait 10 seconds before checking the pod status again.
+        sleep 10
+    done
+
+    # Wait another 5 seconds to make sure all pods are ready.
+    sleep 5
+
+    sudo kubectl get node
+    sudo kubectl get pods -A
+}
+
+# Installation of k3s
+curl -sfL https://get.k3s.io | sh -
+# Todo: Check for Ready node, takes ~30 seconds
+sudo k3s kubectl get node
+
+echo "Waiting for Kubernetes to start..."
+wait_until_all_pods_running
+
+# Installation of Longhorn
+sudo kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/longhorn.yaml
+echo "Waiting for Longhorn to start..."
+wait_until_all_pods_running
+
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# THIS MUST BE INSTALLED ON ALL NODES --> https://longhorn.io/docs/1.7.2/deploy/install/#installing-nfsv4-client
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+echo "Installing nfs-common..."
+kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.7.2/deploy/prerequisite/longhorn-nfs-installation.yaml
+wait_until_all_pods_running
+
+# Installation of Cert-Manager
+sudo kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.1/cert-manager.yaml
+echo "Waiting for Cert-Manager to start..."
+wait_until_all_pods_running
+sudo kubectl -n cert-manager get pod
+
+# Add ClusterIssuers
+cat <<EOF > cluster-issuer.yaml
+# Staging ClusterIssuer
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-staging
+  namespace: default
+spec:
+  acme:
+    server: https://acme-staging-v02.api.letsencrypt.org/directory
+    email: test@ost.ch
+    privateKeySecretRef:
+      name: letsencrypt-staging
+    solvers:
+      - selector: {}
+        http01:
+          ingress:
+            class: traefik
+---
+# Production ClusterIssuer
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-production
+  namespace: default
+spec:
+  acme:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: test@ost.ch
+    privateKeySecretRef:
+      name: letsencrypt-production
+    solvers:
+      - selector: {}
+        http01:
+          ingress:
+            class: traefik
+EOF
+sudo kubectl apply -f cluster-issuer.yaml
+sudo kubectl get clusterissuer -o wide
+rm cluster-issuer.yaml
+
+sudo kubectl get nodes
+
+# Print the command for joining additional nodes to the cluster
+joinTokenForOtherNodes=$(sudo cat /var/lib/rancher/k3s/server/node-token)
+echo "To add a worker node to the cluster, run the following command on the worker node:"
+echo "------------------------------------------------------------"
+echo "curl -sfL https://get.k3s.io | K3S_URL=https://<IP-ADDRESS-OR-HOSTNAME-OF-MASTERNODE>:6443 K3S_TOKEN=$joinTokenForOtherNodes sh -"
+echo "------------------------------------------------------------"
@@ -54,6 +54,7 @@ export default function DialogEditDialog({ children, domain, appId }: { children
         FormUtils.mapValidationErrorsToForm<typeof appDomainEditZodModel>(state, form);
     }, [state]);
 
+    const values = form.watch();
 
     return (
         <>
@@ -102,6 +103,7 @@ export default function DialogEditDialog({ children, domain, appId }: { children
                     />
 
                     <CheckboxFormField form={form} name="useSsl" label="use HTTPS" />
+                    {values.useSsl && <CheckboxFormField form={form} name="redirectHttps" label="Redirect HTTP to HTTPS" />}
                     <p className="text-red-500">{state.message}</p>
                     <SubmitButton>Save</SubmitButton>
                 </div>
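Note: the `form.watch()` call added above is what drives the conditional checkbox; watching the whole form re-renders the dialog on every field change, so the `redirectHttps` checkbox only appears while `useSsl` is ticked. A minimal react-hook-form sketch of the same pattern, with hypothetical component and markup, not code from this commit:

    import { useForm } from 'react-hook-form';

    // Hypothetical stand-alone sketch of a watch-driven conditional field.
    export function SslFieldsSketch() {
        const form = useForm<{ useSsl: boolean; redirectHttps: boolean }>();
        const values = form.watch(); // subscribe to all field changes

        return (
            <>
                <label><input type="checkbox" {...form.register('useSsl')} /> use HTTPS</label>
                {values.useSsl && (
                    <label><input type="checkbox" {...form.register('redirectHttps')} /> Redirect HTTP to HTTPS</label>
                )}
            </>
        );
    }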
@@ -51,6 +51,7 @@ export default function DomainsList({ app }: {
                         <TableHead>Name</TableHead>
                         <TableHead>Port</TableHead>
                         <TableHead>SSL</TableHead>
+                        <TableHead>Redirect HTTP to HTTPS</TableHead>
                         <TableHead className="w-[100px]">Action</TableHead>
                     </TableRow>
                 </TableHeader>
@@ -60,6 +61,7 @@ export default function DomainsList({ app }: {
                         <TableCell className="font-medium">{domain.hostname}</TableCell>
                         <TableCell className="font-medium">{domain.port}</TableCell>
                         <TableCell className="font-medium">{domain.useSsl ? <CheckIcon /> : <XIcon />}</TableCell>
+                        <TableCell className="font-medium">{domain.useSsl && domain.redirectHttps ? <CheckIcon /> : <XIcon />}</TableCell>
                         <TableCell className="font-medium flex gap-2">
                             <DialogEditDialog appId={app.id} domain={domain}>
                                 <Button variant="ghost"><EditIcon /></Button>
@@ -56,7 +56,7 @@ export default function EnvEdit({ app }: {
                     <FormItem>
                         <FormLabel>Env Variables</FormLabel>
                         <FormControl>
-                            <Textarea className="h-96" placeholder="NAME=VALUE,NAME=VALUE,NAME=VALUE,..." {...field} value={field.value} />
+                            <Textarea className="h-96" placeholder="NAME=VALUE..." {...field} value={field.value} />
                         </FormControl>
                         <FormMessage />
                     </FormItem>
@@ -111,7 +111,7 @@ export default function DialogEditDialog({ children, volume, appId }: { children
                     name="size"
                     render={({ field }) => (
                         <FormItem>
-                            <FormLabel>Size in GB</FormLabel>
+                            <FormLabel>Size in MB</FormLabel>
                             <FormControl>
                                 <Input type="number" placeholder="ex. 20" {...field} />
                             </FormControl>
@@ -42,7 +42,7 @@ export default function StorageList({ app }: {
             <TableHeader>
                 <TableRow>
                     <TableHead>Mount Path</TableHead>
-                    <TableHead>Size in GB</TableHead>
+                    <TableHead>Size in MB</TableHead>
                     <TableHead>Access Mode</TableHead>
                     <TableHead className="w-[100px]">Action</TableHead>
                 </TableRow>
@@ -4,6 +4,7 @@ import { z } from "zod";
 export const appDomainEditZodModel = z.object({
     hostname: z.string().trim().min(1),
     useSsl: stringToBoolean,
+    redirectHttps: stringToBoolean,
     port: stringToNumber,
 })
@@ -7,6 +7,7 @@ export const AppDomainModel = z.object({
     hostname: z.string(),
     port: z.number().int(),
     useSsl: z.boolean(),
+    redirectHttps: z.boolean(),
     appId: z.string(),
     createdAt: z.date(),
     updatedAt: z.date(),
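Note: both validators gain the new field. One thing Zod gives for free, shown here as a hedged sketch rather than code from the repo, is deriving the compile-time type from the runtime validator, so UI code like the domains table picks up `redirectHttps` through type inference:

    import { z } from "zod";

    // Sketch: a trimmed-down copy of the model, just to show the inference.
    const AppDomainModelSketch = z.object({
        hostname: z.string(),
        port: z.number().int(),
        useSsl: z.boolean(),
        redirectHttps: z.boolean(),
    });

    // { hostname: string; port: number; useSsl: boolean; redirectHttps: boolean }
    type AppDomainSketch = z.infer<typeof AppDomainModelSketch>;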
@@ -37,6 +37,15 @@ const getK8sLogApiClient = () => {
 const k8sLogClient = globalThis.k8sLogGlobal ?? getK8sLogApiClient()
 if (process.env.NODE_ENV !== 'production') globalThis.k8sLogGlobal = k8sLogClient
 
+const getK8sCustomObjectsApiClient = () => {
+    const kc = new k8s.KubeConfig();
+    kc.loadFromFile('/workspace/kube-config.config'); // todo update --> use security role
+    const client = kc.makeApiClient(k8s.CustomObjectsApi);
+    return client;
+}
+const k8sCustomObjectsClient = globalThis.k8sCustomObjectsGlobal ?? getK8sCustomObjectsApiClient()
+if (process.env.NODE_ENV !== 'production') globalThis.k8sCustomObjectsGlobal = k8sCustomObjectsClient
+
 const getK8sNetworkApiClient = () => {
     const kc = new k8s.KubeConfig();
     kc.loadFromFile('/workspace/kube-config.config'); // todo update --> use security role
@@ -52,6 +61,7 @@ declare const globalThis: {
     k8sJobGlobal: ReturnType<typeof getK8sBatchApiClient>;
     k8sLogGlobal: ReturnType<typeof getK8sLogApiClient>;
     k8sNetworkGlobal: ReturnType<typeof getK8sNetworkApiClient>;
+    k8sCustomObjectsGlobal: ReturnType<typeof getK8sCustomObjectsApiClient>;
 } & typeof global;
 
@@ -64,6 +74,7 @@ class K3sApiAdapter {
     batch = k8sJobClient;
     log = k8sLogClient;
     network = k8sNetworkClient;
+    customObjects = k8sCustomObjectsClient;
 }
 
 const k3s = new K3sApiAdapter();
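Note: the new CustomObjectsApi client follows the file's existing pattern of building each client once and caching it on `globalThis`, so Next.js hot reloads in development do not pile up fresh connections. A generic, self-contained sketch of that singleton pattern, with hypothetical names:

    // Hypothetical stand-in for an expensive-to-construct API client.
    class ExpensiveClient {
        constructor() { console.log('client constructed'); }
    }

    declare const globalThis: { clientGlobal?: ExpensiveClient } & typeof global;

    // Reuse the cached instance if a previous hot reload already created one.
    const client = globalThis.clientGlobal ?? new ExpensiveClient();
    if (process.env.NODE_ENV !== 'production') globalThis.clientGlobal = client;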
@@ -100,17 +100,18 @@ class DeploymentService {
     async createDeployment(app: AppExtendedModel, buildJobName?: string) {
         await this.validateDeployment(app);
         await this.createNamespaceIfNotExists(app.projectId);
-        if (await pvcService.doesAppConfigurationIncreaseAnyPvcSize(app)) {
-            // await this.setReplicasForDeployment(app.projectId, app.id, 0); // update of PVCs is only possible if deployment is scaled down
+        const appHasPvcChanges = await pvcService.doesAppConfigurationIncreaseAnyPvcSize(app)
+        if (appHasPvcChanges) {
+            await this.setReplicasForDeployment(app.projectId, app.id, 0); // update of PVCs is only possible if deployment is scaled down
             await new Promise(resolve => setTimeout(resolve, 5000));
         }
         const { volumes, volumeMounts } = await pvcService.createOrUpdatePvc(app);
 
-        const envVars = app.envVars
-            ? app.envVars.split(',').map(env => {
-                const [name, value] = env.split('=');
-                return { name, value };
-            })
-            : [];
+        const envVars = app.envVars ? app.envVars.split('\n').filter(x => !!x).map(env => {
+            const [name] = env.split('=');
+            const value = env.replace(`${name}=`, '');
+            return { name, value };
+        }) : [];
+
         const existingDeployment = await this.getDeployment(app.projectId, app.id);
         const body: V1Deployment = {
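Note: the env-var format changes from comma-separated to one `NAME=VALUE` per line, and only the first `=` is treated as the separator, so values such as connection strings may themselves contain `=`. A small stand-alone sketch of the same parsing logic; `parseEnvVars` is a hypothetical helper, not part of the commit:

    // Hypothetical helper mirroring the parsing above.
    function parseEnvVars(raw: string | null): { name: string; value: string }[] {
        if (!raw) return [];
        return raw.split('\n')
            .filter(line => !!line)                          // skip empty lines
            .map(line => {
                const [name] = line.split('=');              // text before the first '='
                const value = line.replace(`${name}=`, '');  // the rest, later '=' kept
                return { name, value };
            });
    }

    // parseEnvVars('DATABASE_URL=postgres://u:p@host/db?sslmode=require')
    // -> [{ name: 'DATABASE_URL', value: 'postgres://u:p@host/db?sslmode=require' }]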
@@ -153,7 +154,7 @@ class DeploymentService {
             body.spec!.template!.metadata!.annotations!.buildJobName = buildJobName; // add buildJobName to deployment
         }
 
-        if (app.appVolumes.length === 0 || app.appVolumes.every(vol => vol.accessMode === 'ReadWriteMany')) {
+        if (!appHasPvcChanges && app.appVolumes.length === 0 || app.appVolumes.every(vol => vol.accessMode === 'ReadWriteMany')) {
             body.spec!.strategy = {
                 type: 'RollingUpdate',
                 rollingUpdate: {
@@ -174,7 +175,7 @@ class DeploymentService {
         }
         await pvcService.deleteUnusedPvcOfApp(app);
         await this.createOrUpdateService(app);
-        await ingressService.createOrUpdateIngress(app);
+        await ingressService.createOrUpdateIngressForApp(app);
     }
 
     async setReplicasForDeployment(projectId: string, appId: string, replicas: number) {
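Note on the updated strategy guard: `&&` binds tighter than `||`, so the condition reads as `(!appHasPvcChanges && app.appVolumes.length === 0) || app.appVolumes.every(...)`, and `Array.prototype.every` is vacuously true on an empty array. A two-line check:

    // every() on an empty array is vacuously true:
    const noVolumes: { accessMode: string }[] = [];
    console.log(noVolumes.every(vol => vol.accessMode === 'ReadWriteMany')); // true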
@@ -1,7 +1,8 @@
 import { AppExtendedModel } from "@/model/app-extended.model";
 import k3s from "../adapter/kubernetes-api.adapter";
 import { V1Ingress, V1PersistentVolumeClaim } from "@kubernetes/client-node";
 import { StringUtils } from "../utils/string.utils";
+import { AppDomain } from "@prisma/client";
 
 class IngressService {
 
@@ -10,9 +11,13 @@ class IngressService {
         return res.body.items.filter((item) => item.metadata?.name?.startsWith(`ingress-${appId}`));
     }
 
-    async getIngress(projectId: string, appId: string, domainId: string) {
+    async getIngress(projectId: string, appId: string, domainId: string, redirectIngress = false) {
         const res = await k3s.network.listNamespacedIngress(projectId);
-        return res.body.items.find((item) => item.metadata?.name === `ingress-${appId}-${domainId}`);
+        return res.body.items.find((item) => item.metadata?.name === this.getIngressName(appId, domainId, redirectIngress));
+    }
+
+    getIngressName(appId: string, domainId: string, redirectIngress = false) {
+        return `ingress-${appId}-${domainId}` + (redirectIngress ? '-redirect' : '');
     }
 
     async deleteObsoleteIngresses(app: AppExtendedModel) {
@@ -44,69 +49,181 @@ class IngressService {
         }
     }
 
-    async createOrUpdateIngress(app: AppExtendedModel) {
-        for (const domainObj of app.appDomains) {
-            const domain = domainObj.hostname;
-            const ingressName = `ingress-${app.id}-${domainObj.id}`;
-
-            const existingIngress = await this.getIngress(app.projectId, app.id, domainObj.id);
-
-            const ingressDefinition: V1Ingress = {
-                apiVersion: 'networking.k8s.io/v1',
-                kind: 'Ingress',
-                metadata: {
-                    name: ingressName,
-                    namespace: app.projectId,
-                    annotations: {
-                        ...(domainObj.useSsl === true && { 'cert-manager.io/cluster-issuer': 'letsencrypt-production' }),
-                    },
-                },
-                spec: {
-                    ingressClassName: 'traefik',
-                    rules: [
-                        {
-                            host: domain,
-                            http: {
-                                paths: [
-                                    {
-                                        path: '/',
-                                        pathType: 'Prefix',
-                                        backend: {
-                                            service: {
-                                                name: StringUtils.toServiceName(app.id),
-                                                port: {
-                                                    number: app.defaultPort,
-                                                },
-                                            },
-                                        },
-                                    },
-                                ],
-                            },
-                        },
-                    ],
-                    ...(domainObj.useSsl === true && {
-                        tls: [
-                            {
-                                hosts: [domain],
-                                secretName: `secret-tls-${app.id}-${domainObj.id}`,
-                            },
-                        ],
-                    }),
-                },
-            };
-
-            if (existingIngress) {
-                await k3s.network.replaceNamespacedIngress(ingressName, app.projectId, ingressDefinition);
-                console.log(`Ingress ${ingressName} for domain ${domain} updated successfully.`);
-            } else {
-                await k3s.network.createNamespacedIngress(app.projectId, ingressDefinition);
-                console.log(`Ingress ${ingressName} for domain ${domain} created successfully.`);
-            }
-        }
-
-        await this.deleteObsoleteIngresses(app);
-    }
+    async middlewareForNamespaceAlreadyExists(namespace: string) {
+        const res = await k3s.customObjects.listNamespacedCustomObject(
+            'traefik.io', // group
+            'v1alpha1', // version
+            namespace, // namespace
+            'middlewares' // plural name of the custom resource
+        );
+        console.log(res.body);
+        return (res.body as any) && (res.body as any)?.items && (res.body as any)?.items?.length > 0;
+    }
+
+    async createIfNotExistRedirectMiddlewareIngress(namespace: string) {
+        if (await this.middlewareForNamespaceAlreadyExists(namespace)) {
+            return;
+        }
+
+        const middlewareManifest = {
+            apiVersion: 'traefik.io/v1alpha1',
+            kind: 'Middleware',
+            metadata: {
+                name: 'redirect-to-https',
+                namespace,
+            },
+            spec: {
+                redirectScheme: {
+                    scheme: 'https',
+                    permanent: true,
+                }
+            },
+        };
+
+        await k3s.customObjects.createNamespacedCustomObject(
+            'traefik.io', // group
+            'v1alpha1', // version
+            namespace, // namespace
+            'middlewares', // plural name of the custom resource
+            middlewareManifest // object manifest
+        );
+    }
+
+    async createOrUpdateIngressForApp(app: AppExtendedModel) {
+
+        await this.createIfNotExistRedirectMiddlewareIngress("kube-system");
+
+        for (const domainObj of app.appDomains) {
+            await this.createIngress(app, domainObj);
+            if (domainObj.useSsl && domainObj.redirectHttps) {
+                await this.createRedirectIngress(app, domainObj);
+            } else {
+                const redirectIngress = await this.getIngress(app.projectId, app.id, domainObj.id, true);
+                if (redirectIngress) {
+                    await k3s.network.deleteNamespacedIngress(redirectIngress.metadata!.name!, app.projectId);
+                    console.log(`Deleted redirect ingress for domain ${domainObj.hostname}.`);
+                }
+            }
+        }
+
+        await this.deleteObsoleteIngresses(app);
+    }
+
+    async createIngress(app: AppExtendedModel, domain: AppDomain) {
+        const hostname = domain.hostname;
+        const ingressName = this.getIngressName(app.id, domain.id);
+        const existingIngress = await this.getIngress(app.projectId, app.id, domain.id);
+
+        const ingressDefinition: V1Ingress = {
+            apiVersion: 'networking.k8s.io/v1',
+            kind: 'Ingress',
+            metadata: {
+                name: ingressName,
+                namespace: app.projectId,
+                annotations: {
+                    ...(domain.useSsl === true && { 'cert-manager.io/cluster-issuer': 'letsencrypt-production' }),
+                    ...(domain.useSsl === true && { 'traefik.ingress.kubernetes.io/router.tls': 'true' }),
+                    ...(domain.useSsl === true && { 'traefik.ingress.kubernetes.io/router.entrypoints': 'websecure' }), // disable requests from http --> use separate ingress for redirect
+                    ...(domain.useSsl === false && { 'traefik.ingress.kubernetes.io/router.entrypoints': 'web' }), // disable requests from https
+                },
+            },
+            spec: {
+                ingressClassName: 'traefik',
+                rules: [
+                    {
+                        host: hostname,
+                        http: {
+                            paths: [
+                                {
+                                    path: '/',
+                                    pathType: 'Prefix',
+                                    backend: {
+                                        service: {
+                                            name: StringUtils.toServiceName(app.id),
+                                            port: {
+                                                number: domain.port,
+                                            },
+                                        },
+                                    },
+                                },
+                            ],
+                        },
+                    },
+                ],
+                ...(domain.useSsl === true && {
+                    tls: [
+                        {
+                            hosts: [hostname],
+                            secretName: `secret-tls-${app.id}-${domain.id}`,
+                        },
+                    ],
+                }),
+            },
+        };
+
+        if (existingIngress) {
+            await k3s.network.replaceNamespacedIngress(ingressName, app.projectId, ingressDefinition);
+            console.log(`Ingress ${ingressName} for domain ${hostname} updated successfully.`);
+        } else {
+            await k3s.network.createNamespacedIngress(app.projectId, ingressDefinition);
+            console.log(`Ingress ${ingressName} for domain ${hostname} created successfully.`);
+        }
+    }
+
+    async createRedirectIngress(app: AppExtendedModel, domain: AppDomain) {
+
+        const ingressName = this.getIngressName(app.id, domain.id, true);
+        const existingRedirectIngress = await this.getIngress(app.projectId, app.id, domain.id, true);
+
+        // https://devopsx.com/traefik-ingress-redirect-http-to-https/
+        // https://aqibrahman.com/set-up-traefik-kubernetes-ingress-for-http-and-https-with-redirect-to-https
+        const ingressDefinition: V1Ingress = {
+            apiVersion: 'networking.k8s.io/v1',
+            kind: 'Ingress',
+            metadata: {
+                name: ingressName,
+                namespace: app.projectId,
+                annotations: {
+                    'traefik.ingress.kubernetes.io/router.entrypoints': 'web',
+                    'traefik.ingress.kubernetes.io/router.middlewares': `kube-system-redirect-to-https@kubernetescrd`, // <namespace>-<middleware-name>@kubernetescrd
+                },
+            },
+            spec: {
+                ingressClassName: 'traefik',
+                rules: [
+                    {
+                        host: domain.hostname,
+                        http: {
+                            paths: [
+                                {
+                                    path: '/',
+                                    pathType: 'ImplementationSpecific',
+                                    backend: {
+                                        service: {
+                                            name: StringUtils.toServiceName(app.id),
+                                            port: {
+                                                number: domain.port,
+                                            },
+                                        },
+                                    },
+                                },
+                            ],
+                        },
+                    },
+                ],
+            },
+        };
+
+        if (existingRedirectIngress) {
+            await k3s.network.replaceNamespacedIngress(ingressName, app.projectId, ingressDefinition);
+            console.log(`Updated redirect ingress ${ingressName} for domain ${domain.hostname}`);
+        } else {
+            await k3s.network.createNamespacedIngress(app.projectId, ingressDefinition);
+            console.log(`Created redirect ingress ${ingressName} for domain ${domain.hostname}`);
+        }
+    }
+
 }
 
 const ingressService = new IngressService();
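Note: the redirect design pairs two ingresses per hostname. The main ingress is pinned to the `websecure` entrypoint, while the `-redirect` ingress listens on `web` and sends every request through the cluster-wide `redirect-to-https` Middleware. Traefik's Kubernetes ingress provider references CRD middlewares as `<namespace>-<middleware-name>@kubernetescrd`; a sketch of a helper that builds that annotation value, hypothetical and not part of the commit:

    // Hypothetical helper: build a Traefik middleware reference for ingress annotations.
    function traefikMiddlewareRef(namespace: string, middlewareName: string): string {
        return `${namespace}-${middlewareName}@kubernetescrd`;
    }

    // traefikMiddlewareRef('kube-system', 'redirect-to-https')
    // -> 'kube-system-redirect-to-https@kubernetescrd' (as used in createRedirectIngress above)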
@@ -12,7 +12,7 @@ class PvcService {
         for (const appVolume of app.appVolumes) {
             const pvcName = `pvc-${app.id}-${appVolume.id}`;
             const existingPvc = existingPvcs.find(pvc => pvc.metadata?.name === pvcName);
-            if (existingPvc && existingPvc.spec!.resources!.requests!.storage !== `${appVolume.size}Gi`) {
+            if (existingPvc && existingPvc.spec!.resources!.requests!.storage !== `${appVolume.size}Mi`) {
                 return true;
             }
         }
@@ -60,7 +60,7 @@ class PvcService {
                     storageClassName: 'longhorn',
                     resources: {
                         requests: {
-                            storage: `${appVolume.size}Gi`,
+                            storage: `${appVolume.size}Mi`,
                        },
                    },
                },
@@ -68,13 +68,13 @@ class PvcService {
 
             const existingPvc = existingPvcs.find(pvc => pvc.metadata?.name === pvcName);
             if (existingPvc) {
-                if (existingPvc.spec!.resources!.requests!.storage === `${appVolume.size}Gi`) {
+                if (existingPvc.spec!.resources!.requests!.storage === `${appVolume.size}Mi`) {
                     console.log(`PVC ${pvcName} for app ${app.id} already exists with the same size`);
                     continue;
                 }
-                // Only the Size of PVC can be updated, so we need to delete and recreate the PVC
-                existingPvc.spec!.resources!.requests!.storage = `${appVolume.size}Gi`;
+                // update PVC size
+                existingPvc.spec!.resources!.requests!.storage = `${appVolume.size}Mi`;
                 await k3s.core.replaceNamespacedPersistentVolumeClaim(pvcName, app.projectId, existingPvc);
                 console.log(`Updated PVC ${pvcName} for app ${app.id}`);
 
@@ -82,7 +82,7 @@ class PvcService {
                 console.log(`Waiting for PV ${existingPvc.spec!.volumeName} to be resized`);
 
                 await this.waitUntilPvResized(existingPvc.spec!.volumeName!, appVolume.size);
-                console.log(`PV ${existingPvc.spec!.volumeName} resized to ${appVolume.size}Gi`);
+                console.log(`PV ${existingPvc.spec!.volumeName} resized to ${appVolume.size}Mi`);
             } else {
                 await k3s.core.createNamespacedPersistentVolumeClaim(app.projectId, pvcDefinition);
                 console.log(`Created PVC ${pvcName} for app ${app.id}`);
@@ -111,10 +111,10 @@ class PvcService {
     private async waitUntilPvResized(persistentVolumeName: string, size: number) {
         let iterationCount = 0;
         let pv = await k3s.core.readPersistentVolume(persistentVolumeName);
-        while (pv.body.spec!.capacity!.storage !== `${size}Gi`) {
+        while (pv.body.spec!.capacity!.storage !== `${size}Mi`) {
             if (iterationCount > 30) {
-                console.error(`Timeout: PV ${persistentVolumeName} not resized to ${size}Gi`);
-                throw new ServiceException(`Timeout: Volume could not be resized to ${size}Gi`);
+                console.error(`Timeout: PV ${persistentVolumeName} not resized to ${size}Mi`);
+                throw new ServiceException(`Timeout: Volume could not be resized to ${size}Mi`);
             }
             await new Promise(resolve => setTimeout(resolve, 3000)); // wait 3 seconds so that the PV has time to be resized
             pv = await k3s.core.readPersistentVolume(persistentVolumeName);
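Note: the Gi-to-Mi switch matches the UI change from "Size in GB" to "Size in MB". Kubernetes quantities use binary suffixes, so `1Gi` equals `1024Mi`. A hypothetical conversion helper for illustration, not part of the commit:

    // Hypothetical helpers: binary-suffix conversions for Kubernetes quantities.
    const mebibytesToQuantity = (sizeMiB: number): string => `${sizeMiB}Mi`;
    const gibibytesToMebibytes = (sizeGiB: number): number => sizeGiB * 1024;

    // A volume previously stored as '20Gi' is now '20480Mi':
    console.log(mebibytesToQuantity(gibibytesToMebibytes(20))); // '20480Mi'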