Merge pull request #498 from CanineHQ/chriszhu__redo_telepresence

consolidate implementation of cluster install with helm and skip verification of cluster TLS
This commit is contained in:
Chris Zhu
2026-01-14 16:33:38 -08:00
committed by GitHub
10 changed files with 162 additions and 87 deletions
+38 -13
View File
@@ -1,30 +1,55 @@
class Clusters::InstallAcmeIssuer
extend LightService::Action
expects :cluster, :kubectl
REPO_NAME = "jetstack".freeze
REPO_URL = "https://charts.jetstack.io".freeze
CHART_NAME = "cert-manager".freeze
CHART_URL = "jetstack/cert-manager".freeze
CHART_VERSION = "v1.15.3".freeze
CERT_MANAGER_VALUES = {
crds: {
enabled: true
}
}.freeze
expects :cluster, :kubectl, :connection
executed do |context|
cluster = context.cluster
kubectl = context.kubectl
connection = context.connection
namespace = Clusters::Install::DEFAULT_NAMESPACE
cluster.info("Checking if acme issuer is already installed", color: :yellow)
begin
kubectl.("get clusterissuer letsencrypt -n #{Clusters::Install::DEFAULT_NAMESPACE}")
kubectl.("get clusterissuer letsencrypt -n #{namespace}")
cluster.success("Acme issuer is already installed")
rescue Cli::CommandFailedError => e
cluster.info("Acme issuer not detected, installing...", color: :yellow)
cluster.info("Installing cert-manager...", color: :yellow)
command = "bash #{Rails.root.join("resources", "k8", "scripts", "install_cert_manager.sh")}"
runner = Cli::RunAndLog.new(cluster)
kubectl.with_kube_config do |kubeconfig_file|
begin
runner.(command, envs: { "KUBECONFIG" => kubeconfig_file.path, "NAMESPACE" => Clusters::Install::DEFAULT_NAMESPACE })
cluster.success("Cert-manager installed successfully")
rescue Cli::CommandFailedError => e
cluster.failed!
cluster.error("Cert-manager failed to install")
context.fail_and_return!("Script failed with exit code #{e.message}")
end
begin
runner = Cli::RunAndLog.new(cluster)
helm = K8::Helm::Client.connect(connection, runner)
helm.add_repo(REPO_NAME, REPO_URL)
helm.repo_update(repo_name: REPO_NAME)
helm.install(
CHART_NAME,
CHART_URL,
CHART_VERSION,
values: CERT_MANAGER_VALUES,
namespace: namespace,
create_namespace: true
)
cluster.success("Cert-manager installed successfully")
rescue StandardError => e
cluster.failed!
cluster.error("Cert-manager failed to install")
context.fail_and_return!("Helm install failed: #{e.message}")
end
cluster.info("Installing acme issuer...", color: :yellow)
+53 -13
View File
@@ -1,29 +1,69 @@
class Clusters::InstallNginxIngress
extend LightService::Action
expects :cluster, :kubectl
REPO_NAME = "ingress-nginx".freeze
REPO_URL = "https://kubernetes.github.io/ingress-nginx".freeze
CHART_NAME = "ingress-nginx".freeze
CHART_URL = "ingress-nginx/ingress-nginx".freeze
NGINX_VALUES = {
controller: {
config: {
"use-forwarded-headers" => "true",
"proxy-real-ip-cidr" => "0.0.0.0/0",
"enable-underscores-in-headers" => "true",
"proxy-pass-headers" => "*",
"proxy-body-size" => "0",
"proxy-buffer-size" => "16k",
"proxy-buffers-number" => "8",
"proxy-busy-buffers-size" => "32k",
"proxy-read-timeout" => "3600",
"proxy-send-timeout" => "3600",
"h2-backend" => "true",
"hsts" => "true",
"hsts-max-age" => "63072000",
"hsts-include-subdomains" => "true",
"hsts-preload" => "true",
"enable-gzip" => "true",
"gzip-types" => "text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript"
}
}
}.freeze
expects :cluster, :kubectl, :connection
executed do |context|
cluster = context.cluster
kubectl = context.kubectl
connection = context.connection
namespace = Clusters::Install::DEFAULT_NAMESPACE
cluster.info("Checking if Nginx ingress controller is already installed...", color: :yellow)
begin
kubectl.("get deployment ingress-nginx-controller -n #{Clusters::Install::DEFAULT_NAMESPACE}")
kubectl.("get deployment ingress-nginx-controller -n #{namespace}")
cluster.success("Nginx ingress controller is already installed")
rescue Cli::CommandFailedError => e
cluster.info("Nginx ingress controller not detected, installing...", color: :yellow)
command = "bash #{Rails.root.join("resources", "k8", "scripts", "install_nginx_ingress.sh")}"
runner = Cli::RunAndLog.new(cluster)
kubectl.with_kube_config do |kubeconfig_file|
begin
runner.(command, envs: { "KUBECONFIG" => kubeconfig_file.path, "NAMESPACE" => Clusters::Install::DEFAULT_NAMESPACE })
cluster.success("Nginx ingress controller installed successfully")
rescue Cli::CommandFailedError => e
cluster.failed!
cluster.error("Nginx ingress controller failed to install")
context.fail_and_return!("Script failed with exit code #{e.message}")
end
begin
runner = Cli::RunAndLog.new(cluster)
helm = K8::Helm::Client.connect(connection, runner)
helm.add_repo(REPO_NAME, REPO_URL)
helm.repo_update(repo_name: REPO_NAME)
helm.install(
CHART_NAME,
CHART_URL,
values: NGINX_VALUES,
namespace: namespace
)
cluster.success("Nginx ingress controller installed successfully")
rescue StandardError => e
cluster.failed!
cluster.error("Nginx ingress controller failed to install")
context.fail_and_return!("Helm install failed: #{e.message}")
end
end
end
+29 -13
View File
@@ -1,28 +1,44 @@
class Clusters::InstallTelepresence
extend LightService::Action
expects :cluster, :kubectl
REPO_NAME = "datawire".freeze
REPO_URL = "https://getambassador.io".freeze
CHART_NAME = "traffic-manager".freeze
CHART_URL = "datawire/telepresence".freeze
expects :cluster, :kubectl, :connection
executed do |context|
cluster = context.cluster
kubectl = context.kubectl
connection = context.connection
namespace = Clusters::Install::DEFAULT_NAMESPACE
cluster.info("Checking if Telepresence is already installed...", color: :yellow)
begin
kubectl.("get deployment traffic-manager -n #{Clusters::Install::DEFAULT_NAMESPACE}")
kubectl.("get deployment traffic-manager -n #{namespace}")
cluster.success("Telepresence already installed")
rescue Cli::CommandFailedError => e
cluster.info("Telepresence not detected, installing...", color: :yellow)
command = "bash #{Rails.root.join("resources", "k8", "scripts", "install_telepresence.sh")}"
runner = Cli::RunAndLog.new(cluster)
kubectl.with_kube_config do |kubeconfig_file|
begin
runner.(command, envs: { "KUBECONFIG" => kubeconfig_file.path, "NAMESPACE" => Clusters::Install::DEFAULT_NAMESPACE })
cluster.success("Telepresence installed successfully")
rescue Cli::CommandFailedError => e
cluster.failed!
cluster.error("Telepresence failed to install")
context.fail_and_return!("Script failed with exit code #{e.message}")
end
begin
runner = Cli::RunAndLog.new(cluster)
helm = K8::Helm::Client.connect(connection, runner)
helm.add_repo(REPO_NAME, REPO_URL)
helm.repo_update(repo_name: REPO_NAME)
helm.install(
CHART_NAME,
CHART_URL,
namespace: namespace
)
cluster.success("Telepresence installed successfully")
rescue StandardError => e
cluster.failed!
cluster.error("Telepresence failed to install")
context.fail_and_return!("Helm install failed: #{e.message}")
end
end
end
+11 -5
View File
@@ -78,18 +78,20 @@ class K8::Helm::Client
self.class.add_repo(repository_name, repository_url, runner)
end
def build_install_command(name, chart_url, version, values_file_path:, namespace:, timeout:, dry_run:, atomic:, wait:, history_max:)
def build_install_command(name, chart_url, version, values_file_path:, namespace:, timeout:, dry_run:, atomic:, wait:, history_max:, create_namespace:, skip_tls_verify:)
command_parts = [
"helm upgrade --install #{name} #{chart_url}",
"-f #{values_file_path}",
"--namespace #{namespace}",
"--timeout=#{timeout}",
"--version #{version}"
"--timeout=#{timeout}"
]
command_parts << "--version #{version}" if version.present?
command_parts << "--dry-run" if dry_run
command_parts << "--atomic" if atomic
command_parts << "--wait" if wait
command_parts << "--history-max=#{history_max}" if history_max
command_parts << "--create-namespace" if create_namespace
command_parts << "--kube-insecure-skip-tls-verify" if skip_tls_verify
command_parts.join(" ")
end
@@ -97,13 +99,15 @@ class K8::Helm::Client
def install(
name,
chart_url,
version,
version = nil,
values: {},
namespace: 'default',
dry_run: false,
atomic: false,
wait: false,
history_max: nil,
create_namespace: false,
skip_tls_verify: K8::Kubeconfig.skip_tls_verify?,
timeout: DEFAULT_TIMEOUT
)
return StandardError.new("Can't install helm chart if not connected") unless connected?
@@ -123,7 +127,9 @@ class K8::Helm::Client
dry_run: dry_run,
atomic: atomic,
wait: wait,
history_max: history_max
history_max: history_max,
create_namespace: create_namespace,
skip_tls_verify: skip_tls_verify
)
exit_status = runner.(command, envs: { "KUBECONFIG" => kubeconfig_file.path })
raise "`#{command}` failed with exit status #{exit_status}" unless exit_status.success?
+21
View File
@@ -1,12 +1,33 @@
module K8
  # Shared helpers for working with a cluster's kubeconfig.
  #
  # TLS verification is skipped by default; setting the VERIFY_CLUSTER_TLS
  # environment variable opts back in to strict certificate checking.
  module Kubeconfig
    # true unless VERIFY_CLUSTER_TLS is set — TLS checks are skipped by default.
    def self.skip_tls_verify?
      !ENV['VERIFY_CLUSTER_TLS'].present?
    end

    # Environment hash used to forward the skip-TLS decision to shelled-out
    # commands (empty when verification is enabled).
    def self.skip_tls_env
      skip_tls_verify? ? { "SKIP_TLS_VERIFY" => "true" } : {}
    end

    # Writes the host object's +kubeconfig+ (a Hash, or a JSON string) to a
    # temporary YAML file with TLS settings applied, then yields the open
    # Tempfile. The file is deleted when the block returns.
    #
    # NOTE(review): assumes the including class defines +kubeconfig+.
    def with_kube_config
      Tempfile.open([ 'kubeconfig', '.yaml' ]) do |kubeconfig_file|
        kubeconfig_hash = kubeconfig.is_a?(String) ? JSON.parse(kubeconfig) : kubeconfig
        kubeconfig_hash = apply_tls_settings(kubeconfig_hash)
        kubeconfig_file.write(kubeconfig_hash.to_yaml)
        kubeconfig_file.flush
        yield kubeconfig_file
      end
    end

    private

    # Marks every cluster entry as insecure-skip-tls-verify when TLS
    # verification is disabled. Operates on a deep copy so the caller's
    # kubeconfig is never mutated.
    def apply_tls_settings(kubeconfig_hash)
      # Delegate to skip_tls_verify? so the VERIFY_CLUSTER_TLS policy
      # lives in exactly one place.
      return kubeconfig_hash unless K8::Kubeconfig.skip_tls_verify?

      kubeconfig_hash = kubeconfig_hash.deep_dup
      kubeconfig_hash['clusters']&.each do |cluster|
        # Skip malformed entries instead of raising NoMethodError on nil.
        next unless cluster['cluster']

        cluster['cluster']['insecure-skip-tls-verify'] = true
      end
      kubeconfig_hash
    end
  end
end
+1 -1
View File
@@ -27,7 +27,7 @@
label: "Cluster",
description: "The cluster to deploy your add on to."
)) do %>
<%= form.collection_select :cluster_id, current_account.clusters, :id, :name, {}, { class: "select select-bordered w-full" } %>
<%= form.collection_select :cluster_id, current_account.clusters.running, :id, :name, {}, { class: "select select-bordered w-full" } %>
<label class="label">
<span class="label-text-alt">* Required</span>
</label>
@@ -1,10 +0,0 @@
set -e
helm repo add jetstack https://charts.jetstack.io --force-update
helm repo update jetstack
helm install \
cert-manager jetstack/cert-manager \
--namespace $NAMESPACE \
--create-namespace \
--version v1.15.3 \
--set crds.enabled=true
@@ -1,23 +0,0 @@
set -e
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update ingress-nginx
helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
--namespace $NAMESPACE \
--set controller.config.use-forwarded-headers=true \
--set controller.config.proxy-real-ip-cidr=0.0.0.0/0 \
--set controller.config.enable-underscores-in-headers=true \
--set controller.config.proxy-pass-headers="*" \
--set controller.config.proxy-body-size=0 \
--set controller.config.proxy-buffer-size=16k \
--set controller.config.proxy-buffers-number=8 \
--set controller.config.proxy-busy-buffers-size=32k \
--set controller.config.proxy-read-timeout=3600 \
--set controller.config.proxy-send-timeout=3600 \
--set controller.config.h2-backend=true \
--set controller.config.hsts=true \
--set controller.config.hsts-max-age=63072000 \
--set controller.config.hsts-include-subdomains=true \
--set controller.config.hsts-preload=true \
--set controller.config.enable-gzip=true \
--set controller.config.gzip-types="text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript"
@@ -1,6 +0,0 @@
helm repo add datawire https://getambassador.io
helm repo update datawire
helm install traffic-manager -n ambassador datawire/telepresence --namespace $NAMESPACE
# TODO: Can't figure out how to get this working within the docker container yet.
# telepresence helm install --namespace $NAMESPACE
+9 -3
View File
@@ -32,7 +32,9 @@ RSpec.describe K8::Helm::Client do
dry_run: false,
atomic: true,
wait: true,
history_max: 10
history_max: 10,
create_namespace: true,
skip_tls_verify: true
)
expect(command).to eq(
@@ -43,7 +45,9 @@ RSpec.describe K8::Helm::Client do
"--version 1.0.0 " \
"--atomic " \
"--wait " \
"--history-max=10"
"--history-max=10 " \
"--create-namespace " \
"--kube-insecure-skip-tls-verify"
)
end
@@ -58,7 +62,9 @@ RSpec.describe K8::Helm::Client do
dry_run: false,
atomic: false,
wait: false,
history_max: nil
history_max: nil,
create_namespace: false,
skip_tls_verify: false
)
expect(command).to eq(