Kubeadm Cluster Scripts
Prep k8s node
#!/bin/bash
set -eu

k8s_minor_version=1.31

prep_k8s_node(){
  sudo apt update && sudo apt upgrade -y
  sudo apt install apt-transport-https curl -y
  sudo apt install containerd -y
  sudo mkdir -p /etc/containerd
  containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
  # Instruct containerd to use the systemd cgroup hierarchy (typically cgroup v2) for managing container resources.
  sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
  sudo systemctl restart containerd

  curl -fsSL https://pkgs.k8s.io/core:/stable:/v${k8s_minor_version}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${k8s_minor_version}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
  sudo apt update
  sudo apt install -y kubelet kubeadm kubectl
  sudo apt-mark hold kubelet kubeadm kubectl

  sudo swapoff -a
  sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

  sudo modprobe overlay
  sudo modprobe br_netfilter
  echo -e "br_netfilter\noverlay" | sudo tee /etc/modules-load.d/k8s.conf
  lsmod | grep overlay
  lsmod | grep br_netfilter

  # ubuntu 24.04 does not need the bridge-nf-call lines
  cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
  sudo sysctl --system

  echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bashrc
  sudo reboot
}

prep_k8s_node
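A quick post-reboot sanity check, sketched here as an optional step (the commands only read back what the prep script is expected to have configured):

sudo systemctl is-active containerd
grep SystemdCgroup /etc/containerd/config.toml   # should show SystemdCgroup = true
lsmod | grep -E 'overlay|br_netfilter'
sysctl net.ipv4.ip_forward                       # should report 1
kubeadm version -o short
apt-mark showhold                                # kubelet, kubeadm, kubectl should be held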
kubeadm init
# sudo kubeadm init --pod-network-cidr=${k8s_subnet} --v=5
sudo kubeadm init --v=5
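On success kubeadm prints a kubeadm join command for worker nodes. A minimal check that the control plane came up (assuming the KUBECONFIG export from the prep script, or a fresh login shell):

export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes                # the node stays NotReady until a CNI (Cilium, below) is installed
kubectl get pods -n kube-system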
Install Cilium on the init controller
set -eu

install_cilium(){
  # https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
  CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
  CLI_ARCH=amd64
  if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
  curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
  sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
  sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
  rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}

  export KUBECONFIG=/etc/kubernetes/admin.conf
  cilium install --version 1.16.5
  cilium status --wait
}

install_cilium
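Beyond cilium status, the Cilium CLI can run an end-to-end connectivity test. This is optional and deploys test workloads into a dedicated test namespace, so treat it as a sketch rather than a required step:

export KUBECONFIG=/etc/kubernetes/admin.conf
cilium connectivity test
kubectl get nodes   # nodes should now report Ready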
Upgrade node Kubernetes version
#!/bin/bash
set -eu

k8s_minor_version=1.31

update_kubeadm_version(){
  k8s_minor_version=1.31
  k8s_keyring_file="/etc/apt/keyrings/kubernetes-apt-keyring.gpg"
  if [ ! -f "${k8s_keyring_file}" ]; then
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v${k8s_minor_version}/deb/Release.key 2>/dev/null | sudo gpg --dearmor -o ${k8s_keyring_file}
  fi
  echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${k8s_minor_version}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
  sudo apt-get update
  sudo apt-mark unhold kubeadm kubectl kubelet && \
    sudo apt-get install -y kubelet=${k8s_minor_version}.* kubeadm=${k8s_minor_version}.* kubectl=${k8s_minor_version}.* && \
    sudo apt-mark hold kubeadm kubectl kubelet && \
    sudo systemctl restart kubelet
  sudo apt-get -y dist-upgrade
  kubeadm version -o short
  echo "Reboot in 10 seconds"; sleep 10
  sudo reboot
}

update_kubeadm_version
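To confirm which patch builds the configured pkgs.k8s.io repository actually offers for the pinned minor version (and therefore what kubeadm=${k8s_minor_version}.* will resolve to), a read-only check once the kubernetes.list entry points at the target minor:

sudo apt-get update
apt-cache madison kubeadm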
Remove node taints
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
kubectl taint nodes --all node.cilium.io/agent-not-ready-
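To confirm the taints were removed (<node> is a placeholder):

kubectl describe node <node> | grep -i taints
# or list taints for every node:
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}'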
Upgrade primary controller
k8s_patch_version=$(kubeadm version -o short)
echo "Updating to version ${k8s_patch_version} in 10 seconds."; sleep 10
sudo kubeadm upgrade plan
sudo killall -s SIGTERM kube-apiserver # trigger a graceful kube-apiserver shutdown
sleep 20 # wait a little bit to permit completing in-flight requests
sudo kubeadm upgrade apply -y ${k8s_patch_version}
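The upgrade apply step runs only on the first controller. On every other node (additional controllers and workers), the per-node step after the packages have been upgraded is kubeadm upgrade node rather than upgrade apply; a minimal sketch:

sudo kubeadm upgrade node
sudo systemctl restart kubelet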
Get Worker Join Command - token expires 24h default
sudo kubeadm token create --print-join-command --ttl 1h
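The printed command is then run on the worker itself; it has roughly this shape (all values below are placeholders, not real tokens):

sudo kubeadm join <control-plane-ip>:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>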
Get Controller Join Command (must be run from an existing controller; the uploaded certificate key expires after 2h by default)
sudo kubeadm token create --print-join-command --certificate-key $(sudo kubeadm init phase upload-certs --upload-certs | sed -n '3p')
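For an additional controller the printed command also carries the --control-plane and --certificate-key flags; run it on the new control-plane node (placeholders shown):

sudo kubeadm join <control-plane-ip>:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <key>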
Drain node
#!/bin/bash
set -eux

# Function to check whether all pods are gone from the node, ignoring
# DaemonSet pods and static (mirror) pods, which drain leaves in place
function is_drained() {
    local node_name="$1"
    local pods_on_node
    pods_on_node=$(kubectl get pods --all-namespaces \
        --field-selector spec.nodeName="$node_name" \
        -o jsonpath='{range .items[*]}{.metadata.ownerReferences[0].kind}{"\n"}{end}' \
        | grep -cvE '^(DaemonSet|Node)$' || true)
    if [[ "$pods_on_node" -eq 0 ]]; then
        return 0
    else
        return 1
    fi
}

# Node to drain
node_to_drain=$1

# Drain the node
kubectl drain $node_to_drain --ignore-daemonsets --force --delete-emptydir-data --grace-period=-1 # Negative uses pod default i.e. -1

# Wait for all pods to be drained
while ! is_drained "$node_to_drain"; do
    echo "Waiting for pods to be drained from $node_to_drain..."
    sleep 5
done

# Print success message
echo "Drain complete!"
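Usage sketch (the script name and node name are hypothetical):

./drain_node.sh worker-1
# ... perform maintenance or the version upgrade ...
kubectl uncordon worker-1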