Kubeadm init Ubuntu 24.04


Build a multi-node Kubernetes cluster with kubeadm on Ubuntu 24.04 LXD virtual machines.

Scripts

lxc-vm-launch.sh

#!/bin/bash
set -eu
export name=$1
lxc init ubuntu:24.04 --vm -c limits.cpu=4 -c limits.memory=16GB $name
lxc config device override $name root size=30GB
lxc start $name

lxc-vm-launch.sh, expanded variant (per-VM sizing, optional Rook Ceph zvol, remote target)

#!/bin/bash
set -eu
# With a single argument, just list available cluster targets and zpools
if [ $# -eq 1 ]; then
  lxc cluster list
  zpool list
fi

if [ $# -ne 7 ]; then
  echo "Usage: $0 <name> <disk_size> <cpu> <memory> <add zvol for rook-ceph true/false> <zvol size in G> <remote node target>"
  echo "Example: $0 test1 30GB 8 16GB true 100G lxd1.example.com"
  exit 1
fi
name=$1
disk_size=$2
cpu=$3
memory=$4
rook_ceph_zvol=$5
rook_ceph_zvol_size=$6
target=$7
# lxc cluster list
lxc init ubuntu:24.04 --vm $name -c limits.cpu=$cpu -c limits.memory=$memory --target $target
lxc config device override $name root size=$disk_size
if [[ "$rook_ceph_zvol" == "true" ]]; then
  sudo zfs create -s -V $rook_ceph_zvol_size tank/$name-rookceph
  lxc config device add $name data disk source=/dev/zvol/tank/$name-rookceph
fi
lxc start $name


# lxc delete kub2
# sudo zfs destroy tank/kub2-rookceph

Create two VMs

./lxc-vm-launch.sh myk8s1
./lxc-vm-launch.sh myk8s2
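
Before bootstrapping, confirm both VMs are running and have picked up IPv4 addresses (the names below are the ones used in this example):

lxc list
lxc exec myk8s1 -- bash   # shell into the first VM to run the bootstrap script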

kubeadm-bootstrap.sh

#!/bin/bash
set -eu

k8s_subnet="10.244.0.0/16"


prep_vm(){
  sudo apt update && sudo apt upgrade -y
  sudo apt install apt-transport-https curl -y

  sudo apt install containerd -y
  sudo mkdir -p /etc/containerd
  containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
  sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
  sudo systemctl restart containerd

  curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
  echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
  sudo apt update
  sudo apt install -y kubelet kubeadm kubectl
  sudo apt-mark hold kubelet kubeadm kubectl

  sudo swapoff -a
  sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

  sudo modprobe overlay
  sudo modprobe br_netfilter

  cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

  sudo sysctl --system
}


kubeadm_init(){
  sudo kubeadm init --pod-network-cidr=${k8s_subnet} --v=5
}


set_kubectl_config(){
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

  # alternative # export KUBECONFIG=/etc/kubernetes/admin.conf

  kubectl get nodes
  kubectl get pods --all-namespaces
}


add_calico_cni(){
  # kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
  # https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises
  kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
  curl https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/custom-resources.yaml -O
  # kubectl create has no --set flag; rewrite the operator's default pod CIDR
  # (192.168.0.0/16 in custom-resources.yaml) to match the cluster before applying
  sed -i "s|192.168.0.0/16|${k8s_subnet}|" custom-resources.yaml
  kubectl create -f custom-resources.yaml
  watch kubectl get pods -n calico-system
}


get_cilium_cli(){
  # https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
  CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
  CLI_ARCH=amd64
  if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
  curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
  sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
  sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
  rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
}


install_cilium(){
  cilium install --version 1.16.5
  cilium status --wait
}


get_join_command(){
  sudo kubeadm token create --print-join-command
}


wip_harden_security(){
  # 1. Enable RBAC (Role-Based Access Control)
  # Example binding syntax (the role and subject below are illustrative only)
  kubectl create clusterrolebinding default-worker --clusterrole=system:node --serviceaccount=default:default

  # 2. Configure Network Policies (example: deny all by default)
  cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-all
  namespace: default
spec:
  podSelector: {} # Apply to all pods in the namespace
  policyTypes:
    - Ingress
    - Egress
  ingress: [] # No ingress allowed by default
  egress: [] # No egress allowed by default
EOF
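
  # Note: a blanket deny-all also blocks DNS lookups, which breaks most workloads.
  # A companion policy like this sketch restores name resolution (assumes CoreDNS
  # carries the kubeadm-default label k8s-app=kube-dns in kube-system):
  cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns-egress
  namespace: default
spec:
  podSelector: {}
  policyTypes:
    - Egress
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
          podSelector:
            matchLabels:
              k8s-app: kube-dns
      ports:
        - protocol: UDP
          port: 53
        - protocol: TCP
          port: 53
EOF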

  # 3. Enforce Pod Security Standards (PodSecurityPolicy was removed in v1.25;
  # use Pod Security Admission namespace labels instead)
  # ... (e.g. kubectl label namespace default pod-security.kubernetes.io/enforce=baseline) ...

  # 4. Limit Resource Requests and Limits for Pods
  # ... (Modify deployments/deployments.yaml to include resource limits) ...

  # 5. Enable Audit Logging
  # ... (Configure audit logging using the Kubernetes API) ...

  # 6. Regularly update Kubernetes components
  # ... (apt upgrade the held kubeadm/kubelet/kubectl packages, then kubeadm upgrade) ...

  # 7. Enable TLS encryption for API server communication
  # ... (Modify kube-apiserver configuration) ...

  # 8. Secure the Kubernetes API server
  # ... (Restrict access to the API server using firewall rules and authentication methods) ...

  # 9. Regularly review and update security best practices
}


prep_vm
kubeadm_init
set_kubectl_config
get_cilium_cli
install_cilium
get_join_command
# add_calico_cni
# wip_harden_security
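
The call list above is for the first control-plane node. Worker nodes only need the host prep plus a join; a minimal sketch, assuming prep_vm is copied from the script above and the token and hash come from get_join_command on the control plane:

#!/bin/bash
set -eu
prep_vm   # same host prep function as in kubeadm-bootstrap.sh
# Placeholders below come from `kubeadm token create --print-join-command`
sudo kubeadm join <control-plane-ip>:6443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash>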

pod.yaml

apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: docker.io/library/nginx:latest
    ports:
    - containerPort: 80

create pod

kubectl apply -f pod.yaml

get pods

kubectl get pods
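
To verify pod networking end to end, the pod can be exposed as a NodePort service and curled from outside the cluster; a quick check, assuming the nginx pod above:

kubectl expose pod nginx --port=80 --type=NodePort
kubectl get svc nginx          # note the assigned NodePort (30000-32767 range)
curl http://<node-ip>:<node-port>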

Rotate Certificates

curl -kv https://localhost:6443 2>&1 | grep -i expire
sudo kubeadm certs renew all
curl -kv https://localhost:6443 2>&1 | grep -i expire
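
kubeadm can also report expiry directly, which is less fragile than grepping curl output. Note that after renewal the control-plane static pods must be restarted to load the new certificates; moving the manifests out and back is one common way to force that (paths are kubeadm defaults):

sudo kubeadm certs check-expiration
sudo mv /etc/kubernetes/manifests /tmp/k8s-manifests && sleep 20 && sudo mv /tmp/k8s-manifests /etc/kubernetes/manifests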

Some Examples of Commands

Join command

kubeadm join 10.1.9.209:6443 --token qmcl0q.m2r... --discovery-token-ca-cert-hash sha256:d1fbc1851c161a617c986...

Install Docker

sudo apt install docker.io
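
Docker here is only needed for building and pushing images (the kubelet talks to containerd directly). Adding your user to the docker group avoids sudo on every command:

sudo usermod -aG docker $USER
newgrp docker        # or log out and back in
docker run --rm hello-world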

Helm Charts

Rook Ceph

# https://rook.io/docs/rook/v1.16/Getting-Started/intro/

helm repo add rook-release https://charts.rook.io/release
helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
helm install --create-namespace --namespace rook-ceph-cluster \
  rook-ceph-cluster rook-release/rook-ceph-cluster \
  --set operatorNamespace=rook-ceph \
  --set 'cephFileSystems[0].storageClass.enabled=false' \
  --set 'cephObjectStores[0].storageClass.enabled=false' \
  --set 'cephClusterSpec.dashboard.enabled=false'
# ingress.dashboard


# helm repo add rook-release https://charts.rook.io/release
# helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
# helm upgrade --install  --version 1.15.6 rook-ceph-cluster rook-release/rook-ceph-cluster -f rook-ceph-cluster.values.yaml
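
The expanded launch script earlier attaches a zvol to each VM as a raw disk, and Rook has to be told to consume it. A minimal sketch of the rook-ceph-cluster.values.yaml referenced above; the device name sdb is an assumption, verify with lsblk inside the VM:

cat > rook-ceph-cluster.values.yaml <<EOF
# Assumes the attached zvol appears as /dev/sdb in the VM (check lsblk)
cephClusterSpec:
  storage:
    useAllNodes: true
    useAllDevices: false
    devices:
      - name: sdb
EOF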

Issues

Docker Hub Rate Limit

Wait for the rate-limit window to reset (the kubelet retries pulls automatically), or authenticate for a higher limit, as in the sketch after the error below.

 Warning  Failed     2s (x3 over 48s)   kubelet            Failed to pull image "docker.io/calico/node-driver-registrar:v3.29.1": failed to pull and unpack image "docker.io/calico/node-driver-registrar:v3.29.1": failed to copy: httpReadSeeker: failed open: unexpected status code https://registry-1.docker.io/v2/calico/node-driver-registrar/manifests/sha256:c15fcc64ea239e0f5a0df9ca01a632c7fbcab93c77c228b5b517272ca7fef01d: 429 Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit
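
Authenticated pulls get a higher limit. The affected image can be pre-pulled on each node with Docker Hub credentials; a sketch using crictl (installed as a kubelet dependency; the username and token are placeholders):

sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock pull --creds '<dockerhub-user>:<dockerhub-token>' docker.io/calico/node-driver-registrar:v3.29.1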