# Kubeadm init ubuntu 24.04
Kubernetes multi-node cluster using kubeadm based on Ubuntu 24.04
- https://medium.com/@subhampradhan966/kubeadm-setup-for-ubuntu-24-04-lts-f6a5fc67f0df
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/
- https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
## Scripts
### lxc-vm-launch.sh <lxd container name>
```
#!/bin/bash
set -eu
export name=$1
lxc init ubuntu:24.04 --vm -c limits.cpu=4 -c limits.memory=16GB $name
lxc config device override $name root size=30GB
lxc start $name
```
### lxc-vm-launch expanded
```
#!/bin/bash
set -eu
if [ $# -eq 1 ]; then
  lxc cluster list
  zpool list
fi

if [ $# -ne 7 ]; then
  echo "Usage: $0 <name> <disk_size> <cpu> <memory> <add zvol for rook-ceph true/false> <zvol size in G> <remote node target>"
  echo "Example: $0 test1 30GB 8 16GB true 100G lxd1.example.com"
  exit 1
fi
name=$1
disk_size=$2
cpu=$3
memory=$4
rook_ceph_zvol=$5
rook_ceph_zvol_size=$6
target=$7
# lxc cluster list
lxc init ubuntu:24.04 --vm $name -c limits.cpu=$cpu -c limits.memory=$memory --target $target
lxc config device override $name root size=$disk_size
if [[ "$rook_ceph_zvol" == "true" ]]; then
  sudo zfs create -s -V $rook_ceph_zvol_size tank/$name-rookceph
  lxc config device add $name data disk source=/dev/zvol/tank/$name-rookceph
fi
lxc start $name

# Cleanup example:
# lxc delete kub2
# sudo zfs destroy tank/kub2-rookceph
```
### Create two containers
```
./lxc-vm-launch.sh myk8s1
./lxc-vm-launch.sh myk8s2
```
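With the expanded launcher the same two nodes, each with a rook-ceph zvol, might be created like this (a sketch matching the script's usage string; the hostnames and the `tank` pool are assumptions taken from the script's own example):
```
./lxc-vm-launch.sh myk8s1 30GB 8 16GB true 100G lxd1.example.com
./lxc-vm-launch.sh myk8s2 30GB 8 16GB true 100G lxd2.example.com
```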
### kubeadm-bootstrap.sh
```
#!/bin/bash
set -eu
k8s_subnet="10.244.0.0/16"
prep_vm(){
sudo apt update && sudo apt upgrade -y
sudo apt install apt-transport-https curl -y
sudo apt install containerd -y
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
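# Sanity check (not in the original script): confirm containerd now uses the systemd cgroup driver
grep 'SystemdCgroup' /etc/containerd/config.toml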
sudo systemctl restart containerd
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt update
sudo apt install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
sudo swapoff -a
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
sudo modprobe overlay
sudo modprobe br_netfilter
echo -e "br_netfilter\noverlay" | sudo tee /etc/modules-load.d/kubernetes-cni.conf
lsmod | grep overlay
lsmod | grep br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
}
kubeadm_init(){
sudo kubeadm init --pod-network-cidr=${k8s_subnet} --v=5
}
set_kubectl_config(){
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# alternative # export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes
kubectl get pods --all-namespaces
}
add_calico_cni(){
# kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
# https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/tigera-operator.yaml
curl https://raw.githubusercontent.com/projectcalico/calico/v3.29.1/manifests/custom-resources.yaml -O
# kubectl create -f custom-resources.yaml
# 'kubectl create' has no --set flag; patch the pod CIDR in the manifest instead
# (the sample manifest's default pool CIDR is 192.168.0.0/16)
sed -i "s|192.168.0.0/16|${k8s_subnet}|" custom-resources.yaml
kubectl create -f custom-resources.yaml
watch kubectl get pods -n calico-system
}
get_cilium_cli(){
# https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
}
install_cilium(){
export KUBECONFIG=/etc/kubernetes/admin.conf
cilium install --version 1.16.5
cilium status --wait
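# Optional follow-up (standard cilium-cli subcommand, not in the original script);
# it deploys test workloads and can take several minutes:
# cilium connectivity test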
}
get_join_command(){
kubeadm token create --print-join-command
}
get_controlplane_join_command(){
kubeadm init phase upload-certs --upload-certs
# Example join (fill in real values; the certificate key is the one printed by upload-certs above):
# kubeadm join <control node ip>:6443 --token wc2qp7.1b... --discovery-token-ca-cert-hash sha256:1b15... --control-plane --certificate-key=<key from upload-certs above>
}
wip_harden_security(){
# 1. Enable RBAC (Role-Based Access Control)
kubectl create clusterrolebinding default-worker --clusterrole=system:node --serviceaccount=default:default
# 2. Configure Network Policies (example: deny all by default)
cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: deny-all
namespace: default
spec:
podSelector: {} # Apply to all pods in the namespace
policyTypes:
- Ingress
- Egress
ingress: [] # No ingress allowed by default
egress: [] # No egress allowed by default
EOF
# 3. Enable Pod Security Policies (if supported by your Kubernetes version)
# ... (Add PSP definitions and enforcement configurations here) ...
# 4. Limit Resource Requests and Limits for Pods
# ... (Modify deployments/deployments.yaml to include resource limits) ...
# 5. Enable Audit Logging
# ... (Configure audit logging using the Kubernetes API) ...
# 6. Regularly update Kubernetes components
# (the dashboard manifests below were removed from the main kubernetes repo;
# the dashboard now lives at https://github.com/kubernetes/dashboard)
# kubectl apply -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/kubernetes-dashboard/dashboard-rbac.yaml
# kubectl apply -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/kubernetes-dashboard/dashboard.yaml
# 7. Enable TLS encryption for API server communication
# ... (Modify kube-apiserver configuration) ...
# 8. Secure the Kubernetes API server
# ... (Restrict access to the API server using firewall rules and authentication methods) ...
# 9. Regularly review and update security best practices
}
prep_vm
kubeadm_init
set_kubectl_config
get_cilium_cli
install_cilium
get_join_command
# add_calico_cni
# wip_harden_security
```

### k8s_node_tests.sh
```
#!/bin/bash
set -eu
tests(){
swapon --show
grep "noverlay" /etc/modules-load.d/kubernetes-cni.conf
grep "br_netfilter" /etc/modules-load.d/kubernetes-cni.conf
lsmod | grep overlay
lsmod | grep br_netfilter
if [[ $(sysctl -n net.bridge.bridge-nf-call-iptables) -eq 1 && \
$(sysctl -n net.bridge.bridge-nf-call-ip6tables) -eq 1 && \
$(sysctl -n net.ipv4.ip_forward) -eq 1 ]]; then
echo "1"
echo "1"
echo "1"
else
echo "One or more sysctl parameters are not set to 1"
fi
}

tests
```

### pod.yaml
```
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- name: nginx
image: docker.io/library/nginx:latest
ports:
- containerPort: 80
```

### create pod
```
kubectl apply -f pod.yaml
```

### get pods
```
kubectl get pods
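# Without a Service, the pod can be reached via a port-forward
# (standard kubectl; local port 8080 is an arbitrary choice, not from the original):
kubectl port-forward pod/nginx 8080:80
# then, in another shell:
curl -s http://localhost:8080 | head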
```

### Rotate Certificates
```
curl https://localhost:6443 -k -v 2>&1 | grep expire.
kubeadm certs renew all
curl https://localhost:6443 -k -v 2>&1 | grep expire.
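# kubeadm can also report certificate lifetimes directly:
kubeadm certs check-expiration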
```

## Some Examples of Commands
### Join command
```
kubeadm join 10.1.9.209:6443 --token qmcl0q.m2r... --discovery-token-ca-cert-hash sha256:d1fbc1851c161a617c986...
```

### Upload certificates
```
kubeadm init phase upload-certs --upload-certs
```

### Install Docker
```
sudo apt install docker.io
```

# Join more control planes
```
kubectl -n kube-system edit cm kubeadm-config
```
Add the line `controlPlaneEndpoint: "10.7.1.50:6443"` to the `ClusterConfiguration` block:
```
apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      timeoutForControlPlane: 4m0s
    controlPlaneEndpoint: "10.7.1.50:6443"
```
Then join. Note that `kubeadm certs certificate-key` generates a fresh random key each time it runs, so the value passed to `--certificate-key` must instead be the key printed by `kubeadm init phase upload-certs --upload-certs`:
```
kubeadm join 10.7.1.50:6443 --token m1v... --discovery-token-ca-cert-hash sha256:1b15c1ad... --control-plane --certificate-key <key from upload-certs>
```
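A compact sequence showing where each command runs (a sketch; tokens, hashes, and the key are placeholders):
```
# on an existing control plane node
sudo kubeadm init phase upload-certs --upload-certs    # prints the certificate key
sudo kubeadm token create --print-join-command         # prints token and CA cert hash

# on the node being promoted to a control plane
sudo kubeadm join 10.7.1.50:6443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane --certificate-key <certificate-key>
```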
# Helm Charts
## Rook Ceph
```
# https://rook.io/docs/rook/v1.16/Getting-Started/intro/

helm repo add rook-release https://charts.rook.io/release
helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
# the cluster chart needs a release name and chart reference:
helm install --create-namespace --namespace rook-ceph-cluster \
  --set 'cephFileSystems[0].storageClass.enabled=false' \
  --set 'cephObjectStores[0].storageClass.enabled=false' \
  --set 'cephClusterSpec.dashboard.enabled=false' \
  rook-ceph-cluster rook-release/rook-ceph-cluster
# ingress.dashboard

# Pinned-version alternative:
# helm repo add rook-release https://charts.rook.io/release
# helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
# helm upgrade --install --version 1.15.6 rook-ceph-cluster rook-release/rook-ceph-cluster -f rook-ceph-cluster.values.yaml
```
# HA Multiple Control Planes
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#initializing-your-control-plane-node
```
kubeadm init --control-plane-endpoint <load-balancer-ip>:6443 ...
```
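The `<load-balancer-ip>` is typically a small TCP load balancer in front of all the API servers. A minimal haproxy sketch (all IPs and names hypothetical, not from the original):
```
# /etc/haproxy/haproxy.cfg (fragment)
frontend kube-apiserver
    bind *:6443
    mode tcp
    default_backend control-planes

backend control-planes
    mode tcp
    balance roundrobin
    server cp1 10.7.1.51:6443 check
    server cp2 10.7.1.52:6443 check
    server cp3 10.7.1.53:6443 check
```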
# Upgrade
- https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/
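Roughly, the documented flow (a sketch; 1.31 is a hypothetical target minor, `x` a placeholder patch release, and the repo in /etc/apt/sources.list.d/kubernetes.list must be bumped to the matching minor first):
```
# on the first control plane node: upgrade kubeadm, then the control plane
sudo apt-mark unhold kubeadm
sudo apt-get update && sudo apt-get install -y kubeadm='1.31.x-*'
sudo apt-mark hold kubeadm
sudo kubeadm upgrade plan
sudo kubeadm upgrade apply v1.31.x

# then on each node in turn: drain, upgrade kubelet/kubectl, restart, uncordon
kubectl drain <node> --ignore-daemonsets
sudo apt-mark unhold kubelet kubectl
sudo apt-get install -y kubelet='1.31.x-*' kubectl='1.31.x-*'
sudo apt-mark hold kubelet kubectl
sudo systemctl daemon-reload && sudo systemctl restart kubelet
kubectl uncordon <node>
```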
# Issues
## Docker Hub Rate Limit
Just wait for the rate limit to reset, or authenticate your pulls (sketch below).
```
Warning  Failed  2s (x3 over 48s)  kubelet  Failed to pull image "docker.io/calico/node-driver-registrar:v3.29.1": failed to pull and unpack image "docker.io/calico/node-driver-registrar:v3.29.1": failed to copy: httpReadSeeker: failed open: unexpected status code https://registry-1.docker.io/v2/calico/node-driver-registrar/manifests/sha256:c15fcc64ea239e0f5a0df9ca01a632c7fbcab93c77c228b5b517272ca7fef01d: 429 Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit
```
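A minimal sketch of authenticated pulls (assumes a Docker Hub account; `dockerhub-creds` is a hypothetical secret name):
```
kubectl create secret docker-registry dockerhub-creds \
  --docker-server=https://index.docker.io/v1/ \
  --docker-username=<user> \
  --docker-password=<access-token>
```
Then reference the secret from the pod spec:
```
spec:
  imagePullSecrets:
  - name: dockerhub-creds
```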