# CKA command snippets
Little command snippets
## DS

```
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: configurator
  namespace: configurator
spec:
  selector:
    matchLabels:
      name: configurator
  template:
    metadata:
      labels:
        name: configurator
    spec:
      containers:
      - name: configurator
        image: bash
        command: ['bash', '-c', 'echo aba997ac-1c89-4d64 | tee /configurator/config && sleep 1d']
        volumeMounts:
        - name: my-mount 
          mountPath: /configurator
      volumes:
      - name: my-mount
        hostPath:
          path: /configurator
```
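To verify the rollout, a quick check along these lines should do; `node01` is a placeholder node name, and whether a Pod also lands on the controlplane depends on its taints:

```
k -n configurator get ds,pod -o wide    # expect one configurator Pod per schedulable node
# the file is written through the hostPath mount, so it is visible on the node itself
ssh node01 cat /configurator/config     # node01 is a placeholder
```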
## Ingress

```
kubectl expose deploy/asia --port=80

kubectl create ingress world --class=nginx \
  --annotation nginx.ingress.kubernetes.io/rewrite-target=/ \
  --rule="world.universe.mine/europe*=europe:80" \
  --rule="world.universe.mine/asia*=asia:80"
```
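Assuming the europe Deployment is exposed the same way and that world.universe.mine resolves to the ingress controller (the IP below is a placeholder), the routing can be checked with curl:

```
# 172.30.1.2 is a placeholder for the ingress controller / node address
echo "172.30.1.2 world.universe.mine" | sudo tee -a /etc/hosts
curl http://world.universe.mine/europe
curl http://world.universe.mine/asia
```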
## Alias and completion

```
alias k=kubectl
source /etc/bash_completion
source <(kubectl completion bash)
complete -F __start_kubectl k

controlplane $ k get pod
```
## NetworkPolicy

```
# ingress: space2 only accepts traffic coming from space1
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np
  namespace: space2
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  ingress:
   - from:
     - namespaceSelector:
        matchLabels:
         kubernetes.io/metadata.name: space1
---
# egress: space1 may only reach space2, plus DNS on port 53
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: np
  namespace: space1
spec:
  podSelector: {}
  policyTypes:
  - Egress
  egress:
  - to:
     - namespaceSelector:
        matchLabels:
         kubernetes.io/metadata.name: space2
  - ports:
    - port: 53
      protocol: TCP
    - port: 53
      protocol: UDP
```
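A rough way to exercise both policies; the Pod and Service names are placeholders, and curl is assumed to be available in the Pod images:

```
# allowed: space1 egress to space2, space2 ingress from space1
k -n space1 exec POD1 -- curl -m 2 SERVICE2.space2
# should time out: traffic into space2 from a Namespace other than space1
k -n default run tmp --image nginx:alpine --restart Never --rm -i -- curl -m 2 SERVICE2.space2
# should time out: space1 egress only allows space2 plus DNS on port 53
k -n space1 exec POD1 -- curl -m 2 SERVICE3.default
```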
## podAntiAffinity

```
apiVersion: v1
kind: Pod
metadata:
  labels:
    level: test
  name: test
spec:
  containers:
  - image: nginx:alpine
    name: test
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: level
            operator: In
            values:
            - restricted
        topologyKey: kubernetes.io/hostname
```
Or equivalently with `matchLabels`:
```
...
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            level: restricted
        topologyKey: kubernetes.io/hostname
```
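To watch the rule take effect (pod-test.yaml is a placeholder filename for the manifest above):

```
k get pod -o wide -l level=restricted   # nodes already running restricted Pods
k apply -f pod-test.yaml                # placeholder filename for the manifest above
k get pod test -o wide                  # lands on a node without level=restricted Pods, or stays Pending if none is free
```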
## Configmap

```
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:alpine
    ports:
    - containerPort: 80
    env:
    - name: TREE1
      valueFrom:
        configMapKeyRef:
          name: trauerweide
          key: tree
    envFrom:
    - configMapRef:
        name: trauerweide
    volumeMounts:
    - name: config-volume
      mountPath: /etc/birke/
  volumes:
  - name: config-volume
    configMap:
      name: trauerweide
---
# a second Pod: birke mounted as a volume, trauerweide for the TREE1 variable
apiVersion: v1
kind: Pod
metadata:
  name: pod1
spec:
  volumes:
  - name: birke
    configMap:
      name: birke
  containers:
  - image: nginx:alpine
    name: pod1
    volumeMounts:
      - name: birke
        mountPath: /etc/birke
    env:
      - name: TREE1
        valueFrom:
          configMapKeyRef:
            name: trauerweide
            key: tree
```
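Both Pods expect the ConfigMaps to exist already; only the names and the key `tree` on trauerweide are given above, so the literals below are placeholders:

```
k create configmap trauerweide --from-literal tree=trauerweide
k create configmap birke --from-literal tree=birke   # key name assumed, only the ConfigMap name appears above
k exec nginx -- env | grep TREE1                     # set via configMapKeyRef / envFrom
k exec pod1 -- ls /etc/birke                         # one file per ConfigMap key
```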
## RBAC

```
# create SAs
k -n ns1 create sa pipeline
k -n ns2 create sa pipeline

# use ClusterRole view
k get clusterrole view   # there is a default one
k create clusterrolebinding pipeline-view --clusterrole view --serviceaccount ns1:pipeline --serviceaccount ns2:pipeline

# manage Deployments in both Namespaces
k create clusterrole -h   # examples
k create clusterrole pipeline-deployment-manager --verb create,delete --resource deployments
# instead of one ClusterRole we could also create the same Role in both Namespaces
k -n ns1 create rolebinding pipeline-deployment-manager --clusterrole pipeline-deployment-manager --serviceaccount ns1:pipeline
k -n ns2 create rolebinding pipeline-deployment-manager --clusterrole pipeline-deployment-manager --serviceaccount ns2:pipeline

# user smoke: create/delete Pods, Deployments and StatefulSets in Namespace applications
k -n applications create role smoke --verb create,delete --resource pods,deployments,sts
k -n applications create rolebinding smoke --role smoke --user smoke

# 2) view permission in all Namespaces but not kube-system
# As of now it's not possible to create deny-RBAC in K8s,
# so we allow view in all other Namespaces instead
k get ns   # get all Namespaces
k -n applications create rolebinding smoke-view --clusterrole view --user smoke
k -n default create rolebinding smoke-view --clusterrole view --user smoke
k -n kube-node-lease create rolebinding smoke-view --clusterrole view --user smoke
k -n kube-public create rolebinding smoke-view --clusterrole view --user smoke
```
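The bindings can be verified with `kubectl auth can-i`, impersonating the ServiceAccounts and the smoke user; the expected answers assume only the bindings above exist:

```
k auth can-i create deployments --as system:serviceaccount:ns1:pipeline -n ns1   # yes
k auth can-i delete deployments --as system:serviceaccount:ns2:pipeline -n ns2   # yes
k auth can-i update deployments --as system:serviceaccount:ns1:pipeline -n ns1   # no, view + create/delete only
k auth can-i create deployments --as smoke -n applications                       # yes, Role smoke
k auth can-i list pods --as smoke -n applications                                # yes, ClusterRole view
k auth can-i list pods --as smoke -n kube-system                                 # no binding there
```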