Mv pvc k8s

Script

mv-pvc.sh: copies the contents of an existing PVC into a new PVC (for example, to change its size or storage class) using a temporary rsync pod.

#!/bin/bash
set -eux

# Function to display usage instructions
usage() {
  echo "Usage: $0 <old_pvc_name> <new_pvc_name> <new_pvc_size_Gi> <new_storage_class> <namespace>"
  exit 1
}

# Check for correct number of arguments
if [ "$#" -ne 5 ]; then
  usage
fi

# Assign arguments to variables
OLD_PVC_NAME="$1"
NEW_PVC_NAME="$2"
NEW_PVC_SIZE_GI="$3"
NEW_STORAGE_CLASS="$4"
NAMESPACE="$5"

# Validate PVC size
if [[ ! "$NEW_PVC_SIZE_GI" =~ ^[0-9]+$ ]]; then
  echo "Error: New PVC size must be a positive integer (Gi)"
  usage
fi


DATA_COPY_POD_NAME="data-copy-pod"

# 1. Create the new PVC
kubectl create -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: $NEW_PVC_NAME
  namespace: $NAMESPACE
spec:
  accessModes: [ReadWriteOnce] # Or ReadWriteMany if needed
  resources:
    requests:
      storage: ${NEW_PVC_SIZE_GI}Gi
  storageClassName: $NEW_STORAGE_CLASS
EOF

# 2. Create the data copy pod
kubectl create -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: $DATA_COPY_POD_NAME
  namespace: $NAMESPACE
spec:
  restartPolicy: Never # Important: Only run once
  containers:
  - name: data-copy
    image: alpine:latest
    command: ["sh", "-c", "apk add --no-cache rsync && rsync -av /mnt/old/ /mnt/new/"]
    volumeMounts:
    - name: old-volume
      mountPath: /mnt/old
    - name: new-volume
      mountPath: /mnt/new
  volumes:
  - name: old-volume
    persistentVolumeClaim:
      claimName: $OLD_PVC_NAME
  - name: new-volume
    persistentVolumeClaim:
      claimName: $NEW_PVC_NAME
EOF

# 3. Wait for the data copy to finish (adjust timeout as needed)
# Pods expose no "Completed" or "Succeeded" condition, so wait on the pod phase instead (requires kubectl >= 1.23)
kubectl wait --for=jsonpath='{.status.phase}'=Succeeded --timeout=5m pod/$DATA_COPY_POD_NAME -n $NAMESPACE

# 4. Check the status of the copy pod
kubectl logs $DATA_COPY_POD_NAME -n $NAMESPACE
kubectl get pod $DATA_COPY_POD_NAME -n $NAMESPACE

# 5. (Optional) Update your application deployment to use the new PVC
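#    For example (deployment name and volume index are hypothetical; adjust the
#    path to wherever the claim is referenced in your pod template):
#    kubectl -n "$NAMESPACE" patch deployment my-app --type='json' \
#      -p='[{"op": "replace", "path": "/spec/template/spec/volumes/0/persistentVolumeClaim/claimName", "value": "'"$NEW_PVC_NAME"'"}]'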

# 6. (After verifying everything is working) Delete the data copy pod
kubectl delete pod $DATA_COPY_POD_NAME -n $NAMESPACE

# 7. Delete the old PVC (after you're *absolutely sure* you don't need it)
# kubectl delete pvc $OLD_PVC_NAME -n $NAMESPACE

# 8. Delete the old PV (if reclaimPolicy was Retain)
# kubectl get pv # Find the PV associated with the old PVC
# kubectl delete pv <pv-name>
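
A sample invocation, with all names hypothetical: copy the contents of PVC data-old into a new 20 Gi PVC data-new on the managed-premium storage class in namespace my-namespace:

./mv-pvc.sh data-old data-new 20 managed-premium my-namespace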

PVC release

Moves every PVC on a given storage class to a new storage class without copying any data: each backing PV is set to Retain, the PVC is deleted, and a replacement PVC is bound to the same PV under the new class.

#!/bin/bash
set -eux

if [ "$#" -ne 3 ]; then
    echo "Usage: $0 <old-storage-class> <new-storage-class> <namespace|'all'>"
    exit 1
fi

OLD_SC=$1
NEW_SC=$2
NS_FILTER=$3

if [ "$NS_FILTER" == "all" ]; then
    PVC_LIST=$(kubectl get pvc -A -o jsonpath="{range .items[?(@.spec.storageClassName=='$OLD_SC')]}{.metadata.namespace} {.metadata.name}{'\n'}{end}")
else
    PVC_LIST=$(kubectl get pvc -n "$NS_FILTER" -o jsonpath="{range .items[?(@.spec.storageClassName=='$OLD_SC')]}{.metadata.namespace} {.metadata.name}{'\n'}{end}")
fi

echo "$PVC_LIST" | while read -r NS PVC; do
    PV=$(kubectl get pvc "$PVC" -n "$NS" -o jsonpath="{.spec.volumeName}")
    STORAGE_SIZE=$(kubectl get pvc "$PVC" -n "$NS" -o jsonpath="{.spec.resources.requests.storage}")

    kubectl patch pv "$PV" --type='json' -p='[{"op": "replace", "path": "/spec/persistentVolumeReclaimPolicy", "value": "Retain"}]'

    kubectl patch pvc "$PVC" --type=merge -p '{"metadata":{"finalizers": []}}' || true
    kubectl delete pvc "$PVC" -n "$NS" --wait=true

    kubectl patch pv "$PV" --type='json' -p="[{'op': 'replace', 'path': '/spec/storageClassName', 'value':'$NEW_SC'}]"
    kubectl patch pv "$PV" --type=merge -p '{"spec":{"claimRef": null}}'

    kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: $PVC
  namespace: $NS
spec:
  storageClassName: $NEW_SC
  accessModes:
    - ReadWriteOnce # adjust if the original PVC used a different access mode
  resources:
    requests:
      storage: $STORAGE_SIZE
  volumeName: $PV
EOF

    # Restore the reclaim policy (assumes the original policy was Delete; skip if it was Retain)
    kubectl patch pv "$PV" --type='json' -p='[{"op": "replace", "path": "/spec/persistentVolumeReclaimPolicy", "value": "Delete"}]'
done
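
Assuming the script above is saved as pvc-release.sh (filename hypothetical), this example moves every PVC in the cluster from managed-csi to managed-csi-premium (storage class names are also examples) and then verifies the claims rebound:

./pvc-release.sh managed-csi managed-csi-premium all
kubectl get pvc -A   # every migrated PVC should be Bound to its original PV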

WIP

Azure-specific: snapshots an AKS managed disk and recreates it in a different availability zone under its original name.

#!/bin/bash

# Script to snapshot an AKS managed disk, clone it into a different zone, and recreate it under the original disk name.

# Usage: ./snapshot_and_clone_rename.sh <resource_group> <disk_name> <source_zone> <target_zone>

# Check for required arguments
if [ $# -ne 4 ]; then
  echo "Usage: $0 <resource_group> <disk_name> <source_zone> <target_zone>"
  exit 1
fi

RESOURCE_GROUP="$1"
DISK_NAME="$2"
SOURCE_ZONE="$3"
TARGET_ZONE="$4"
SNAPSHOT_NAME="${DISK_NAME}-snapshot-$(date +%Y%m%d%H%M%S)"
NEW_DISK_NAME="${DISK_NAME}-temp-cloned-${TARGET_ZONE}" # Temporary name for the cloned disk

# Check if Azure CLI is installed
if ! command -v az &> /dev/null; then
  echo "Azure CLI is not installed. Please install it."
  exit 1
fi

# Check if logged in
if ! az account show &> /dev/null; then
  echo "Please log in to Azure CLI using 'az login'."
  exit 1
fi

# Check if the disk exists
if ! az disk show --resource-group "$RESOURCE_GROUP" --name "$DISK_NAME" &> /dev/null; then
  echo "Disk '$DISK_NAME' not found in resource group '$RESOURCE_GROUP'."
  exit 1
fi

# Check that the disk is zonal
if [[ -z "$(az disk show --resource-group "$RESOURCE_GROUP" --name "$DISK_NAME" --query "zones[0]" -o tsv)" ]]; then
    echo "Disk '$DISK_NAME' does not appear to be zonal. Please ensure the disk is pinned to a specific zone."
    exit 1
fi

# Check that the source and target zones differ
if [[ "$SOURCE_ZONE" == "$TARGET_ZONE" ]]; then
    echo "Source and target zones cannot be the same."
    exit 1
fi

# Get the disk ID and location
DISK_ID=$(az disk show --resource-group "$RESOURCE_GROUP" --name "$DISK_NAME" --query id -o tsv)
LOCATION=$(az disk show --resource-group "$RESOURCE_GROUP" --name "$DISK_NAME" --query location -o tsv)

# Create the snapshot (managed disk snapshots are regional, not zonal, so no zone is set)
echo "Creating snapshot '$SNAPSHOT_NAME'..."
az snapshot create \
  --resource-group "$RESOURCE_GROUP" \
  --name "$SNAPSHOT_NAME" \
  --source "$DISK_ID" \
  --location "$LOCATION"

if [ $? -ne 0 ]; then
  echo "Failed to create snapshot."
  exit 1
fi

# Get the snapshot ID
SNAPSHOT_ID=$(az snapshot show --resource-group "$RESOURCE_GROUP" --name "$SNAPSHOT_NAME" --query id -o tsv)

# Create the new disk from the snapshot in the target zone with a temporary name
echo "Creating new disk '$NEW_DISK_NAME' in zone '$TARGET_ZONE'..."
az disk create \
  --resource-group "$RESOURCE_GROUP" \
  --name "$NEW_DISK_NAME" \
  --source "$SNAPSHOT_ID" \
  --location "$LOCATION" \
  --zone "$TARGET_ZONE"

if [ $? -ne 0 ]; then
  echo "Failed to create new disk."
  # Optionally delete the snapshot if disk creation fails
  az snapshot delete --resource-group "$RESOURCE_GROUP" --name "$SNAPSHOT_NAME"
  exit 1
fi

# Delete the original disk
echo "Deleting original disk '$DISK_NAME'..."
az disk delete --resource-group "$RESOURCE_GROUP" --name "$DISK_NAME" --yes

if [ $? -ne 0 ]; then
  echo "Failed to delete original disk."
  # Optionally delete the snapshot and new disk if original disk deletion fails
  az snapshot delete --resource-group "$RESOURCE_GROUP" --name "$SNAPSHOT_NAME"
  az disk delete --resource-group "$RESOURCE_GROUP" --name "$NEW_DISK_NAME" --yes
  exit 1
fi

# Recreate the cloned disk under the original name. Azure managed disks cannot
# be renamed in place, so copy the clone to a disk carrying the original name.
echo "Recreating disk '$NEW_DISK_NAME' as '$DISK_NAME'..."
NEW_DISK_ID=$(az disk show --resource-group "$RESOURCE_GROUP" --name "$NEW_DISK_NAME" --query id -o tsv)
az disk create \
  --resource-group "$RESOURCE_GROUP" \
  --name "$DISK_NAME" \
  --source "$NEW_DISK_ID" \
  --location "$LOCATION" \
  --zone "$TARGET_ZONE"

if [ $? -ne 0 ]; then
  echo "Failed to recreate the disk under the original name."
  # Keep the temporary clone so no data is lost; clean up the snapshot only
  az snapshot delete --resource-group "$RESOURCE_GROUP" --name "$SNAPSHOT_NAME"
  exit 1
fi

# Delete the temporary clone now that the final disk exists
az disk delete --resource-group "$RESOURCE_GROUP" --name "$NEW_DISK_NAME" --yes

# Clean up the snapshot (optional)
echo "Deleting snapshot '$SNAPSHOT_NAME'..."
az snapshot delete --resource-group "$RESOURCE_GROUP" --name "$SNAPSHOT_NAME"

echo "Disk '$DISK_NAME' created successfully in zone '$TARGET_ZONE'."

exit 0
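
A sample run, with the resource group and disk name hypothetical, moving a disk from zone 1 to zone 2:

./snapshot_and_clone_rename.sh myResourceGroup pvc-0123abcd-disk 1 2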

Key Changes:
 * Temporary Disk Name:
   * The cloned disk is now created with a temporary name (e.g., myAKSDisk-temp-cloned-westus2). This avoids a name collision with the original disk.
 * Original Disk Deletion:
   * The script now deletes the original disk after the cloned disk is successfully created.
   * Added the --yes flag to skip the confirmation prompt.
 * Recreating the Cloned Disk Under the Original Name:
   * Azure managed disks cannot be renamed in place, so the cloned disk is copied to a new disk carrying the original name and the temporary clone is deleted afterwards.
 * Enhanced Error Handling:
   * Added error checking for the original disk deletion and the recreate step.
   * Added cleanup routines in case of deletion or rename failures.
Important Considerations:
 * Data Loss Risk: Be extremely careful when using this script, as it involves deleting the original disk. Make sure you have backups or understand the implications before running it.
 * Downtime: This process will cause downtime for any services that rely on the disk.
 * Testing: Thoroughly test this script in a non-production environment before using it in production.
 * AKS Integration: If the disk is used by an AKS node pool, you will likely need to perform additional steps to update the node pool configuration after the disk is moved. Consider using node pool scaling to replace the nodes using the old disks with the new ones (see the PV-to-disk lookup example below).
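
To confirm which Azure managed disk backs a given PersistentVolume before running the script, the Azure Disk CSI driver records the disk's full Azure resource ID as the PV's volume handle (the PV name below is hypothetical):

kubectl get pv pvc-0123abcd-0000-0000-0000-000000000000 -o jsonpath='{.spec.csi.volumeHandle}'
# => /subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Compute/disks/<disk-name>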