I/O tests
Jump to navigation
Jump to search
apt install fio
#!/usr/bin/env bash
# Run four fio benchmarks — random read, random write, sequential read,
# sequential write — against a 10 GiB file named "test" in the current
# directory, printing a timestamp before each run.
# Requires: fio (apt install fio). Each run is capped at 300 s; the test
# file is removed after each successful run.
set -o errexit

# Options shared by all four jobs (deduplicated from four identical
# copies). --sync=1 opens the file O_SYNC, so every I/O is durable.
FIO_COMMON=(--filename=test --sync=1 --bs=4k --numjobs=1
            --iodepth=4 --group_reporting --name=test
            --filesize=10G --runtime=300)

echo "$(date):Random read"
fio --rw=randread "${FIO_COMMON[@]}" && rm test

echo "$(date):Random write"
fio --rw=randwrite "${FIO_COMMON[@]}" && rm test

echo "$(date):Sequential read"
fio --rw=read "${FIO_COMMON[@]}" && rm test

echo "$(date):Sequential write"
fio --rw=write "${FIO_COMMON[@]}" && rm test
Output example
Sat Nov 25 16:25:47 UTC 2023:Random read test: (g=0): rw=randread, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=psync, iodepth=4 fio-3.28 Starting 1 process test: Laying out IO file (1 file / 10240MiB) Jobs: 1 (f=1): [r(1)][98.7%][r=130MiB/s][r=33.3k IOPS][eta 00m:02s] test: (groupid=0, jobs=1): err= 0: pid=1894: Sat Nov 25 16:29:04 2023 read: IOPS=17.7k, BW=69.2MiB/s (72.6MB/s)(10.0GiB/147882msec) clat (usec): min=3, max=9506, avg=55.42, stdev=23.89 lat (usec): min=3, max=9506, avg=55.49, stdev=23.91 clat percentiles (usec): | 1.00th=[ 6], 5.00th=[ 7], 10.00th=[ 45], 20.00th=[ 53], | 30.00th=[ 57], 40.00th=[ 58], 50.00th=[ 59], 60.00th=[ 62], | 70.00th=[ 64], 80.00th=[ 66], 90.00th=[ 68], 95.00th=[ 70], | 99.00th=[ 74], 99.50th=[ 75], 99.90th=[ 80], 99.95th=[ 83], | 99.99th=[ 1614] bw ( KiB/s): min=64224, max=175456, per=99.56%, avg=70592.32, stdev=9296.71, samples=295 iops : min=16056, max=43864, avg=17648.03, stdev=2324.16, samples=295 lat (usec) : 4=0.06%, 10=8.27%, 20=0.02%, 50=8.75%, 100=82.87% lat (usec) : 250=0.01%, 500=0.01%, 750=0.01% lat (msec) : 2=0.01%, 10=0.01% cpu : usr=3.31%, sys=96.66%, ctx=822, majf=14, minf=15 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued rwts: total=2621440,0,0,0 short=0,0,0,0 dropped=0,0,0,0 latency : target=0, window=0, percentile=100.00%, depth=4 Run status group 0 (all jobs): READ: bw=69.2MiB/s (72.6MB/s), 69.2MiB/s-69.2MiB/s (72.6MB/s-72.6MB/s), io=10.0GiB (10.7GB), run=147882-147882msec
Random write 512b LBA
root@iotest:~# ./b.sh Sat Nov 25 16:31:58 UTC 2023:Random write test: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=psync, iodepth=4 fio-3.28 Starting 1 process test: Laying out IO file (1 file / 10240MiB) Jobs: 1 (f=1): [w(1)][100.0%][w=5081KiB/s][w=1270 IOPS][eta 00m:00s] test: (groupid=0, jobs=1): err= 0: pid=2656: Sat Nov 25 16:36:59 2023 write: IOPS=1392, BW=5569KiB/s (5703kB/s)(1632MiB/300053msec); 0 zone resets clat (usec): min=87, max=309487, avg=712.01, stdev=1711.10 lat (usec): min=87, max=309488, avg=712.62, stdev=1711.11 clat percentiles (usec): | 1.00th=[ 163], 5.00th=[ 412], 10.00th=[ 529], 20.00th=[ 603], | 30.00th=[ 644], 40.00th=[ 676], 50.00th=[ 701], 60.00th=[ 725], | 70.00th=[ 750], 80.00th=[ 791], 90.00th=[ 840], 95.00th=[ 889], | 99.00th=[ 1106], 99.50th=[ 1319], 99.90th=[ 2343], 99.95th=[ 2507], | 99.99th=[77071] bw ( KiB/s): min= 2080, max=25368, per=100.00%, avg=5577.50, stdev=1138.26, samples=599 iops : min= 520, max= 6342, avg=1394.34, stdev=284.56, samples=599 lat (usec) : 100=0.02%, 250=1.59%, 500=7.01%, 750=60.48%, 1000=29.51% lat (msec) : 2=1.22%, 4=0.14%, 10=0.01%, 20=0.01%, 50=0.01% lat (msec) : 100=0.01%, 250=0.01%, 500=0.01% cpu : usr=1.54%, sys=27.88%, ctx=840390, majf=0, minf=1148 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued rwts: total=0,417781,0,0 short=0,0,0,0 dropped=0,0,0,0 latency : target=0, window=0, percentile=100.00%, depth=4 Run status group 0 (all jobs): WRITE: bw=5569KiB/s (5703kB/s), 5569KiB/s-5569KiB/s (5703kB/s-5703kB/s), io=1632MiB (1711MB), run=300053-300053msec
Random Write 4k LBA
Sun Nov 26 14:59:25 UTC 2023:Random write test: (g=0): rw=randwrite, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=psync, iodepth=4 fio-3.28 Starting 1 process test: Laying out IO file (1 file / 10240MiB) Jobs: 1 (f=1): [w(1)][100.0%][w=5396KiB/s][w=1349 IOPS][eta 00m:00s] test: (groupid=0, jobs=1): err= 0: pid=563: Sun Nov 26 15:04:26 2023 write: IOPS=1548, BW=6195KiB/s (6343kB/s)(1815MiB/300001msec); 0 zone resets clat (usec): min=97, max=357060, avg=640.23, stdev=1565.86 lat (usec): min=97, max=357061, avg=640.75, stdev=1565.88 clat percentiles (usec): | 1.00th=[ 161], 5.00th=[ 217], 10.00th=[ 269], 20.00th=[ 465], | 30.00th=[ 578], 40.00th=[ 627], 50.00th=[ 668], 60.00th=[ 701], | 70.00th=[ 725], 80.00th=[ 758], 90.00th=[ 807], 95.00th=[ 857], | 99.00th=[ 1188], 99.50th=[ 1369], 99.90th=[ 2343], 99.95th=[ 2474], | 99.99th=[84411] bw ( KiB/s): min= 2004, max=22256, per=100.00%, avg=6198.96, stdev=2267.76, samples=599 iops : min= 501, max= 5564, avg=1549.69, stdev=566.95, samples=599 lat (usec) : 100=0.01%, 250=9.03%, 500=13.50%, 750=55.32%, 1000=20.70% lat (msec) : 2=1.29%, 4=0.12%, 10=0.01%, 20=0.01%, 50=0.01% lat (msec) : 100=0.01%, 250=0.01%, 500=0.01% cpu : usr=1.51%, sys=29.04%, ctx=935332, majf=15, minf=519 IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% issued rwts: total=0,464613,0,0 short=0,0,0,0 dropped=0,0,0,0 latency : target=0, window=0, percentile=100.00%, depth=4 Run status group 0 (all jobs): WRITE: bw=6195KiB/s (6343kB/s), 6195KiB/s-6195KiB/s (6343kB/s-6343kB/s), io=1815MiB (1903MB), run=300001-300001msec
Cheat Sheets
One
----------------------------------------------------------------------------------------------------- Read Test fio --name=randread --ioengine=libaio --iodepth=16 --rw=randread --bs=4k --direct=0 --size=512M --numjobs=4 --runtime=240 --group_reporting ----------------------------------------------------------------------------------------------------- writes a total 2GB files [4 jobs x 512 MB = 2GB] running 4 processes at a time: fio --name=randwrite --ioengine=libaio --iodepth=1 --rw=randwrite --bs=4k --direct=0 --size=512M --numjobs=4 --runtime=240 --group_reporting ----------------------------------------------------------------------------------------------------- Read Write Performance Test fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=random_read_write.fio --bs=4k --iodepth=64 --size=4G --readwrite=randrw --rwmixread=75 ----------------------------------------------------------------------------------------------------- Sequential Reads – Async mode – 8K block size – Direct IO – 100% Reads fio --name=seqread --rw=read --direct=1 --ioengine=libaio --bs=8k --numjobs=8 --size=1G --runtime=600 --group_reporting ----------------------------------------------------------------------------------------------------- Sequential Writes – Async mode – 32K block size – Direct IO – 100% Writes fio --name=seqwrite --rw=write --direct=1 --ioengine=libaio --bs=32k --numjobs=4 --size=2G --runtime=600 --group_reporting ----------------------------------------------------------------------------------------------------- Random Reads – Async mode – 8K block size – Direct IO – 100% Reads fio --name=randread --rw=randread --direct=1 --ioengine=libaio --bs=8k --numjobs=16 --size=1G --runtime=600 --group_reporting ----------------------------------------------------------------------------------------------------- Random Writes – Async mode – 64K block size – Direct IO – 100% Writes fio --name=randwrite --rw=randwrite --direct=1 
--ioengine=libaio --bs=64k --numjobs=8 --size=512m --runtime=600 --group_reporting ----------------------------------------------------------------------------------------------------- Random Read/Writes – Async mode – 16K block size – Direct IO – 90% Reads/10% Writes fio --name=randrw --rw=randrw --direct=1 --ioengine=libaio --bs=16k --numjobs=8 --rwmixread=90 --size=1G --runtime=600 --group_reporting ----------------------------------------------------------------------------------------------------- creates 8 files (numjobs=8) each with size 512MB (size) at 64K block size (bs=64k) and will perform random read/write (rw=randrw) with the mixed workload of 70% reads and 30% writes. The job will run for full 5 minutes (runtime=300 & time_based) even if the files were created and read/written. fio --name=randrw --ioengine=libaio --iodepth=1 --rw=randrw --bs=64k --direct=1 --size=512m --numjobs=8 --runtime=300 --group_reporting --time_based --rwmixread=70 ----------------------------------------------------------------------------------------------------- compare disk performance with a simple 3:1 4K read/write test creates a 4 GB file and perform 4KB reads and writes using a 75%/25% split within the file, with 64 operations running at a time. The 3:1 ratio represents a typical database. 
fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=test --bs=4k --iodepth=64 --size=4G --readwrite=randrw --rwmixread=75 ----------------------------------------------------------------------------------------------------- Random read performance fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=test --bs=4k --iodepth=64 --size=4G --readwrite=randread ----------------------------------------------------------------------------------------------------- Random write performance fio --randrepeat=1 --ioengine=libaio --direct=1 --gtod_reduce=1 --name=test --filename=test --bs=4k --iodepth=64 --size=4G --readwrite=randwrite ----------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------- -----------------------------------------------------------------------------------------------------
Pure Bash & dd
ibs
dd_ibs_test.sh
#!/bin/bash
# Measure dd READ throughput (input block size, ibs) across a range of
# block sizes by reading a 128 MiB random-data file out to /dev/null.
# Usage: dd_ibs_test.sh [test_file]
# Since we're dealing with dd, abort if any errors occur
set -e

TEST_FILE=${1:-dd_ibs_testfile}
TEST_FILE_SIZE=134217728   # 128 MiB

# Refuse to run over a pre-existing file so we never clobber user data.
# (This also makes the old TEST_FILE_EXISTS bookkeeping unnecessary: if
# we get past this check, the file is always ours to delete.)
if [ -e "$TEST_FILE" ]; then
  echo "Test file $TEST_FILE exists, aborting."
  exit 1
fi

if [ "$EUID" -ne 0 ]; then
  echo "NOTE: Kernel cache will not be cleared between tests without sudo. This will likely cause inaccurate results." 1>&2
fi

# Create test file
echo 'Generating test file...'
BLOCK_SIZE=65536
COUNT=$((TEST_FILE_SIZE / BLOCK_SIZE))
dd if=/dev/urandom of="$TEST_FILE" bs="$BLOCK_SIZE" count="$COUNT" conv=fsync > /dev/null 2>&1

# Remove the test file on ANY exit path — previously an early failure
# (e.g. grep matching nothing under set -e) left the 128 MiB file behind.
cleanup() { rm -f -- "$TEST_FILE"; }
trap cleanup EXIT

# Header (fixed format string, not a variable, per printf best practice)
printf '%8s : %s\n' 'block size' 'transfer rate'

# Block sizes of 512b 1K 2K 4K 8K 16K 32K 64K 128K 256K 512K 1M 2M 4M 8M 16M 32M 64M
for BLOCK_SIZE in 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216 33554432 67108864
do
  # Clear kernel cache to ensure a more accurate test (root only)
  [ "$EUID" -eq 0 ] && [ -e /proc/sys/vm/drop_caches ] && echo 3 > /proc/sys/vm/drop_caches

  # Read test file out to /dev/null with the specified block size;
  # dd reports its throughput summary on stderr, so swap the streams.
  DD_RESULT=$(dd if="$TEST_FILE" of=/dev/null bs="$BLOCK_SIZE" 2>&1 1>/dev/null)

  # Extract transfer rate (\grep bypasses any shell alias)
  TRANSFER_RATE=$(echo "$DD_RESULT" | \grep --only-matching -E '[0-9.]+ ([MGk]?B|bytes)/s(ec)?')
  printf '%8s : %s\n' "$BLOCK_SIZE" "$TRANSFER_RATE"
done
obs
dd_obs_test.sh
#!/bin/bash
# Measure dd WRITE throughput (output block size, obs) across a range of
# block sizes by writing a 128 MiB file from /dev/zero with conv=fsync.
# Usage: dd_obs_test.sh [test_file]
# Since we're dealing with dd, abort if any errors occur
set -e

TEST_FILE=${1:-dd_obs_testfile}
# Remember whether the target pre-existed so we only delete files WE made.
TEST_FILE_EXISTS=0
if [ -e "$TEST_FILE" ]; then TEST_FILE_EXISTS=1; fi
TEST_FILE_SIZE=134217728   # 128 MiB

if [ "$EUID" -ne 0 ]; then
  echo "NOTE: Kernel cache will not be cleared between tests without sudo. This will likely cause inaccurate results." 1>&2
fi

# Header (fixed format string, not a variable, per printf best practice)
printf '%8s : %s\n' 'block size' 'transfer rate'

# Block sizes of 512b 1K 2K 4K 8K 16K 32K 64K 128K 256K 512K 1M 2M 4M 8M 16M 32M 64M
for BLOCK_SIZE in 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216 33554432 67108864
do
  # Calculate number of segments required to copy
  COUNT=$((TEST_FILE_SIZE / BLOCK_SIZE))

  if [ "$COUNT" -le 0 ]; then
    echo "Block size of $BLOCK_SIZE estimated to require $COUNT blocks, aborting further tests."
    break
  fi

  # Clear kernel cache to ensure a more accurate test (root only)
  [ "$EUID" -eq 0 ] && [ -e /proc/sys/vm/drop_caches ] && echo 3 > /proc/sys/vm/drop_caches

  # Create a test file with the specified block size; dd reports its
  # throughput summary on stderr, so swap the streams.
  DD_RESULT=$(dd if=/dev/zero of="$TEST_FILE" bs="$BLOCK_SIZE" count="$COUNT" conv=fsync 2>&1 1>/dev/null)

  # Extract the transfer rate from dd's STDERR output
  TRANSFER_RATE=$(echo "$DD_RESULT" | \grep --only-matching -E '[0-9.]+ ([MGk]?B|bytes)/s(ec)?')

  # Clean up the test file if we created one.
  # BUGFIX: condition was inverted (-ne 0), which deleted a PRE-EXISTING
  # user file and left the file this script created lying around.
  if [ "$TEST_FILE_EXISTS" -eq 0 ]; then rm -f -- "$TEST_FILE"; fi

  # Output the result
  printf '%8s : %s\n' "$BLOCK_SIZE" "$TRANSFER_RATE"
done