summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Oberhollenzer <david.oberhollenzer@tele2.at>2015-10-22 10:19:39 +0200
committerRichard Weinberger <richard@nod.at>2015-10-29 20:17:24 +0100
commit3634e0d9d773f05141867fa2ca5aa83c36b40b68 (patch)
tree5838bf0f6cd81b76da8a3b44fd383828a17a590e
parent44271cb375ebac50560d19962c5d6f5355233dba (diff)
downloadubifs-load-3634e0d9d773f05141867fa2ca5aa83c36b40b68.tar.gz
Add random filesize, append and bulk write support
Signed-off-by: David Oberhollenzer <david.oberhollenzer@tele2.at>
Signed-off-by: Richard Weinberger <richard@nod.at>
-rwxr-xr-xfsstress.sh75
1 file changed, 73 insertions(+), 2 deletions(-)
diff --git a/fsstress.sh b/fsstress.sh
index f0d58bb..c0c65a2 100755
--- a/fsstress.sh
+++ b/fsstress.sh
@@ -13,15 +13,20 @@
#
# TODO:
# - getopt support
-# - more write tests (file appned, bulk write, ...)
# - report results from UBI stats interface (needs rewrite in C)
#
set -u -e
+BIG_FILE_NUM=19
FILE_NUM=99
RUN_NUM=9
WRITE_MUCH=0
+BULK_WRITE=0
+MAX_SIZE_MB=5
+MIN_SIZE_MB=1
+MIN_BULK_SIZE_MB=10
+MAX_BULK_SIZE_MB=20
mkdir -p testdir
@@ -34,8 +39,11 @@ write_files()
{
for i in $(seq 0 $FILE_NUM)
do
+ SIZE=$(($RANDOM % ($MAX_SIZE_MB - $MIN_SIZE_MB) + $MIN_SIZE_MB))
+
touch testdir/smallfile$i
- dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=1 &> /dev/null
+ dd if=/dev/urandom of=testdir/smallfile$i bs=1M \
+ count=$SIZE &> /dev/null
done
}
@@ -61,6 +69,43 @@ write_rand_file_sync()
sync
}
+write_big_files_sync()
+{
+ for i in $(seq 0 $BIG_FILE_NUM)
+ do
+ SIZE=$(($RANDOM % ($MAX_BULK_SIZE_MB - $MIN_BULK_SIZE_MB) +
+ $MIN_BULK_SIZE_MB))
+
+ touch testdir/bigfile$i
+ sync
+ dd if=/dev/urandom of=testdir/bigfile$i bs=1M count=$SIZE &> /dev/null
+ sync
+ done
+}
+
+append_to_files()
+{
+ for i in $(seq 0 $FILE_NUM)
+ do
+ SIZE=$(($RANDOM % $MAX_SIZE_MB))
+
+ dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=$SIZE\
+ oflag=append conv=notrunc &> /dev/null
+ done
+}
+
+append_to_files_sync()
+{
+ for i in $(seq 0 $FILE_NUM)
+ do
+ SIZE=$(($RANDOM % $MAX_SIZE_MB))
+
+ dd if=/dev/urandom of=testdir/smallfile$i bs=1M count=$SIZE\
+ oflag=append conv=notrunc &> /dev/null
+ sync
+ done
+}
+
read_files()
{
for i in $(seq 1 $FILE_NUM)
@@ -69,11 +114,22 @@ read_files()
done
}
+read_big_files()
+{
+ for i in $(seq 0 $BIG_FILE_NUM)
+ do
+ cat testdir/bigfile$i > /dev/null
+ done
+}
+
for r in $(seq 0 $RUN_NUM)
do
drop_caches
write_files
+ drop_caches
+ append_to_files
+
for i in $(seq 1 20)
do
read_files
@@ -104,5 +160,20 @@ do
write_files_sync
read_files
done
+
+ for i in $(seq 1 20)
+ do
+ append_to_files_sync
+ read_files
+ done
+ fi
+
+ if [ $BULK_WRITE -eq 1 ]
+ then
+ for i in $(seq 1 20)
+ do
+ write_big_files_sync
+ read_big_files
+ done
fi
done