summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Oberhollenzer <david.oberhollenzer@sigma-star.at>2015-10-29 12:43:34 +0100
committerRichard Weinberger <richard@nod.at>2015-10-29 20:17:40 +0100
commitf68a09852db17aefe090c811551647c7da513400 (patch)
tree179b1e87315f9db94270ec9fb78c26f2d22dc77d
parent05218f918fd40a55e9a40285dbd888c0283c3241 (diff)
downloadubifs-load-f68a09852db17aefe090c811551647c7da513400.tar.gz
Rewrite fsstress.sh in C
Signed-off-by: David Oberhollenzer <david.oberhollenzer@sigma-star.at> Signed-off-by: Richard Weinberger <richard@nod.at>
-rw-r--r--Makefile8
-rw-r--r--fsstress.c277
2 files changed, 285 insertions, 0 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..e9f69b1
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,8 @@
# Strict ISO C89 with all warnings enabled; _DEFAULT_SOURCE exposes the
# non-ISO glibc declarations the program relies on (e.g. sync(), getopt_long).
CFLAGS:=-ansi -pedantic -Wall -Wextra -D_DEFAULT_SOURCE

# Default target: build the fsstress binary straight from the single source file.
fsstress: fsstress.c
	$(CC) $(CFLAGS) $^ -o $@

# Remove the binary and any stray object files.
.PHONY: clean
clean:
	$(RM) fsstress *.o
diff --git a/fsstress.c b/fsstress.c
new file mode 100644
index 0000000..b0567b5
--- /dev/null
+++ b/fsstress.c
@@ -0,0 +1,277 @@
/*
  The purpose of this program is to simulate a typical workload in a short
  amount of time. Filesystem reads are especially interesting for us, as
  they can lead to read disturb on the NAND flash. UBIFS utilizes the page
  cache, therefore reading the same file multiple times will not lead to
  multiple reads at the MTD level.
  But the page cache is not an infinite resource and the kernel is allowed
  to shrink/flush it at any time, which can lead to reads at the MTD level
  again. To simulate that, the program regularly flushes the page cache and
  the inode cache.

  TODO:
  - report results from the UBI stats interface
 */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <fcntl.h>
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
/* Behavior flags for copy_mb() / write_files() / write_rand_file(). */
#define WRITE_SYNC 1	/* call sync() after the data has been written */
#define WRITE_APPEND 2	/* append to the target file instead of truncating */

/* Test-selection flags accumulated while parsing the command line. */
#define F_BULK_WRITE 1
#define F_WRITE_MUCH 2

/* All test files are created inside this directory (relative to the CWD). */
static const char* outdir = "testdir";

/* Program name as provided by the C library; used by usage(). */
extern char* __progname;

/* Long command line options. The val characters double as the codes
   returned by getopt_long() and handled in main(); only b/w/r/n/N/h also
   exist as short options. */
static const struct option options[] = {
	{"bulk", 0, 0, 'b'},
	{"writemuch", 0, 0, 'w'},
	{"runs", 1, 0, 'r'},
	{"files", 1, 0, 'n'},
	{"big-files", 1, 0, 'N'},
	{"help", 0, 0, 'h'},
	{"min-mb", 1, 0, 'm'},
	{"max-mb", 1, 0, 'M'},
	{"min-bulk-mb", 1, 0, 'f'},
	{"max-bulk-mb", 1, 0, 'F'},
	{NULL, 0, NULL, 0}
};

/* Open `path` with the given flags/mode, reporting failures on stderr.
   Returns the file descriptor, or a negative value on error. */
static int tryopen(const char* path, int flags, int mode)
{
	int fd = open(path, flags, mode);

	if (fd < 0)
		fprintf(stderr, "Opening %s: %s\n", path, strerror(errno));

	return fd;
}

/* Write `megs` MiB of data to a file called `name`.

   A single 4 KiB block is read from `infd` once and then written out
   repeatedly; with /dev/urandom as input this is much cheaper than
   generating `megs` MiB of fresh random data.

   If WRITE_APPEND is set in `flags`, data is appended to an existing file,
   otherwise the file is created/truncated. If WRITE_SYNC is set, sync()
   is called after all data has been written.

   Returns non-zero on success, zero on failure (a message is printed). */
static int copy_mb(const char* name, int flags, int infd, size_t megs)
{
	int outfd, status = 0;
	char buffer[4096];
	ssize_t count;
	size_t i, j, have;

	outfd = tryopen(name, (flags & WRITE_APPEND) ? O_WRONLY|O_APPEND :
			O_WRONLY|O_CREAT|O_TRUNC, 0644);

	if (outfd < 0)
		return 0;

	/* read() may legitimately return less than requested, so loop
	   until the pattern buffer is completely filled */
	for (have = 0; have < sizeof(buffer); have += count) {
		count = read(infd, buffer + have, sizeof(buffer) - have);

		if (count <= 0)
			goto fail;
	}

	for (i = 0; i < megs; ++i) {
		for (j = 0; j < ((1024*1024)/sizeof(buffer)); ++j) {
			count = write(outfd, buffer, sizeof(buffer));

			if (count != (ssize_t)sizeof(buffer))
				goto fail;
		}
	}

	if (flags & WRITE_SYNC)
		sync();

	status = 1;
out:
	close(outfd);
	return status;
fail:
	fprintf(stderr, "copy_mb %s: %s\n", name, count < 0 ? strerror(errno) :
		(count == 0 ? "EOF" : "could not transfer entire 1M block"));
	goto out;
}
+
/* Ask the kernel to drop the page cache, dentries and inodes by writing
   "3" to /proc/sys/vm/drop_caches. This is strictly best-effort: failures
   (e.g. insufficient privileges) are silently ignored. */
static void drop_caches(void)
{
	const char* str = "3\n";
	/* The sysctl file must be opened for writing. The original O_RDONLY
	   was a bug: every subsequent write() failed with EBADF, so the
	   caches were never actually dropped. */
	int fd = open("/proc/sys/vm/drop_caches", O_WRONLY, 0);

	if (fd < 0)
		return;

	if (write(fd, str, strlen(str)) < 0) {
		/* best effort only; nothing sensible to do on failure */
	}

	close(fd);
}
+
+static void write_files(const char* prefix, int flags, int count,
+ int minsize, int maxsize)
+{
+ int i, size, infd;
+ char buffer[64];
+
+ infd = tryopen("/dev/urandom", O_RDONLY, 0);
+
+ if (infd<0)
+ exit(EXIT_FAILURE);
+
+ for (i=0; i<count; ++i) {
+ size = rand();
+ size = (size<0 ? -size : size) % (maxsize - minsize) + minsize;
+
+ if (!size)
+ continue;
+
+ sprintf(buffer, "%s/%s%d", outdir, prefix, i);
+
+ if (!copy_mb(buffer, flags, infd, size))
+ exit(EXIT_FAILURE);
+ }
+
+ close(infd);
+}
+
+static void write_rand_file(int flags, int count)
+{
+ char buffer[64];
+ int i, infd;
+
+ infd = tryopen("/dev/urandom", O_RDONLY, 0);
+
+ if (infd<0)
+ exit(EXIT_FAILURE);
+
+ i = rand();
+ sprintf(buffer, "%s/smallfile%d", outdir, (i<0?-i:i) % count);
+
+ if (!copy_mb(buffer, flags, infd, 1))
+ exit(EXIT_FAILURE);
+
+ close(infd);
+}
+
+static void read_files(const char* prefix, int count)
+{
+ char buffer[1024];
+ int i, fd;
+
+ for (i=0; i<count; ++i) {
+ sprintf(buffer, "%s/%s%d", outdir, prefix, i);
+ fd = tryopen(buffer, O_RDONLY, 0);
+ if (fd<0)
+ exit(EXIT_FAILURE);
+ while (read(fd, buffer, sizeof(buffer))>0) { }
+ close(fd);
+ }
+}
+
/* Print the command line help text to stdout and exit with EXIT_SUCCESS.
   This function does not return.
   NOTE(review): the size options are listed by their long names only;
   main()'s short option string does not include m/M/f/F, so they are
   reachable solely via getopt_long() val codes. */
static void usage(void)
{
	printf( "Usage: %s [arguments]\n", __progname );

	puts(
	" -b, --bulk If set, perform bulk write test.\n"
	" -w, --writemuch If set, perform write stress test.\n"
	" -r, --runs <count> Specify the number of test iterations.\n"
	" -n, --files <count> Specify the number of small files to create.\n"
	" -N, --big-files <count> Specify the number of large files to create.\n"
	" -h, --help Display this text and exit.\n");
	puts(
	" --min-mb <count> The minimum size (MiB) of small files.\n"
	" --max-mb <count> The maximum size (MiB) of small files.\n"
	" --min-bulk-mb <count> The minimum size (MiB) of large files.\n"
	" --max-bulk-mb <count> The maximum size (MiB) of large files.");

	exit(EXIT_SUCCESS);
}
+
/* Entry point: parse the command line, ensure the output directory exists,
   then run `runs` iterations of the mixed read/write workload. The small
   file read/write phases always run; the "write much" and "bulk write"
   phases only run when enabled with -w / -b. */
int main(int argc, char** argv)
{
	int i, j, idx=0, max_mb=5, min_mb=1, min_bulk_mb=10, max_bulk_mb=20;
	int flags=0, runs=10, bigfiles=20, files=100;
	struct stat sb;

	/* 'm', 'M', 'f' and 'F' are deliberately absent from the short
	   option string; they only arrive here as the val codes of the
	   corresponding long options. Numeric arguments are parsed with
	   strtol without error checking — garbage input silently becomes 0. */
	while ((i=getopt_long(argc, argv, "bwr:n:N:h", options, &idx))!=-1) {
		switch (i) {
		case 'b': flags |= F_BULK_WRITE; break;
		case 'w': flags |= F_WRITE_MUCH; break;
		case 'h': usage(); break;
		case 'r': runs = strtol(optarg, NULL, 10); break;
		case 'n': files = strtol(optarg, NULL, 10); break;
		case 'N': bigfiles = strtol(optarg, NULL, 10); break;
		case 'm': min_mb = strtol(optarg, NULL, 10); break;
		case 'M': max_mb = strtol(optarg, NULL, 10); break;
		case 'f': min_bulk_mb = strtol(optarg, NULL, 10); break;
		case 'F': max_bulk_mb = strtol(optarg, NULL, 10); break;
		default:
			fputs( "Unknown option\n", stderr );
			return EXIT_FAILURE;
		}
	}

	/* Create the output directory if it does not exist yet; refuse to
	   continue if the name is taken by a non-directory. */
	if (stat(outdir, &sb)!=0) {
		if (mkdir(outdir, 0755)!=0) {
			fprintf( stderr, "mkdir %s: %s", outdir, strerror(errno) );
			return EXIT_FAILURE;
		}
	} else if (!S_ISDIR(sb.st_mode)) {
		fprintf( stderr, "'%s' exists and is not a directory!\n", outdir );
		return EXIT_FAILURE;
	}

	srand(time(NULL));

	for (i=0; i<runs; ++i) {
		/* create the small files, then rewrite them in append mode,
		   dropping the caches before each write pass */
		drop_caches();
		write_files("smallfile", 0, files, min_mb, max_mb);

		drop_caches();
		write_files("smallfile", WRITE_APPEND, files, min_mb, max_mb);

		/* cached re-reads (page cache still warm) */
		for (j=0; j<20; ++j)
			read_files("smallfile", files);

		/* cold re-reads: drop the caches before every pass so the
		   reads actually reach the MTD layer */
		for (j=0; j<20; ++j) {
			drop_caches();
			read_files("smallfile", files);
		}

		/* interleave reads with single-file rewrites; guarded so
		   write_rand_file() never sees a zero file count */
		if (files > 0) {
			for (j=0; j<20; ++j) {
				read_files("smallfile", files);
				write_rand_file(0, files);
			}

			for (j=0; j<20; ++j) {
				read_files("smallfile", files);
				write_rand_file(WRITE_SYNC, files);
			}
		}

		/* optional write stress test (-w): synced full rewrites and
		   appends interleaved with reads */
		if (flags & F_WRITE_MUCH) {
			for (j=0; j<20; ++j) {
				write_files("smallfile", WRITE_SYNC, files, min_mb, max_mb);
				read_files("smallfile", files);
			}

			for (j=0; j<20; ++j) {
				write_files("smallfile", WRITE_APPEND|WRITE_SYNC, files,
						min_mb, max_mb);
				read_files("smallfile", files);
			}
		}

		/* optional bulk write test (-b): large synced files */
		if (flags & F_BULK_WRITE) {
			for (j=0; j<20; ++j) {
				write_files("bigfile", WRITE_SYNC, bigfiles,
						min_bulk_mb, max_bulk_mb);
				read_files("bigfile", bigfiles);
			}
		}
	}
	return EXIT_SUCCESS;
}
+