author      Jens Axboe <jens.axboe@oracle.com>    2007-05-16 09:25:09 +0200
committer   Jens Axboe <jens.axboe@oracle.com>    2007-05-16 09:25:09 +0200
commit      e28875637094451a3c5ec4071f964c1a02dd8f5b (patch)
tree        77487b30c2cddb4b11ee63519e98d240d49ee7e2 /blktrace.c
parent      d84f8d4931be0c7519bd9f97f9914b07578a9854 (diff)
download    fio-e28875637094451a3c5ec4071f964c1a02dd8f5b.tar.gz
blktrace support: speedup reading of data
We used to read in data in really small chunks (48 bytes at a time, the size of one trace record). This is really slow for large traces, so add a fifo frontend that refills the cache in much larger chunks.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
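For reference, the fifo helpers the patch leans on (fifo_alloc(), fifo_put(), fifo_get(), fifo_len()) live in fio's fifo.c/fifo.h, a userspace take on the kernel's kfifo ring buffer. A rough sketch of the interface as this change uses it follows; the authoritative declarations are in fifo.h, so treat the exact types here as an approximation:

    /* Sketch of the fifo interface assumed by this patch (see fifo.h). */
    struct fifo;

    struct fifo *fifo_alloc(unsigned int size);   /* ring buffer of 'size' bytes */
    unsigned int fifo_put(struct fifo *fifo,      /* append up to 'len' bytes,    */
                          void *buffer,           /* returns bytes stored         */
                          unsigned int len);
    unsigned int fifo_get(struct fifo *fifo,      /* pop up to 'len' bytes,       */
                          void *buffer,           /* returns bytes copied         */
                          unsigned int len);
    unsigned int fifo_len(struct fifo *fifo);     /* bytes currently buffered     */
    void fifo_free(struct fifo *fifo);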
Diffstat (limited to 'blktrace.c')
-rw-r--r--    blktrace.c    75
1 file changed, 62 insertions(+), 13 deletions(-)
diff --git a/blktrace.c b/blktrace.c
index afa699cd..fc980918 100644
--- a/blktrace.c
+++ b/blktrace.c
@@ -8,6 +8,59 @@
 #include "fio.h"
 #include "blktrace_api.h"
 
+#define TRACE_FIFO_SIZE (sizeof(struct blk_io_trace) * 1000)
+
+/*
+ * fifo refill frontend, to avoid reading data in trace sized bites
+ */
+static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
+{
+        char buf[TRACE_FIFO_SIZE];
+        unsigned int total, left;
+        void *ptr;
+        int ret;
+
+        total = 0;
+        ptr = buf;
+        while (total < TRACE_FIFO_SIZE) {
+                left = TRACE_FIFO_SIZE - total;
+
+                ret = read(fd, ptr, left);
+                if (ret < 0) {
+                        td_verror(td, errno, "read blktrace file");
+                        return -1;
+                } else if (!ret)
+                        break;
+
+                fifo_put(fifo, ptr, ret);
+                ptr += ret;
+                total += ret;
+        }
+
+        return 0;
+}
+
+/*
+ * Retrieve 'len' bytes from the fifo, refilling if necessary.
+ */
+static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
+                          void *buf, unsigned int len)
+{
+        int ret;
+
+        if (fifo_len(fifo) >= len)
+                return fifo_get(fifo, buf, len);
+
+        ret = refill_fifo(td, fifo, fd);
+        if (ret < 0)
+                return ret;
+
+        if (fifo_len(fifo) < len)
+                return 0;
+
+        return fifo_get(fifo, buf, len);
+}
+
 /*
  * Just discard the pdu by seeking past it.
  */
@@ -120,6 +173,7 @@ int load_blktrace(struct thread_data *td, const char *filename)
         unsigned long ios[2];
         unsigned int cpu;
         unsigned int rw_bs[2];
+        struct fifo *fifo;
         int fd;
 
         fd = open(filename, O_RDONLY);
@@ -128,6 +182,8 @@ int load_blktrace(struct thread_data *td, const char *filename)
                 return 1;
         }
 
+        fifo = fifo_alloc(TRACE_FIFO_SIZE);
+
         td->o.size = 0;
 
         cpu = 0;
@@ -135,22 +191,15 @@ int load_blktrace(struct thread_data *td, const char *filename)
         ios[0] = ios[1] = 0;
         rw_bs[0] = rw_bs[1] = 0;
         do {
-                /*
-                 * Once this is working fully, I'll add a layer between
-                 * here and read to cache trace data. Then we can avoid
-                 * doing itsy bitsy reads, but instead pull in a larger
-                 * chunk of data at the time.
-                 */
-                int ret = read(fd, &t, sizeof(t));
+                int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));
 
-                if (ret < 0) {
-                        td_verror(td, errno, "read blktrace file");
+                if (ret < 0)
                         goto err;
-                } else if (!ret) {
+                else if (!ret)
+                        break;
+                else if (ret < (int) sizeof(t)) {
+                        log_err("fio: short fifo get\n");
                         break;
-                } else if (ret != sizeof(t)) {
-                        log_err("fio: short read on blktrace file\n");
-                        goto err;
                 }
 
                 if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
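As a back-of-the-envelope on the win (a standalone sketch, not part of the patch): with the 48-byte record size the commit message cites, TRACE_FIFO_SIZE works out to 48,000 bytes, so each fifo refill replaces roughly a thousand tiny read() calls with one large one. The toy program below just does the division; the 1 GiB trace size is made up for the example.

    /* Toy illustration of the syscall savings. The 48-byte record size
     * comes from the commit message above; the 1 GiB file size is a
     * hypothetical example. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long file_size = 1ULL << 30;      /* hypothetical 1 GiB trace */
            unsigned long long rec = 48;                    /* sizeof(struct blk_io_trace) */
            unsigned long long fifo_sz = rec * 1000;        /* TRACE_FIFO_SIZE */

            printf("per-record read() calls: %llu\n", file_size / rec);
            printf("fifo-refill read() calls: %llu\n",
                   (file_size + fifo_sz - 1) / fifo_sz);
            return 0;
    }

On those numbers, the per-record approach issues about 22.4 million read() calls against about 22.4 thousand for the fifo refill, a roughly 1000x reduction in syscall count.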