diff --git a/aiostress/aio-stress.c b/aiostress/aio-stress.c new file mode 100644 index 000000000..91af264fd --- /dev/null +++ b/aiostress/aio-stress.c @@ -0,0 +1,1514 @@ +/* + * Copyright (c) 2004 SuSE, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * + * aio-stress + * + * will open or create each file on the command line, and start a series + * of aio to it. + * + * aio is done in a rotating loop. first file1 gets 8 requests, then + * file2, then file3 etc. As each file finishes writing, it is switched + * to reads + * + * io buffers are aligned in case you want to do raw io + * + * compile with gcc -Wall -laio -lpthread -o aio-stress aio-stress.c + * + * run aio-stress -h to see the options + * + * Please mail Chris Mason (mason@suse.com) with bug reports or patches + */ +#define _FILE_OFFSET_BITS 64 +#define PROG_VERSION "0.21" +#define NEW_GETEVENTS + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IO_FREE 0 +#define IO_PENDING 1 +#define RUN_FOREVER -1 + +#ifndef O_DIRECT +#define O_DIRECT 040000 /* direct disk access hint */ +#endif + +enum { + WRITE, + READ, + RWRITE, + RREAD, + LAST_STAGE, +}; + +#define USE_MALLOC 0 +#define USE_SHM 1 +#define USE_SHMFS 2 + +/* + * various globals, these are effectively read only by the time the threads + * are started + */ +long stages = 0; +unsigned long page_size_mask; +int o_direct = 0; +int o_sync = 0; +int latency_stats = 0; +int completion_latency_stats = 0; +int io_iter = 8; +int iterations = RUN_FOREVER; +int max_io_submit = 0; +long rec_len = 64 * 1024; +int depth = 64; +int num_threads = 1; +int num_contexts = 1; +off_t context_offset = 2 * 1024 * 1024; +int fsync_stages = 1; +int use_shm = 0; +int shm_id; +char *unaligned_buffer = NULL; +char *aligned_buffer = NULL; +int padded_reclen = 0; +int stonewall = 1; +int verify = 0; +char *verify_buf = NULL; +int unlink_files = 0; + +struct io_unit; +struct thread_info; + +/* pthread mutexes and other globals for keeping the threads in sync */ +pthread_cond_t stage_cond = PTHREAD_COND_INITIALIZER; +pthread_mutex_t stage_mutex = PTHREAD_MUTEX_INITIALIZER; +int threads_ending = 0; +int threads_starting = 0; +struct timeval global_stage_start_time; +struct thread_info *global_thread_info; + +/* + * latencies during io_submit are measured, these are the + * granularities for deviations 
+ */ +#define DEVIATIONS 6 +int deviations[DEVIATIONS] = { 100, 250, 500, 1000, 5000, 10000 }; +struct io_latency { + double max; + double min; + double total_io; + double total_lat; + double deviations[DEVIATIONS]; +}; + +/* container for a series of operations to a file */ +struct io_oper { + /* already open file descriptor, valid for whatever operation you want */ + int fd; + + /* starting byte of the operation */ + off_t start; + + /* ending byte of the operation */ + off_t end; + + /* size of the read/write buffer */ + int reclen; + + /* max number of pending requests before a wait is triggered */ + int depth; + + /* current number of pending requests */ + int num_pending; + + /* last error, zero if there were none */ + int last_err; + + /* total number of errors hit. */ + int num_err; + + /* read,write, random, etc */ + int rw; + + /* number of ios that will get sent to aio */ + int total_ios; + + /* number of ios we've already sent */ + int started_ios; + + /* last offset used in an io operation */ + off_t last_offset; + + /* stonewalled = 1 when we got cut off before submitting all our ios */ + int stonewalled; + + /* list management */ + struct io_oper *next; + struct io_oper *prev; + + struct timeval start_time; + + char *file_name; +}; + +/* a single io, and all the tracking needed for it */ +struct io_unit { + /* note, iocb must go first! */ + struct iocb iocb; + + /* pointer to parent io operation struct */ + struct io_oper *io_oper; + + /* aligned buffer */ + char *buf; + + /* size of the aligned buffer (record size) */ + int buf_size; + + /* state of this io unit (free, pending, done) */ + int busy; + + /* result of last operation */ + long res; + + struct io_unit *next; + + struct timeval io_start_time; /* time of io_submit */ +}; + +struct thread_info { + io_context_t io_ctx; + pthread_t tid; + + /* allocated array of io_unit structs */ + struct io_unit *ios; + + /* list of io units available for io */ + struct io_unit *free_ious; + + /* number of io units in the ios array */ + int num_global_ios; + + /* number of io units in flight */ + int num_global_pending; + + /* preallocated array of iocb pointers, only used in run_active */ + struct iocb **iocbs; + + /* preallocated array of events */ + struct io_event *events; + + /* size of the events array */ + int num_global_events; + + /* latency stats for io_submit */ + struct io_latency io_submit_latency; + + /* list of operations still in progress, and of those finished */ + struct io_oper *active_opers; + struct io_oper *finished_opers; + + /* number of files this thread is doing io on */ + int num_files; + + /* how much io this thread did in the last stage */ + double stage_mb_trans; + + /* latency completion stats i/o time from io_submit until io_getevents */ + struct io_latency io_completion_latency; +}; + +/* + * return seconds between start_tv and stop_tv in double precision + */ +static double time_since(struct timeval *start_tv, struct timeval *stop_tv) +{ + double sec, usec; + double ret; + sec = stop_tv->tv_sec - start_tv->tv_sec; + usec = stop_tv->tv_usec - start_tv->tv_usec; + if (sec > 0 && usec < 0) { + sec--; + usec += 1000000; + } + ret = sec + usec / (double)1000000; + if (ret < 0) + ret = 0; + return ret; +} + +/* + * return seconds between start_tv and now in double precision + */ +static double time_since_now(struct timeval *start_tv) +{ + struct timeval stop_time; + gettimeofday(&stop_time, NULL); + return time_since(start_tv, &stop_time); +} + +/* + * Add latency info to latency struct + */ +static void 
calc_latency(struct timeval *start_tv, struct timeval *stop_tv, + struct io_latency *lat) +{ + double delta; + int i; + delta = time_since(start_tv, stop_tv); + delta = delta * 1000; + + if (delta > lat->max) + lat->max = delta; + if (!lat->min || delta < lat->min) + lat->min = delta; + lat->total_io++; + lat->total_lat += delta; + for (i = 0 ; i < DEVIATIONS ; i++) { + if (delta < deviations[i]) { + lat->deviations[i]++; + break; + } + } +} + +static void oper_list_add(struct io_oper *oper, struct io_oper **list) +{ + if (!*list) { + *list = oper; + oper->prev = oper->next = oper; + return; + } + oper->prev = (*list)->prev; + oper->next = *list; + (*list)->prev->next = oper; + (*list)->prev = oper; + return; +} + +static void oper_list_del(struct io_oper *oper, struct io_oper **list) +{ + if ((*list)->next == (*list)->prev && *list == (*list)->next) { + *list = NULL; + return; + } + oper->prev->next = oper->next; + oper->next->prev = oper->prev; + if (*list == oper) + *list = oper->next; +} + +/* worker func to check error fields in the io unit */ +static int check_finished_io(struct io_unit *io) { + int i; + if (io->res != io->buf_size) { + + struct stat s; + fstat(io->io_oper->fd, &s); + + /* + * If file size is large enough for the read, then this short + * read is an error. + */ + if ((io->io_oper->rw == READ || io->io_oper->rw == RREAD) && + s.st_size > (io->iocb.u.c.offset + io->res)) { + + fprintf(stderr, "io err %lu (%s) op %d, off %Lu size %d\n", + io->res, strerror(-io->res), io->iocb.aio_lio_opcode, + io->iocb.u.c.offset, io->buf_size); + io->io_oper->last_err = io->res; + io->io_oper->num_err++; + return -1; + } + } + if (verify && io->io_oper->rw == READ) { + if (memcmp(io->buf, verify_buf, io->io_oper->reclen)) { + fprintf(stderr, "verify error, file %s offset %Lu contents (offset:bad:good):\n", + io->io_oper->file_name, io->iocb.u.c.offset); + + for (i = 0 ; i < io->io_oper->reclen ; i++) { + if (io->buf[i] != verify_buf[i]) { + fprintf(stderr, "%d:%c:%c ", i, io->buf[i], verify_buf[i]); + } + } + fprintf(stderr, "\n"); + } + + } + return 0; +} + +/* worker func to check the busy bits and get an io unit ready for use */ +static int grab_iou(struct io_unit *io, struct io_oper *oper) { + if (io->busy == IO_PENDING) + return -1; + + io->busy = IO_PENDING; + io->res = 0; + io->io_oper = oper; + return 0; +} + +char *stage_name(int rw) { + switch(rw) { + case WRITE: + return "write"; + case READ: + return "read"; + case RWRITE: + return "random write"; + case RREAD: + return "random read"; + } + return "unknown"; +} + +static inline double oper_mb_trans(struct io_oper *oper) { + return ((double)oper->started_ios * (double)oper->reclen) / + (double)(1024 * 1024); +} + +static void print_time(struct io_oper *oper) { + double runtime; + double tput; + double mb; + + runtime = time_since_now(&oper->start_time); + mb = oper_mb_trans(oper); + tput = mb / runtime; + fprintf(stderr, "%s on %s (%.2f MB/s) %.2f MB in %.2fs\n", + stage_name(oper->rw), oper->file_name, tput, mb, runtime); +} + +static void print_lat(char *str, struct io_latency *lat) { + double avg = lat->total_lat / lat->total_io; + int i; + double total_counted = 0; + fprintf(stderr, "%s min %.2f avg %.2f max %.2f\n\t", + str, lat->min, avg, lat->max); + + for (i = 0 ; i < DEVIATIONS ; i++) { + fprintf(stderr, " %.0f < %d", lat->deviations[i], deviations[i]); + total_counted += lat->deviations[i]; + } + if (total_counted && lat->total_io - total_counted) + fprintf(stderr, " < %.0f", lat->total_io - total_counted); + 
fprintf(stderr, "\n"); + memset(lat, 0, sizeof(*lat)); +} + +static void print_latency(struct thread_info *t) +{ + struct io_latency *lat = &t->io_submit_latency; + print_lat("latency", lat); +} + +static void print_completion_latency(struct thread_info *t) +{ + struct io_latency *lat = &t->io_completion_latency; + print_lat("completion latency", lat); +} + +/* + * updates the fields in the io operation struct that belongs to this + * io unit, and make the io unit reusable again + */ +void finish_io(struct thread_info *t, struct io_unit *io, long result, + struct timeval *tv_now) { + struct io_oper *oper = io->io_oper; + + calc_latency(&io->io_start_time, tv_now, &t->io_completion_latency); + io->res = result; + io->busy = IO_FREE; + io->next = t->free_ious; + t->free_ious = io; + oper->num_pending--; + t->num_global_pending--; + check_finished_io(io); + if (oper->num_pending == 0 && + (oper->started_ios == oper->total_ios || oper->stonewalled)) + { + print_time(oper); + } +} + +int read_some_events(struct thread_info *t) { + struct io_unit *event_io; + struct io_event *event; + int nr; + int i; + int min_nr = io_iter; + struct timeval stop_time; + + if (t->num_global_pending < io_iter) + min_nr = t->num_global_pending; + +#ifdef NEW_GETEVENTS + nr = io_getevents(t->io_ctx, min_nr, t->num_global_events, t->events,NULL); +#else + nr = io_getevents(t->io_ctx, t->num_global_events, t->events, NULL); +#endif + if (nr <= 0) + return nr; + + gettimeofday(&stop_time, NULL); + for (i = 0 ; i < nr ; i++) { + event = t->events + i; + event_io = (struct io_unit *)((unsigned long)event->obj); + finish_io(t, event_io, event->res, &stop_time); + } + return nr; +} + +/* + * finds a free io unit, waiting for pending requests if required. returns + * null if none could be found + */ +static struct io_unit *find_iou(struct thread_info *t, struct io_oper *oper) +{ + struct io_unit *event_io; + int nr; + +retry: + if (t->free_ious) { + event_io = t->free_ious; + t->free_ious = t->free_ious->next; + if (grab_iou(event_io, oper)) { + fprintf(stderr, "io unit on free list but not free\n"); + abort(); + } + return event_io; + } + nr = read_some_events(t); + if (nr > 0) + goto retry; + else + fprintf(stderr, "no free ious after read_some_events\n"); + return NULL; +} + +/* + * wait for all pending requests for this io operation to finish + */ +static int io_oper_wait(struct thread_info *t, struct io_oper *oper) { + struct io_event event; + struct io_unit *event_io; + + if (oper == NULL) { + return 0; + } + + if (oper->num_pending == 0) + goto done; + + /* this func is not speed sensitive, no need to go wild reading + * more than one event at a time + */ +#ifdef NEW_GETEVENTS + while(io_getevents(t->io_ctx, 1, 1, &event, NULL) > 0) { +#else + while(io_getevents(t->io_ctx, 1, &event, NULL) > 0) { +#endif + struct timeval tv_now; + event_io = (struct io_unit *)((unsigned long)event.obj); + + gettimeofday(&tv_now, NULL); + finish_io(t, event_io, event.res, &tv_now); + + if (oper->num_pending == 0) + break; + } +done: + if (oper->num_err) { + fprintf(stderr, "%u errors on oper, last %u\n", + oper->num_err, oper->last_err); + } + return 0; +} + +off_t random_byte_offset(struct io_oper *oper) { + off_t num; + off_t rand_byte = oper->start; + off_t range; + off_t offset = 1; + + range = (oper->end - oper->start) / (1024 * 1024); + if ((page_size_mask+1) > (1024 * 1024)) + offset = (page_size_mask+1) / (1024 * 1024); + if (range < offset) + range = 0; + else + range -= offset; + + /* find a random mb offset */ + num = 1 + 
(int)((double)range * rand() / (RAND_MAX + 1.0 )); + rand_byte += num * 1024 * 1024; + + /* find a random byte offset */ + num = 1 + (int)((double)(1024 * 1024) * rand() / (RAND_MAX + 1.0)); + + /* page align */ + num = (num + page_size_mask) & ~page_size_mask; + rand_byte += num; + + if (rand_byte + oper->reclen > oper->end) { + rand_byte -= oper->reclen; + } + return rand_byte; +} + +/* + * build an aio iocb for an operation, based on oper->rw and the + * last offset used. This finds the struct io_unit that will be attached + * to the iocb, and things are ready for submission to aio after this + * is called. + * + * returns null on error + */ +static struct io_unit *build_iocb(struct thread_info *t, struct io_oper *oper) +{ + struct io_unit *io; + off_t rand_byte; + + io = find_iou(t, oper); + if (!io) { + fprintf(stderr, "unable to find io unit\n"); + return NULL; + } + + switch(oper->rw) { + case WRITE: + io_prep_pwrite(&io->iocb,oper->fd, io->buf, oper->reclen, + oper->last_offset); + oper->last_offset += oper->reclen; + break; + case READ: + io_prep_pread(&io->iocb,oper->fd, io->buf, oper->reclen, + oper->last_offset); + oper->last_offset += oper->reclen; + break; + case RREAD: + rand_byte = random_byte_offset(oper); + oper->last_offset = rand_byte; + io_prep_pread(&io->iocb,oper->fd, io->buf, oper->reclen, + rand_byte); + break; + case RWRITE: + rand_byte = random_byte_offset(oper); + oper->last_offset = rand_byte; + io_prep_pwrite(&io->iocb,oper->fd, io->buf, oper->reclen, + rand_byte); + + break; + } + + return io; +} + +/* + * wait for any pending requests, and then free all ram associated with + * an operation. returns the last error the operation hit (zero means none) + */ +static int +finish_oper(struct thread_info *t, struct io_oper *oper) +{ + unsigned long last_err; + + io_oper_wait(t, oper); + last_err = oper->last_err; + if (oper->num_pending > 0) { + fprintf(stderr, "oper num_pending is %d\n", oper->num_pending); + } + close(oper->fd); + free(oper); + return last_err; +} + +/* + * allocates an io operation and fills in all the fields. 
returns + * null on error + */ +static struct io_oper * +create_oper(int fd, int rw, off_t start, off_t end, int reclen, int depth, + int iter, char *file_name) +{ + struct io_oper *oper; + + oper = malloc (sizeof(*oper)); + if (!oper) { + fprintf(stderr, "unable to allocate io oper\n"); + return NULL; + } + memset(oper, 0, sizeof(*oper)); + + oper->depth = depth; + oper->start = start; + oper->end = end; + oper->last_offset = oper->start; + oper->fd = fd; + oper->reclen = reclen; + oper->rw = rw; + oper->total_ios = (oper->end - oper->start) / oper->reclen; + oper->file_name = file_name; + + return oper; +} + +/* + * does setup on num_ios worth of iocbs, but does not actually + * start any io + */ +int build_oper(struct thread_info *t, struct io_oper *oper, int num_ios, + struct iocb **my_iocbs) +{ + int i; + struct io_unit *io; + + if (oper->started_ios == 0) + gettimeofday(&oper->start_time, NULL); + + if (num_ios == 0) + num_ios = oper->total_ios; + + if ((oper->started_ios + num_ios) > oper->total_ios) + num_ios = oper->total_ios - oper->started_ios; + + for( i = 0 ; i < num_ios ; i++) { + io = build_iocb(t, oper); + if (!io) { + return -1; + } + my_iocbs[i] = &io->iocb; + } + return num_ios; +} + +/* + * runs through the iocbs in the array provided and updates + * counters in the associated oper struct + */ +static void update_iou_counters(struct iocb **my_iocbs, int nr, + struct timeval *tv_now) +{ + struct io_unit *io; + int i; + for (i = 0 ; i < nr ; i++) { + io = (struct io_unit *)(my_iocbs[i]); + io->io_oper->num_pending++; + io->io_oper->started_ios++; + io->io_start_time = *tv_now; /* set time of io_submit */ + } +} + +/* starts some io for a given file, returns zero if all went well */ +int run_built(struct thread_info *t, int num_ios, struct iocb **my_iocbs) +{ + int ret; + struct timeval start_time; + struct timeval stop_time; + +resubmit: + gettimeofday(&start_time, NULL); + ret = io_submit(t->io_ctx, num_ios, my_iocbs); + gettimeofday(&stop_time, NULL); + calc_latency(&start_time, &stop_time, &t->io_submit_latency); + + if (ret != num_ios) { + /* some ios got through */ + if (ret > 0) { + update_iou_counters(my_iocbs, ret, &stop_time); + my_iocbs += ret; + t->num_global_pending += ret; + num_ios -= ret; + } + /* + * we've used all the requests allocated in aio_init, wait and + * retry + */ + if (ret > 0 || ret == -EAGAIN) { + int old_ret = ret; + if ((ret = read_some_events(t) > 0)) { + goto resubmit; + } else { + fprintf(stderr, "ret was %d and now is %d\n", ret, old_ret); + abort(); + } + } + + fprintf(stderr, "ret %d (%s) on io_submit\n", ret, strerror(-ret)); + return -1; + } + update_iou_counters(my_iocbs, ret, &stop_time); + t->num_global_pending += ret; + return 0; +} + +/* + * changes oper->rw to the next in a command sequence, or returns zero + * to say this operation is really, completely done for + */ +static int restart_oper(struct io_oper *oper) { + int new_rw = 0; + if (oper->last_err) + return 0; + + /* this switch falls through */ + switch(oper->rw) { + case WRITE: + if (stages & (1 << READ)) + new_rw = READ; + case READ: + if (!new_rw && stages & (1 << RWRITE)) + new_rw = RWRITE; + case RWRITE: + if (!new_rw && stages & (1 << RREAD)) + new_rw = RREAD; + } + + if (new_rw) { + oper->started_ios = 0; + oper->last_offset = oper->start; + oper->stonewalled = 0; + + /* + * we're restarting an operation with pending requests, so the + * timing info won't be printed by finish_io. 
Printing it here + */ + if (oper->num_pending) + print_time(oper); + + oper->rw = new_rw; + return 1; + } + return 0; +} + +static int oper_runnable(struct io_oper *oper) { + struct stat buf; + int ret; + + /* first context is always runnable, if started_ios > 0, no need to + * redo the calculations + */ + if (oper->started_ios || oper->start == 0) + return 1; + /* + * only the sequential phases force delays in starting */ + if (oper->rw >= RWRITE) + return 1; + ret = fstat(oper->fd, &buf); + if (ret < 0) { + perror("fstat"); + exit(1); + } + if (S_ISREG(buf.st_mode) && buf.st_size < oper->start) + return 0; + return 1; +} + +/* + * runs through all the io operations on the active list, and starts + * a chunk of io on each. If any io operations are completely finished, + * it either switches them to the next stage or puts them on the + * finished list. + * + * this function stops after max_io_submit iocbs are sent down the + * pipe, even if it has not yet touched all the operations on the + * active list. Any operations that have finished are moved onto + * the finished_opers list. + */ +static int run_active_list(struct thread_info *t, + int io_iter, + int max_io_submit) +{ + struct io_oper *oper; + struct io_oper *built_opers = NULL; + struct iocb **my_iocbs = t->iocbs; + int ret = 0; + int num_built = 0; + + oper = t->active_opers; + while(oper) { + if (!oper_runnable(oper)) { + oper = oper->next; + if (oper == t->active_opers) + break; + continue; + } + ret = build_oper(t, oper, io_iter, my_iocbs); + if (ret >= 0) { + my_iocbs += ret; + num_built += ret; + oper_list_del(oper, &t->active_opers); + oper_list_add(oper, &built_opers); + oper = t->active_opers; + if (num_built + io_iter > max_io_submit) + break; + } else + break; + } + if (num_built) { + ret = run_built(t, num_built, t->iocbs); + if (ret < 0) { + fprintf(stderr, "error %d on run_built\n", ret); + exit(1); + } + while(built_opers) { + oper = built_opers; + oper_list_del(oper, &built_opers); + oper_list_add(oper, &t->active_opers); + if (oper->started_ios == oper->total_ios) { + oper_list_del(oper, &t->active_opers); + oper_list_add(oper, &t->finished_opers); + } + } + } + return 0; +} + +void drop_shm() { + int ret; + struct shmid_ds ds; + if (use_shm != USE_SHM) + return; + + ret = shmctl(shm_id, IPC_RMID, &ds); + if (ret) { + perror("shmctl IPC_RMID"); + } +} + +void aio_setup(io_context_t *io_ctx, int n) +{ + int res = io_queue_init(n, io_ctx); + if (res != 0) { + fprintf(stderr, "io_queue_setup(%d) returned %d (%s)\n", + n, res, strerror(-res)); + exit(3); + } +} + +/* + * allocate io operation and event arrays for a given thread + */ +int setup_ious(struct thread_info *t, + int num_files, int depth, + int reclen, int max_io_submit) { + int i; + size_t bytes = num_files * depth * sizeof(*t->ios); + + t->ios = malloc(bytes); + if (!t->ios) { + fprintf(stderr, "unable to allocate io units\n"); + return -1; + } + memset(t->ios, 0, bytes); + + for (i = 0 ; i < depth * num_files; i++) { + t->ios[i].buf = aligned_buffer; + aligned_buffer += padded_reclen; + t->ios[i].buf_size = reclen; + if (verify) + memset(t->ios[i].buf, 'b', reclen); + else + memset(t->ios[i].buf, 0, reclen); + t->ios[i].next = t->free_ious; + t->free_ious = t->ios + i; + } + if (verify) { + verify_buf = aligned_buffer; + memset(verify_buf, 'b', reclen); + } + + t->iocbs = malloc(sizeof(struct iocb *) * max_io_submit); + if (!t->iocbs) { + fprintf(stderr, "unable to allocate iocbs\n"); + goto free_buffers; + } + + memset(t->iocbs, 0, max_io_submit * 
sizeof(struct iocb *)); + + t->events = malloc(sizeof(struct io_event) * depth * num_files); + if (!t->events) { + fprintf(stderr, "unable to allocate ram for events\n"); + goto free_buffers; + } + memset(t->events, 0, num_files * sizeof(struct io_event)*depth); + + t->num_global_ios = num_files * depth; + t->num_global_events = t->num_global_ios; + return 0; + +free_buffers: + if (t->ios) + free(t->ios); + if (t->iocbs) + free(t->iocbs); + if (t->events) + free(t->events); + return -1; +} + +/* + * The buffers used for file data are allocated as a single big + * malloc, and then each thread and operation takes a piece and uses + * that for file data. This lets us do a large shm or bigpages alloc + * and without trying to find a special place in each thread to map the + * buffers to + */ +int setup_shared_mem(int num_threads, int num_files, int depth, + int reclen, int max_io_submit) +{ + char *p = NULL; + size_t total_ram; + + padded_reclen = (reclen + page_size_mask) / (page_size_mask+1); + padded_reclen = padded_reclen * (page_size_mask+1); + total_ram = num_files * depth * padded_reclen + num_threads; + if (verify) + total_ram += padded_reclen; + + if (use_shm == USE_MALLOC) { + p = malloc(total_ram + page_size_mask); + } else if (use_shm == USE_SHM) { + shm_id = shmget(IPC_PRIVATE, total_ram, IPC_CREAT | 0700); + if (shm_id < 0) { + perror("shmget"); + drop_shm(); + goto free_buffers; + } + p = shmat(shm_id, (char *)0x50000000, 0); + if ((long)p == -1) { + perror("shmat"); + goto free_buffers; + } + /* won't really be dropped until we shmdt */ + drop_shm(); + } else if (use_shm == USE_SHMFS) { + char mmap_name[16]; /* /dev/shm/ + null + XXXXXX */ + int fd; + + strcpy(mmap_name, "/dev/shm/XXXXXX"); + fd = mkstemp(mmap_name); + if (fd < 0) { + perror("mkstemp"); + goto free_buffers; + } + unlink(mmap_name); + ftruncate(fd, total_ram); + shm_id = fd; + p = mmap((char *)0x50000000, total_ram, + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + + if (p == MAP_FAILED) { + perror("mmap"); + goto free_buffers; + } + } + if (!p) { + fprintf(stderr, "unable to allocate buffers\n"); + goto free_buffers; + } + unaligned_buffer = p; + p = (char*)((intptr_t) (p + page_size_mask) & ~page_size_mask); + aligned_buffer = p; + return 0; + +free_buffers: + drop_shm(); + if (unaligned_buffer) + free(unaligned_buffer); + return -1; +} + +/* + * runs through all the thread_info structs and calculates a combined + * throughput + */ +void global_thread_throughput(struct thread_info *t, char *this_stage) { + int i; + double runtime = time_since_now(&global_stage_start_time); + double total_mb = 0; + double min_trans = 0; + + for (i = 0 ; i < num_threads ; i++) { + total_mb += global_thread_info[i].stage_mb_trans; + if (!min_trans || t->stage_mb_trans < min_trans) + min_trans = t->stage_mb_trans; + } + if (total_mb) { + fprintf(stderr, "%s throughput (%.2f MB/s) ", this_stage, + total_mb / runtime); + fprintf(stderr, "%.2f MB in %.2fs", total_mb, runtime); + if (stonewall) + fprintf(stderr, " min transfer %.2fMB", min_trans); + fprintf(stderr, "\n"); + } +} + + +/* this is the meat of the state machine. There is a list of + * active operations structs, and as each one finishes the required + * io it is moved to a list of finished operations. 
Once they have + * all finished whatever stage they were in, they are given the chance + * to restart and pick a different stage (read/write/random read etc) + * + * various timings are printed in between the stages, along with + * thread synchronization if there are more than one threads. + */ +int worker(struct thread_info *t) +{ + struct io_oper *oper; + char *this_stage = NULL; + struct timeval stage_time; + int status = 0; + int iteration = 0; + int cnt; + + aio_setup(&t->io_ctx, 512); + +restart: + if (num_threads > 1) { + pthread_mutex_lock(&stage_mutex); + threads_starting++; + if (threads_starting == num_threads) { + threads_ending = 0; + gettimeofday(&global_stage_start_time, NULL); + pthread_cond_broadcast(&stage_cond); + } + while (threads_starting != num_threads) + pthread_cond_wait(&stage_cond, &stage_mutex); + pthread_mutex_unlock(&stage_mutex); + } + if (t->active_opers) { + this_stage = stage_name(t->active_opers->rw); + gettimeofday(&stage_time, NULL); + t->stage_mb_trans = 0; + } + + cnt = 0; + /* first we send everything through aio */ + while(t->active_opers && (cnt < iterations || iterations == RUN_FOREVER)) { + if (stonewall && threads_ending) { + oper = t->active_opers; + oper->stonewalled = 1; + oper_list_del(oper, &t->active_opers); + oper_list_add(oper, &t->finished_opers); + } else { + run_active_list(t, io_iter, max_io_submit); + } + cnt++; + } + if (latency_stats) + print_latency(t); + + if (completion_latency_stats) + print_completion_latency(t); + + /* then we wait for all the operations to finish */ + oper = t->finished_opers; + do { + if (!oper) + break; + io_oper_wait(t, oper); + oper = oper->next; + } while(oper != t->finished_opers); + + /* then we do an fsync to get the timing for any future operations + * right, and check to see if any of these need to get restarted + */ + oper = t->finished_opers; + while(oper) { + if (fsync_stages) + fsync(oper->fd); + t->stage_mb_trans += oper_mb_trans(oper); + if (restart_oper(oper)) { + oper_list_del(oper, &t->finished_opers); + oper_list_add(oper, &t->active_opers); + oper = t->finished_opers; + continue; + } + oper = oper->next; + if (oper == t->finished_opers) + break; + } + + if (t->stage_mb_trans && t->num_files > 0) { + double seconds = time_since_now(&stage_time); + fprintf(stderr, "thread %d %s totals (%.2f MB/s) %.2f MB in %.2fs\n", + t - global_thread_info, this_stage, t->stage_mb_trans/seconds, + t->stage_mb_trans, seconds); + } + + if (num_threads > 1) { + pthread_mutex_lock(&stage_mutex); + threads_ending++; + if (threads_ending == num_threads) { + threads_starting = 0; + pthread_cond_broadcast(&stage_cond); + global_thread_throughput(t, this_stage); + } + while(threads_ending != num_threads) + pthread_cond_wait(&stage_cond, &stage_mutex); + pthread_mutex_unlock(&stage_mutex); + } + + /* someone got restarted, go back to the beginning */ + if (t->active_opers && (cnt < iterations || iterations == RUN_FOREVER)) { + iteration++; + goto restart; + } + + /* finally, free all the ram */ + while(t->finished_opers) { + oper = t->finished_opers; + oper_list_del(oper, &t->finished_opers); + status = finish_oper(t, oper); + } + + if (t->num_global_pending) { + fprintf(stderr, "global num pending is %d\n", t->num_global_pending); + } + io_queue_release(t->io_ctx); + + return status; +} + +typedef void * (*start_routine)(void *); +int run_workers(struct thread_info *t, int num_threads) +{ + int ret; + int thread_ret; + int i; + + for(i = 0 ; i < num_threads ; i++) { + ret = pthread_create(&t[i].tid, NULL, 
(start_routine)worker, t + i); + if (ret) { + perror("pthread_create"); + exit(1); + } + } + for(i = 0 ; i < num_threads ; i++) { + ret = pthread_join(t[i].tid, (void *)&thread_ret); + if (ret) { + perror("pthread_join"); + exit(1); + } + } + return 0; +} + +off_t parse_size(char *size_arg, off_t mult) { + char c; + int num; + off_t ret; + c = size_arg[strlen(size_arg) - 1]; + if (c > '9') { + size_arg[strlen(size_arg) - 1] = '\0'; + } + num = atoi(size_arg); + switch(c) { + case 'g': + case 'G': + mult = 1024 * 1024 * 1024; + break; + case 'm': + case 'M': + mult = 1024 * 1024; + break; + case 'k': + case 'K': + mult = 1024; + break; + case 'b': + case 'B': + mult = 1; + break; + } + ret = mult * num; + return ret; +} + +void print_usage(void) { + printf("usage: aio-stress [-s size] [-r size] [-a size] [-d num] [-b num]\n"); + printf(" [-i num] [-t num] [-c num] [-C size] [-nxhOS ]\n"); + printf(" file1 [file2 ...]\n"); + printf("\t-a size in KB at which to align buffers\n"); + printf("\t-b max number of iocbs to give io_submit at once\n"); + printf("\t-c number of io contexts per file\n"); + printf("\t-C offset between contexts, default 2MB\n"); + printf("\t-s size in MB of the test file(s), default 1024MB\n"); + printf("\t-r record size in KB used for each io, default 64KB\n"); + printf("\t-d number of pending aio requests for each file, default 64\n"); + printf("\t-i number of ios per file sent before switching\n\t to the next file, default 8\n"); + printf("\t-I total number of ayncs IOs the program will run, default is run until Cntl-C\n"); + printf("\t-O Use O_DIRECT (not available in 2.4 kernels),\n"); + printf("\t-S Use O_SYNC for writes\n"); + printf("\t-o add an operation to the list: write=0, read=1,\n"); + printf("\t random write=2, random read=3.\n"); + printf("\t repeat -o to specify multiple ops: -o 0 -o 1 etc.\n"); + printf("\t-m shm use ipc shared memory for io buffers instead of malloc\n"); + printf("\t-m shmfs mmap a file in /dev/shm for io buffers\n"); + printf("\t-n no fsyncs between write stage and read stage\n"); + printf("\t-l print io_submit latencies after each stage\n"); + printf("\t-L print io completion latencies after each stage\n"); + printf("\t-t number of threads to run\n"); + printf("\t-u unlink files after completion\n"); + printf("\t-v verification of bytes written\n"); + printf("\t-x turn off thread stonewalling\n"); + printf("\t-h this message\n"); + printf("\n\t the size options (-a -s and -r) allow modifiers -s 400{k,m,g}\n"); + printf("\t translate to 400KB, 400MB and 400GB\n"); + printf("version %s\n", PROG_VERSION); +} + +int main(int ac, char **av) +{ + int rwfd; + int i; + int j; + int c; + + off_t file_size = 1 * 1024 * 1024 * 1024; + int first_stage = WRITE; + struct io_oper *oper; + int status = 0; + int num_files = 0; + int open_fds = 0; + struct thread_info *t; + + page_size_mask = getpagesize() - 1; + + while(1) { + c = getopt(ac, av, "a:b:c:C:m:s:r:d:i:I:o:t:lLnhOSxvu"); + if (c < 0) + break; + + switch(c) { + case 'a': + page_size_mask = parse_size(optarg, 1024); + page_size_mask--; + break; + case 'c': + num_contexts = atoi(optarg); + break; + case 'C': + context_offset = parse_size(optarg, 1024 * 1024); + case 'b': + max_io_submit = atoi(optarg); + break; + case 's': + file_size = parse_size(optarg, 1024 * 1024); + break; + case 'd': + depth = atoi(optarg); + break; + case 'r': + rec_len = parse_size(optarg, 1024); + break; + case 'i': + io_iter = atoi(optarg); + break; + case 'I': + iterations = atoi(optarg); + break; + case 'n': + 
fsync_stages = 0; + break; + case 'l': + latency_stats = 1; + break; + case 'L': + completion_latency_stats = 1; + break; + case 'm': + if (!strcmp(optarg, "shm")) { + fprintf(stderr, "using ipc shm\n"); + use_shm = USE_SHM; + } else if (!strcmp(optarg, "shmfs")) { + fprintf(stderr, "using /dev/shm for buffers\n"); + use_shm = USE_SHMFS; + } + break; + case 'o': + i = atoi(optarg); + stages |= 1 << i; + fprintf(stderr, "adding stage %s\n", stage_name(i)); + break; + case 'O': + o_direct = O_DIRECT; + break; + case 'S': + o_sync = O_SYNC; + break; + case 't': + num_threads = atoi(optarg); + break; + case 'x': + stonewall = 0; + break; + case 'u': + unlink_files = 1; + break; + case 'v': + verify = 1; + break; + case 'h': + default: + print_usage(); + exit(1); + } + } + + /* + * make sure we don't try to submit more ios than we have allocated + * memory for + */ + if (depth < io_iter) { + io_iter = depth; + fprintf(stderr, "dropping io_iter to %d\n", io_iter); + } + + if (optind >= ac) { + print_usage(); + exit(1); + } + + num_files = ac - optind; + + if (num_threads > (num_files * num_contexts)) { + num_threads = num_files * num_contexts; + fprintf(stderr, "dropping thread count to the number of contexts %d\n", + num_threads); + } + + t = malloc(num_threads * sizeof(*t)); + if (!t) { + perror("malloc"); + exit(1); + } + global_thread_info = t; + + /* by default, allow a huge number of iocbs to be sent towards + * io_submit + */ + if (!max_io_submit) + max_io_submit = num_files * io_iter * num_contexts; + + /* + * make sure we don't try to submit more ios than max_io_submit allows + */ + if (max_io_submit < io_iter) { + io_iter = max_io_submit; + fprintf(stderr, "dropping io_iter to %d\n", io_iter); + } + + if (!stages) { + stages = (1 << WRITE) | (1 << READ) | (1 << RREAD) | (1 << RWRITE); + } else { + for (i = 0 ; i < LAST_STAGE; i++) { + if (stages & (1 << i)) { + first_stage = i; + fprintf(stderr, "starting with %s\n", stage_name(i)); + break; + } + } + } + + if (file_size < num_contexts * context_offset) { + fprintf(stderr, "file size %Lu too small for %d contexts\n", + file_size, num_contexts); + exit(1); + } + + fprintf(stderr, "file size %LuMB, record size %luKB, depth %d, ios per iteration %d\n", file_size / (1024 * 1024), rec_len / 1024, depth, io_iter); + fprintf(stderr, "max io_submit %d, buffer alignment set to %luKB\n", + max_io_submit, (page_size_mask + 1)/1024); + fprintf(stderr, "threads %d files %d contexts %d context offset %LuMB verification %s\n", + num_threads, num_files, num_contexts, + context_offset / (1024 * 1024), verify ? 
"on" : "off"); + /* open all the files and do any required setup for them */ + for (i = optind ; i < ac ; i++) { + int thread_index; + for (j = 0 ; j < num_contexts ; j++) { + thread_index = open_fds % num_threads; + open_fds++; + + rwfd = open(av[i], O_CREAT | O_RDWR | o_direct | o_sync, 0600); + assert(rwfd != -1); + + oper = create_oper(rwfd, first_stage, j * context_offset, + file_size - j * context_offset, rec_len, + depth, io_iter, av[i]); + if (!oper) { + fprintf(stderr, "error in create_oper\n"); + exit(-1); + } + oper_list_add(oper, &t[thread_index].active_opers); + t[thread_index].num_files++; + } + } + if (setup_shared_mem(num_threads, num_files * num_contexts, + depth, rec_len, max_io_submit)) + { + exit(1); + } + for (i = 0 ; i < num_threads ; i++) { + if (setup_ious(&t[i], t[i].num_files, depth, rec_len, max_io_submit)) + exit(1); + } + if (num_threads > 1){ + printf("Running multi thread version num_threads:%d\n", num_threads); + run_workers(t, num_threads); + } else { + printf("Running single thread version \n"); + status = worker(t); + } + if (unlink_files) { + for (i = optind ; i < ac ; i++) { + printf("Cleaning up file %s \n", av[i]); + unlink(av[i]); + } + } + + if (status) { + exit(1); + } + return status; +} + diff --git a/aiostress/aiostress.py b/aiostress/aiostress.py new file mode 100755 index 000000000..af1092aa4 --- /dev/null +++ b/aiostress/aiostress.py @@ -0,0 +1,45 @@ +# This requires aio headers to build. +# Should work automagically out of deps now. + +# NOTE - this should also have the ability to mount a filesystem, +# run the tests, unmount it, then fsck the filesystem + +import test +from autotest_utils import * + +class aiostress(test.test): + version = 2 + + def initialize(self): + self.job.setup_dep(['libaio']) + ldflags = '-L ' + self.autodir + '/deps/libaio/lib' + cflags = '-I ' + self.autodir + '/deps/libaio/include' + self.gcc_flags = ldflags + ' ' + cflags + + + # ftp://ftp.suse.com/pub/people/mason/utils/aio-stress.c + def setup(self, tarball = None): + print self.srcdir, self.bindir, self.tmpdir + os.mkdir(self.srcdir) + os.chdir(self.srcdir) + system('cp ' + self.bindir+'/aio-stress.c .') + os.chdir(self.srcdir) + self.gcc_flags += ' -Wall -lpthread -laio' + system('gcc ' + self.gcc_flags + ' aio-stress.c -o aio-stress') + + + def execute(self, args = ''): + os.chdir(self.tmpdir) + libs = self.autodir+'/deps/libaio/lib/' + ld_path = prepend_path(libs, environ('LD_LIBRARY_PATH')) + var_ld_path = 'LD_LIBRARY_PATH=' + ld_path + cmd = self.srcdir + '/aio-stress ' + args + ' poo' + system(var_ld_path + ' ' + cmd) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(var_ld_path + ' ' + cmd) + profilers.stop(self) + profilers.report(self) diff --git a/aiostress/control b/aiostress/control new file mode 100644 index 000000000..5e7e8073f --- /dev/null +++ b/aiostress/control @@ -0,0 +1 @@ +job.runtest(None, 'aiostress') diff --git a/bonnie/bonnie++-1.03a.tgz b/bonnie/bonnie++-1.03a.tgz new file mode 100644 index 000000000..65f96ac94 Binary files /dev/null and b/bonnie/bonnie++-1.03a.tgz differ diff --git a/bonnie/bonnie.py b/bonnie/bonnie.py new file mode 100755 index 000000000..a4247f01b --- /dev/null +++ b/bonnie/bonnie.py @@ -0,0 +1,29 @@ +import test, os_dep +from autotest_utils import * + +class bonnie(test.test): + version = 1 + + # http://www.coker.com.au/bonnie++/bonnie++-1.03a.tgz + def setup(self, tarball = 'bonnie++-1.03a.tgz'): + tarball = unmap_url(self.bindir, 
tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + os_dep.command('g++') + system('./configure') + system('make') + + def execute(self, iterations = 1, extra_args = '', user = 'root'): + args = '-d ' + self.tmpdir + ' -u ' + user + ' ' + extra_args + + for i in range(1, iterations+1): + system(self.srcdir + '/bonnie++ ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(self.srcdir + '/bonnie++ ' + args) + profilers.stop(self) + profilers.report(self) diff --git a/bonnie/control b/bonnie/control new file mode 100644 index 000000000..013224e92 --- /dev/null +++ b/bonnie/control @@ -0,0 +1 @@ +job.runtest(None, 'bonnie') diff --git a/cpu_hotplug/control b/cpu_hotplug/control new file mode 100644 index 000000000..e034d9e3b --- /dev/null +++ b/cpu_hotplug/control @@ -0,0 +1 @@ +job.runtest(None, 'cpu_hotplug') diff --git a/cpu_hotplug/cpu_hotplug.py b/cpu_hotplug/cpu_hotplug.py new file mode 100644 index 000000000..e3815e474 --- /dev/null +++ b/cpu_hotplug/cpu_hotplug.py @@ -0,0 +1,44 @@ +import test, time +from autotest_utils import * + +class cpu_hotplug(test.test): + version = 1 + + # http://developer.osdl.org/dev/hotplug/tests/lhcs_regression-1.4.tgz + def setup(self, tarball = 'lhcs_regression-1.4.tgz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + + def execute(self): + # Check if the kernel supports cpu hotplug + config = running_config() + if config and not grep('CONFIG_HOTPLUG_CPU=y', config): + raise TestError('Kernel does not support cpu hotplug') + + # Check cpu nums, if equals 1, quit. + if count_cpus() == 1: + print 'Just only single cpu online, quiting...' + sys.exit() + + # Have a simple and quick check first, FIX me please. + system('dmesg -c > /dev/null') + for cpu in cpu_online_map(): + if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu): + system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1) + system('dmesg -c') + time.sleep(3) + system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1) + system('dmesg -c') + time.sleep(3) + + # Begin this cpu hotplug test big guru. + os.chdir(self.srcdir) + system('./runtests.sh') + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system('./runtests.sh') + profilers.stop(self) + profilers.report(self) diff --git a/cpu_hotplug/lhcs_regression-1.4.tgz b/cpu_hotplug/lhcs_regression-1.4.tgz new file mode 100644 index 000000000..10d0c9529 Binary files /dev/null and b/cpu_hotplug/lhcs_regression-1.4.tgz differ diff --git a/cyclictest/README b/cyclictest/README new file mode 100644 index 000000000..0db88f4de --- /dev/null +++ b/cyclictest/README @@ -0,0 +1,32 @@ +cyclictest -t 5 -p 80 -n -q -l 10 + +runs a test with 5 threads, stops after 10 loops and outputs: + +T: 0 ( 2215) P:80 I: 1000 C: 10 Min: 31 Act: 33 Avg: 33 Max: 43 +T: 1 ( 2216) P:79 I: 1500 C: 10 Min: 22 Act: 22 Avg: 36 Max: 61 +T: 2 ( 2217) P:78 I: 2000 C: 10 Min: 27 Act: 33 Avg: 36 Max: 50 +T: 3 ( 2218) P:77 I: 2500 C: 10 Min: 23 Act: 37 Avg: 38 Max: 59 +T: 4 ( 2219) P:76 I: 3000 C: 10 Min: 26 Act: 48 Avg: 36 Max: 48 + +All numbers in micro seconds. You get the minimium, maximum and average latency for each thread. + +I use this for automated regression testing. 
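+For such automated runs the summary lines above are easy to post-process. The
+following is purely an illustrative sketch, not something shipped with
+cyclictest: a small Python filter (named check_latency.py only for this
+example) that reads the -q summary from stdin and fails the run when any
+thread's Max value exceeds an assumed 200 microsecond threshold.
+
+# cyclictest -t 5 -p 80 -n -q -l 10000 | python check_latency.py
+import re, sys
+
+# matches e.g. "T: 0 ( 2215) P:80 I: 1000 C: 10 Min: 31 Act: 33 Avg: 33 Max: 43"
+SUMMARY = re.compile(r'T:\s*(\d+).*Min:\s*(\d+).*Avg:\s*(\d+).*Max:\s*(\d+)')
+
+def max_latencies(text):
+    # map thread number -> Max latency in microseconds
+    return dict((int(m.group(1)), int(m.group(4)))
+                for m in map(SUMMARY.search, text.splitlines()) if m)
+
+worst = max(max_latencies(sys.stdin.read()).values())
+sys.exit(1 if worst > 200 else 0)
+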
+ +The -v option outputs: + + 0: 0: 0 + 0: 1: 45 + 0: 2: 41 + 0: 3: 31 + 0: 4: 31 + 0: 5: 34 + 1: 0: 0 + 1: 1: 29 + 1: 2: 33 + 1: 3: 33 +... +where the first column is the thread, the second column is the loop +counter and the third is the latency value for this step. You can use +this for your own statistics or for latency distribution plots. + + tglx diff --git a/cyclictest/control b/cyclictest/control new file mode 100644 index 000000000..184715e2d --- /dev/null +++ b/cyclictest/control @@ -0,0 +1 @@ +job.runtest(None, 'cyclictest') diff --git a/cyclictest/cyclictest-v0.11.tar.bz2 b/cyclictest/cyclictest-v0.11.tar.bz2 new file mode 100644 index 000000000..5fc8e29c0 Binary files /dev/null and b/cyclictest/cyclictest-v0.11.tar.bz2 differ diff --git a/cyclictest/cyclictest.py b/cyclictest/cyclictest.py new file mode 100755 index 000000000..0f3206ab0 --- /dev/null +++ b/cyclictest/cyclictest.py @@ -0,0 +1,17 @@ +import test +from autotest_utils import * + +class cyclictest(test.test): + version = 1 + + # http://tglx.de/projects/misc/cyclictest/cyclictest-v0.11.tar.bz2 + + def setup(self, tarball = 'cyclictest-v0.11.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, args = '-t 10 -l 100000'): + system(self.srcdir + '/cyclictest ' + args) diff --git a/cyclictest/help b/cyclictest/help new file mode 100644 index 000000000..676b0abc8 --- /dev/null +++ b/cyclictest/help @@ -0,0 +1,172 @@ +Without parameters cyclictest creates one thread with a 1ms interval timer. + +cyclictest -h provides help text for the various options +-b USEC --breaktrace=USEC send break trace command when latency > USEC +-c CLOCK --clock=CLOCK select clock + +0 = CLOCK_MONOTONIC (default) +1 = CLOCK_REALTIME +-d DIST --distance=DIST distance of thread intervals in us default=500 +-i INTV --interval=INTV base interval of thread in us default=1000 +-l LOOPS --loops=LOOPS number of loops: default=0(endless) +-n --nanosleep use clock_nanosleep +-p PRIO --prio=PRIO priority of highest prio thread +-q --quiet print only a summary on exit +-r --relative use relative timer instead of absolute +-s --system use sys_nanosleep and sys_setitimer +-t NUM --threads=NUM number of threads: default=1 +-v --verbose output values on stdout for statistics +format: n:c:v n=tasknum c=count v=value in us + +-b is a debugging option to control the latency tracer in the realtime +preemption patch. + +It is useful to track down unexpected large latencies on a system. This option +does only work with + + * CONFIG_PREEMPT_RT=y + * CONFIG_LATENCY_TIMING=y + * CONFIG_LATENCY_TRACE=y + +kernel configuration options enabled. The USEC parameter to the -b option +defines a maximum latency value, which is compared against the actual latencies +of the test. Once the measured latency is higher than the given maximum, +the kernel tracer and cyclictest is stopped. The trace can be read from +/proc/latency_trace + +mybox# cat /proc/latency_trace >trace.log + +Please be aware that the tracer adds significant overhead to the kernel, +so the latencies will be much higher than on a kernel with latency tracing +disabled. + +-c CLOCK selects the clock, which is used + + * 0 selects CLOCK_MONOTONIC, which is the monotonic increasing system time. + This is the default selection + * 1 selects CLOCK_REALTIME, which is the time of day time. + +CLOCK_REALTIME can be set by settimeofday, while CLOCK_MONOTONIC can not be +modified by the user. 
+ +This option has no influence when the -s option is given. + +-d DIST set the distance of thread intervals in microseconds (default is 500us) + +When cylictest is called with the -t option and more than one thread is created, +then this distance value is added to the interval of the threads. + +Interval(thread N) = Interval(thread N-1) + DIST + +-i INTV set the base interval of the thread(s) in microseconds (default is 1000us) + +This sets the interval of the first thread. See also -d. + +-l LOOPS set the number of loops (default = 0(endless)) + +This option is useful for automated tests with a given number of test cycles. +Cyclictest is stopped once the number of timer intervals has been reached. + +-n use clock_nanosleep instead of posix interval timers + +Setting this option runs the tests with clock_nanosleep instead of posix +interval timers. + +-p PRIO set the priority of the first thread + +The given priority is set to the first test thread. Each further thread gets +a lower priority: + +Priority(Thread N) = Priority(Thread N-1) + +-q run the tests quiet and print only a summary on exit + +Useful for automated tests, where only the summary output needs to be captured + +-r use relative timers instead of absolute + +The default behaviour of the tests is to use absolute timers. This option is +there for completeness and should not be used for reproducible tests. + +-s use sys_nanosleep and sys_setitimer instead of posix timers + +Note, that -s can only be used with one thread because itimers are per process +and not per thread. -s in combination with -n uses the nanosleep syscall +and is not restricted to one thread + +-t NUM set the number of test threads (default is 1) + +Create NUM test threads. See -d, -i and -p for further information. + +-v output values on stdout for statistics + +This option is used to gather statistical information about the latency +distribution. The output is sent to stdout. The output format is + +n:c:v + +where n=task number c=count v=latency value in us + +Use this option in combination with -l + + +tglx's reference machine + +All tests have been run on a Pentium III 400MHz based PC. + +The tables show comparisons of vanilla Linux 2.6.16, Linux-2.6.16-hrt5 +and Linux-2.6.16-rt12. The tests for intervals less than the jiffy resolution +have not been run on vanilla Linux 2.6.16. The test thread runs in all cases +with SCHED_FIFO and priority 80. All numbers are in microseconds. + +* Test case: clock_nanosleep(TIME_ABSTIME), Interval 10000 microseconds,. 10000 loops, no load. + +Kernel min max avg +2.6.16 24 4043 1989 +2.6.16-hrt5 12 94 20 +2.6.16-rt12 6 40 10 + +* Test case: clock_nanosleep(TIME_ABSTIME), Interval 10000 micro seconds,. 10000 loops, 100% load. + +Kernel min max avg +2.6.16 55 4280 2198 +2.6.16-hrt5 11 458 55 +2.6.16-rt12 6 67 29 + +* Test case: POSIX interval timer, Interval 10000 micro seconds,. 10000 loops, no load. + +Kernel min max avg +2.6.16 21 4073 2098 +2.6.16-hrt5 22 120 35 +2.6.16-rt12 20 60 31 + +* Test case: POSIX interval timer, Interval 10000 micro seconds,. 10000 loops, 100% load. + +Kernel min max avg +2.6.16 82 4271 2089 +2.6.16-hrt5 31 458 53 +2.6.16-rt12 21 70 35 + +* Test case: clock_nanosleep(TIME_ABSTIME), Interval 500 micro seconds,. 100000 loops, no load. + +Kernel min max avg +2.6.16-hrt5 5 108 24 +2.6.16-rt12 5 48 7 + +* Test case: clock_nanosleep(TIME_ABSTIME), Interval 500 micro seconds,. 100000 loops, 100% load. 
+ +Kernel min max avg +2.6.16-hrt5 9 684 56 +2.6.16-rt12 10 60 22 + +* Test case: POSIX interval timer, Interval 500 micro seconds,. 100000 loops, no load. + +Kernel min max avg +2.6.16-hrt5 8 119 22 +2.6.16-rt12 12 78 16 + +* Test case: POSIX interval timer, Interval 500 micro seconds,. 100000 loops, 100% load. + +Kernel min max avg +2.6.16-hrt5 16 489 58 +2.6.16-rt12 12 95 29 diff --git a/dbench/control b/dbench/control new file mode 100644 index 000000000..fa84bf053 --- /dev/null +++ b/dbench/control @@ -0,0 +1 @@ +job.runtest(None, 'dbench') diff --git a/dbench/dbench-3.04.tar.gz b/dbench/dbench-3.04.tar.gz new file mode 100644 index 000000000..c0bb2e21e Binary files /dev/null and b/dbench/dbench-3.04.tar.gz differ diff --git a/dbench/dbench.py b/dbench/dbench.py new file mode 100755 index 000000000..f5a0ef848 --- /dev/null +++ b/dbench/dbench.py @@ -0,0 +1,28 @@ +import test +from autotest_utils import * + +class dbench(test.test): + version = 1 + + # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz + def setup(self, tarball = 'dbench-3.04.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('./configure') + system('make') + + def execute(self, iterations = 1, nprocs = count_cpus(), args = ''): + for i in range(1, iterations+1): + args = args + ' -c '+self.srcdir+'/client.txt' + args += ' %s' % nprocs + system(self.srcdir + '/dbench ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(self.srcdir + '/dbench ' + args) + profilers.stop(self) + profilers.report(self) diff --git a/dbt2/control b/dbt2/control new file mode 100644 index 000000000..3982e72cb --- /dev/null +++ b/dbt2/control @@ -0,0 +1,3 @@ +job.runtest('pgsql', 'dbt2', 'pgsql', '-w 1 -c 20 -d 1800 -s 100 -n -z "autotest pgsql"') +job.runtest('pgpool', 'dbt2', 'pgpool', '-w 1 -c 20 -d 1800 -s 100 -n -z "autotest pgpool"') +job.runtest('mysql', 'dbt2', 'mysql', '-w 1 -c 20 -d 1800 -s 100 -n -z "autotest mysql"') diff --git a/dbt2/dbt2.py b/dbt2/dbt2.py new file mode 100644 index 000000000..5c970c110 --- /dev/null +++ b/dbt2/dbt2.py @@ -0,0 +1,71 @@ +import test +from autotest_utils import * + +# Dbt-2 is a fair-use implementation of the TPC-C benchmark. The test is +# currently hardcoded to use PostgreSQL but the kit also supports MySQL. + +class dbt2(test.test): + version = 2 + + # http://osdn.dl.sourceforge.net/sourceforge/osdldbt/dbt2-0.39.tar.gz + def setup(self, tarball = 'dbt2-0.39.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + self.job.setup_dep(['pgsql', 'pgpool', 'mysql']) + + # + # Extract one copy of the kit for MySQL. + # + system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.mysql') + os.chdir(self.srcdir + '.mysql') + system('./configure --with-mysql=%s/deps/mysql/mysql' \ + % self.autodir) + system('make') + + # + # Extract one copy of the kit for PostgreSQL. 
+ # + system('cp -pR ' + self.srcdir + ' ' + self.srcdir + '.pgsql') + os.chdir(self.srcdir + '.pgsql') + system('./configure --with-postgresql=%s/deps/pgsql/pgsql' \ + % self.autodir) + system('make') + + # Create symlinks to autotest's results directory from dbt-2's + # preferred results directory to self.resultsdir + system('ln -s %s %s' % (self.resultsdir, \ + self.srcdir + '.mysql/scripts/output')) + system('ln -s %s %s' % (self.resultsdir, \ + self.srcdir + '.pgsql/scripts/output')) + + def execute(self, db_type, args = ''): + logfile = self.resultsdir + '/dbt2.log' + + if (db_type == "mysql"): + self.execute_mysql(args) + elif (db_type == "pgpool"): + self.execute_pgpool(args) + elif (db_type == "pgsql"): + self.execute_pgsql(args) + + def execute_mysql(self, args = ''): + args = args + system(self.srcdir + '.mysql/scripts/mysql/build_db.sh -g -w 1') + system(self.srcdir + '.mysql/scripts/run_workload.sh ' + args) + + def execute_pgpool(self, args = ''): + system('%s/deps/pgpool/pgpool/bin/pgpool -f %s/../pgpool.conf' \ + % (self.autodir, self.srcdir)) + self.execute_pgsql(args) + system('%s/deps/pgpool/pgpool/bin/pgpool stop' % self.autodir) + + + def execute_pgsql(self, args = ''): + system(self.srcdir + '.pgsql/scripts/pgsql/build_db.sh -g -w 1') + system(self.srcdir + '.pgsql/scripts/run_workload.sh ' + args) + # + # Clean up by dropping the database after the test. + # + system(self.srcdir + '.pgsql/scripts/pgsql/start_db.sh') + system(self.srcdir + '.pgsql/scripts/pgsql/drop_db.sh') + system(self.srcdir + '.pgsql/scripts/pgsql/stop_db.sh') diff --git a/dbt2/pgpool.conf b/dbt2/pgpool.conf new file mode 100644 index 000000000..d5b2f07c4 --- /dev/null +++ b/dbt2/pgpool.conf @@ -0,0 +1,135 @@ +# +# pgpool configuration file sample +# $Header: /cvsroot/pgpool/pgpool-II/pgpool.conf.sample,v 1.1.1.1 2006/09/08 03:36:04 t-ishii Exp $ + +# Host name or IP address to listen on: '*' for all, '' for no TCP/IP +# connections +listen_addresses = 'localhost' + +# Port number for pgpool +port = 9999 + +# Port number for pgpool communication manager +pcp_port = 9898 + +# Unix domain socket path. (The Debian package defaults to +# /var/run/postgresql.) +socket_dir = '/tmp' + +# Unix domain socket path for pgpool communication manager. +# (Debian package default to /var/run/postgresql) +pcp_socket_dir = '/tmp' + +# Unix domain socket path for the backend. Debian package default to /var/run/postgresql! +backend_socket_dir = '/tmp' + +# pgpool communication manager timeout. 0 means no timeout, but strongly not recommended! +pcp_timeout = 10 + +# number of pre-forked child process +num_init_children = 32 + +# Number of connection pools allowed for a child process +max_pool = 4 + +# If idle for this many seconds, child exits. 0 means no timeout. +child_life_time = 300 + +# If idle for this many seconds, connection to PostgreSQL closes. +# 0 means no timeout. +connection_life_time = 0 + +# If child_max_connections connections were received, child exits. +# 0 means no exit. +child_max_connections = 0 + +# Logging directory +logdir = '/tmp' + +# Replication mode +#replication_mode = false + +# Set this to true if you want to avoid deadlock situations when +# replication is enabled. There will, however, be a noticable performance +# degration. A workaround is to set this to false and insert a /*STRICT*/ +# comment at the beginning of the SQL command. +#replication_strict = true + +# When replication_strict is set to false, there will be a chance for +# deadlocks. 
Set this to nonzero (in milliseconds) to detect this +# situation and resolve the deadlock by aborting current session. +#replication_timeout = 5000 + +# Load balancing mode, i.e., all SELECTs except in a transaction block +# are load balanced. This is ignored if replication_mode is false. +#load_balance_mode = false + +# if there's a data mismatch between master and secondary +# start degenration to stop replication mode +#replication_stop_on_mismatch = false + +# Semicolon separated list of queries to be issued at the end of a session +reset_query_list = 'ABORT; RESET ALL; SET SESSION AUTHORIZATION DEFAULT' + +# If true print time stamp on each log line. +print_timestamp = true + +# If true, operate in master/slave mode. +#master_slave_mode = false + +# If true, cache connection pool. +connection_cache = true + +# Health check timeout. 0 means no timeout. +health_check_timeout = 20 + +# Health check period. 0 means no health check. +health_check_period = 0 + +# Health check user +health_check_user = 'nobody' + +# If true, automatically lock table with INSERT statements to keep SERIAL +# data consistency. An /*INSERT LOCK*/ comment has the same effect. A +# /NO INSERT LOCK*/ comment disables the effect. +insert_lock = false + +# If true, ignore leading white spaces of each query while pgpool judges +# whether the query is a SELECT so that it can be load balanced. This +# is useful for certain APIs such as DBI/DBD which is known to adding an +# extra leading white space. +ignore_leading_white_space = false + +# If true, print all statements to the log. Like the log_statement option +# to PostgreSQL, this allows for observing queries without engaging in full +# debugging. +log_statement = true + +# if non 0, run in parallel query mode +#parallel_mode = false + +# if non 0, use query cache +#enable_query_cache = false + +#set pgpool2 hostname +#pgpool2_hostname = '' + +# system DB info +#system_db_hostname = 'localhost' +#system_db_port = 5432 +#system_db_dbname = 'pgpool' +#system_db_schema = 'pgpool_catalog' +#system_db_user = 'pgpool' +#system_db_password = '' + +# backend_hostname, backend_port, backend_weight +# here are examples +backend_hostname0 = 'localhost' +backend_port0 = 5432 +backend_weight0 = 1 +#backend_hostname0 = 'host1' +#backend_port0 = 5432 +#backend_weight0 = 1 +#backend_hostname1 = 'host2' +#backend_port1 = 5433 +#backend_weight1 = 1 diff --git a/fio/control b/fio/control new file mode 100644 index 000000000..8610eb4c3 --- /dev/null +++ b/fio/control @@ -0,0 +1 @@ +job.runtest(None, 'fio') diff --git a/fio/fio-1.6.tar.gz b/fio/fio-1.6.tar.gz new file mode 100644 index 000000000..9045ee0d6 Binary files /dev/null and b/fio/fio-1.6.tar.gz differ diff --git a/fio/fio-mixed.job b/fio/fio-mixed.job new file mode 100644 index 000000000..93ddfc981 --- /dev/null +++ b/fio/fio-mixed.job @@ -0,0 +1,40 @@ +; fio-mixed.job for autotest + +[global] +name=fio-sync +;directory=tmpfiles +rw=randrw +rwmixread=67 +rwmixwrite=33 +bsrange=16K-256K +direct=0 +end_fsync=1 +verify=crc32 +;ioscheduler=x +numjobs=4 + +[file1] +size=100M +ioengine=sync +mem=malloc + +[file2] +stonewall +size=100M +ioengine=aio +mem=shm +iodepth=4 + +[file3] +stonewall +size=100M +ioengine=mmap +mem=mmap +direct=1 + +[file4] +stonewall +size=100M +ioengine=splice +mem=malloc +direct=1 diff --git a/fio/fio.diff b/fio/fio.diff new file mode 100644 index 000000000..30fa1dfda --- /dev/null +++ b/fio/fio.diff @@ -0,0 +1,87 @@ +--- src/Makefile.old 2006-09-26 23:01:44.000000000 -0700 ++++ src/Makefile 2006-09-26 
23:02:35.000000000 -0700 +@@ -1,12 +1,12 @@ + CC = gcc +-CFLAGS = -Wall -O2 -g -D_GNU_SOURCE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 ++CFLAGS += -Wall -O2 -g -D_GNU_SOURCE -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 + PROGS = fio + SCRIPTS = fio_generate_plots + + all: depend $(PROGS) $(SCRIPTS) + + fio: fio.o ioengines.o init.o stat.o log.o time.o md5.o crc32.o +- $(CC) $(CFLAGS) -o $@ $(filter %.o,$^) -lpthread -laio -lm -lrt ++ $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(filter %.o,$^) -lpthread -laio -lm -lrt + + clean: + -rm -f *.o .depend cscope.out $(PROGS) +diff -uprN trunk/tests/fio/2.6.18+kernel-headers.patch autotest/tests/fio/2.6.18+kernel-headers.patch +--- trunk/tests/fio/2.6.18+kernel-headers.patch 1970-01-01 01:00:00.000000000 +0100 ++++ autotest/tests/fio/2.6.18+kernel-headers.patch 2006-10-05 14:06:19.000000000 +0200 +@@ -0,0 +1,64 @@ ++If you have a new kernel headers (2.6.18+), you will need this patch ++ ++From: Jens Axboe ++ ++diff --git a/os-linux.h b/os-linux.h ++index fd5356b..4c54c8c 100644 ++--- a/os-linux.h +++++ b/os-linux.h ++@@ -5,6 +5,7 @@ #include ++ #include ++ #include ++ #include +++#include ++ #include ++ ++ #define FIO_HAVE_LIBAIO ++@@ -39,33 +40,35 @@ static inline int ioprio_set(int which, ++ return syscall(__NR_ioprio_set, which, who, ioprio); ++ } ++ ++-static _syscall6(int, sys_splice, int, fdin, loff_t *, off_in, int, fdout, loff_t *, off_out, size_t, len, unsigned int, flags); ++-static _syscall4(int, sys_vmsplice, int, fd, const struct iovec *, iov, unsigned long, nr_segs, unsigned int, flags); ++-static _syscall4(int, sys_tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); +++/* +++ * Just check for SPLICE_F_MOVE, if that isn't there, assume the others +++ * aren't either. +++ */ +++#ifndef SPLICE_F_MOVE +++#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ +++#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ +++ /* we may still block on the fd we splice */ +++ /* from/to, of course */ +++#define SPLICE_F_MORE (0x04) /* expect more data */ +++#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */ ++ ++ static inline int splice(int fdin, loff_t *off_in, int fdout, loff_t *off_out, ++ size_t len, unsigned long flags) ++ { ++- return sys_splice(fdin, off_in, fdout, off_out, len, flags); +++ return syscall(__NR_sys_splice, fdin, off_in, fdout, off_out, len, flags); ++ } ++ ++ static inline int tee(int fdin, int fdout, size_t len, unsigned int flags) ++ { ++- return sys_tee(fdin, fdout, len, flags); +++ return syscall(__NR_sys_tee, fdin, fdout, len, flags); ++ } ++ ++ static inline int vmsplice(int fd, const struct iovec *iov, ++ unsigned long nr_segs, unsigned int flags) ++ { ++- return sys_vmsplice(fd, iov, nr_segs, flags); +++ return syscall(__NR_sys_vmsplice, fd, iov, nr_segs, flags); ++ } ++- ++-#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ ++-#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ ++- /* we may still block on the fd we splice */ ++- /* from/to, of course */ ++-#define SPLICE_F_MORE (0x04) /* expect more data */ ++-#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */ +++#endif ++ ++ #define SPLICE_DEF_SIZE (64*1024) + + diff --git a/fio/fio.py b/fio/fio.py new file mode 100644 index 000000000..f0bc41128 --- /dev/null +++ b/fio/fio.py @@ -0,0 +1,36 @@ +import test +from autotest_utils import * + +class fio(test.test): + version = 1 + + # http://brick.kernel.dk/snaps/fio-1.6.tar.gz + def setup(self, tarball = 'fio-1.6.tar.gz'): + 
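+ # Unpack the fio tarball, pull in autotest's bundled libaio dep, apply fio.diff and build against that dep's headers and libraries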
tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + + self.job.setup_dep(['libaio']) + ldflags = '-L' + self.autodir + '/deps/libaio/lib' + cflags = '-I' + self.autodir + '/deps/libaio/include' + var_ldflags = 'LDFLAGS="' + ldflags + '"' + var_cflags = 'CFLAGS="' + cflags + '"' + + os.chdir(self.srcdir) + system('patch -p1 < ../fio.diff') + system('%s %s make' % (var_ldflags, var_cflags)) + + def execute(self, args = '', user = 'root'): + os.chdir(self.srcdir) + ##vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir) + vars = 'LD_LIBRARY_PATH="' + self.autodir + '/deps/libaio/lib"' + ##args = '-m -o ' + self.resultsdir + '/fio-tio.log ' + self.srcdir + '/examples/tiobench-example'; + args = '-m -o ' + self.resultsdir + '/fio-mixed.log ' + self.bindir + '/fio-mixed.job'; + system(vars + ' ./fio ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(vars + ' ./fio ' + args) + profilers.stop(self) + profilers.report(self) diff --git a/fs_mark/control b/fs_mark/control new file mode 100644 index 000000000..f776a6bbd --- /dev/null +++ b/fs_mark/control @@ -0,0 +1 @@ +job.runtest(None, 'fs_mark', '/mnt') diff --git a/fs_mark/fs_mark-3.2.tgz b/fs_mark/fs_mark-3.2.tgz new file mode 100644 index 000000000..aea8a94cb Binary files /dev/null and b/fs_mark/fs_mark-3.2.tgz differ diff --git a/fs_mark/fs_mark.py b/fs_mark/fs_mark.py new file mode 100644 index 000000000..29fb419cb --- /dev/null +++ b/fs_mark/fs_mark.py @@ -0,0 +1,29 @@ +import test +from autotest_utils import * + +class fs_mark(test.test): + version = 1 + + # http://developer.osdl.org/dev/doubt/fs_mark/archive/fs_mark-3.2.tgz + def setup(self, tarball = 'fs_mark-3.2.tgz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, dir, iterations = 2, args = None): + os.chdir(self.srcdir) + if not args: + # Just provide a sample run parameters + args = '-s 10240 -n 1000' + for i in range(1, iterations+1): + system('./fs_mark -d %s %s' %(dir, args)) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system('./fs_mark -d %s %s' %(dir, args)) + profilers.stop(self) + profilers.report(self) diff --git a/fsfuzzer/control b/fsfuzzer/control new file mode 100644 index 000000000..896f14be9 --- /dev/null +++ b/fsfuzzer/control @@ -0,0 +1 @@ +job.runtest(None, 'fsfuzzer') diff --git a/fsfuzzer/fsfuzzer-0.6.tar.gz b/fsfuzzer/fsfuzzer-0.6.tar.gz new file mode 100644 index 000000000..0de3c1061 Binary files /dev/null and b/fsfuzzer/fsfuzzer-0.6.tar.gz differ diff --git a/fsfuzzer/fsfuzzer.py b/fsfuzzer/fsfuzzer.py new file mode 100755 index 000000000..f996b2c53 --- /dev/null +++ b/fsfuzzer/fsfuzzer.py @@ -0,0 +1,26 @@ +import test +from autotest_utils import * + +class fsfuzzer(test.test): + version = 1 + + # http://people.redhat.com/sgrubb/files/fsfuzzer-0.6.tar.gz + def setup(self, tarball = 'fsfuzzer-0.6.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, iterations = 1, fstype = 'iso9660'): + for i in range(1, iterations+1): + args = fstype + ' 1' + system(self.srcdir + '/run_test ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + 
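+ # 'args' keeps the value set in the loop above, so the profiled run repeats the same fuzz invocation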
profilers.start(self) + system(self.srcdir + '/run_test ' + args) + profilers.stop(self) + profilers.report(self) diff --git a/fsx/control b/fsx/control new file mode 100644 index 000000000..153d8a6ea --- /dev/null +++ b/fsx/control @@ -0,0 +1 @@ +job.runtest(None, 'fsx') diff --git a/fsx/ext3-tools.tar.gz b/fsx/ext3-tools.tar.gz new file mode 100644 index 000000000..db48be5ae Binary files /dev/null and b/fsx/ext3-tools.tar.gz differ diff --git a/fsx/fsx-linux.diff b/fsx/fsx-linux.diff new file mode 100644 index 000000000..ab708f3a2 --- /dev/null +++ b/fsx/fsx-linux.diff @@ -0,0 +1,54 @@ +--- src/fsx-linux.c 2006-10-18 04:18:14.000000000 -0400 ++++ src.new/fsx-linux.c 2006-10-18 04:16:05.000000000 -0400 +@@ -10,6 +10,8 @@ + * + * Small changes to work under Linux -- davej@suse.de + * ++ * Minor fixes to PAGE_SIZE handling -- Suzuki . ++ * + */ + + #undef _XOPEN_SOURCE +@@ -74,7 +76,7 @@ int logcount = 0; /* total ops */ + #define OP_SKIPPED 7 + + #ifndef PAGE_SIZE +-#define PAGE_SIZE 4096 ++#define PAGE_SIZE pagesize + #endif + #define PAGE_MASK (PAGE_SIZE - 1) + +@@ -129,6 +131,7 @@ int aio_rw(int rw, int fd, char *buf, un + FILE * fsxlogf = NULL; + int badoff = -1; + int closeopen = 0; ++int pagesize = 0; + + static void *round_up(void *ptr, unsigned long align, unsigned long offset) + { +@@ -493,7 +496,7 @@ domapread(unsigned offset, unsigned size + offset, offset + size - 1, size); + + pg_offset = offset & PAGE_MASK; +- map_size = pg_offset + size; ++ map_size = (pg_offset + size + PAGE_MASK) & ~PAGE_MASK; + + #ifdef linux + if ((p = (char *)mmap(0, map_size, PROT_READ, MAP_SHARED, fd, +@@ -638,7 +641,7 @@ domapwrite(unsigned offset, unsigned siz + } + } + pg_offset = offset & PAGE_MASK; +- map_size = pg_offset + size; ++ map_size = (pg_offset + size + PAGE_MASK) & ~PAGE_MASK; + + if ((p = (char *)mmap(0, map_size, PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, fd, +@@ -1106,6 +1109,7 @@ main(int argc, char **argv) + if (argc != 1) + usage(); + fname = argv[0]; ++ pagesize = getpagesize(); + + signal(SIGHUP, cleanup); + signal(SIGINT, cleanup); diff --git a/fsx/fsx.py b/fsx/fsx.py new file mode 100755 index 000000000..70cb50813 --- /dev/null +++ b/fsx/fsx.py @@ -0,0 +1,47 @@ +# This requires aio headers to build. +# Should work automagically out of deps now. 
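+# (setup() below pulls in autotest's libaio dep and builds fsx-linux against its headers and libraries.)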
+ +# NOTE - this should also have the ability to mount a filesystem, +# run the tests, unmount it, then fsck the filesystem + +import test +from autotest_utils import * + +class fsx(test.test): + version = 3 + + # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz + def setup(self, tarball = 'ext3-tools.tar.gz'): + self.tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(self.tarball, self.srcdir) + + self.job.setup_dep(['libaio']) + ldflags = '-L' + self.autodir + '/deps/libaio/lib' + cflags = '-I' + self.autodir + '/deps/libaio/include' + var_ldflags = 'LDFLAGS="' + ldflags + '"' + var_cflags = 'CFLAGS="' + cflags + '"' + self.make_flags = var_ldflags + ' ' + var_cflags + + os.chdir(self.srcdir) + system('patch -p1 < ../fsx-linux.diff') + system(self.make_flags + ' make fsx-linux') + + + def execute(self, testdir = None, repeat = '100000'): + args = '-N ' + repeat + if not testdir: + testdir = self.tmpdir + os.chdir(testdir) + libs = self.autodir+'/deps/libaio/lib/' + ld_path = prepend_path(libs, environ('LD_LIBRARY_PATH')) + var_ld_path = 'LD_LIBRARY_PATH=' + ld_path + cmd = self.srcdir + '/fsx-linux ' + args + ' poo' + system(var_ld_path + ' ' + cmd) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(var_ld_path + ' ' + cmd) + profilers.stop(self) + profilers.report(self) diff --git a/interbench/control b/interbench/control new file mode 100644 index 000000000..1b77dfd0b --- /dev/null +++ b/interbench/control @@ -0,0 +1 @@ +job.runtest(None, 'interbench') diff --git a/interbench/interbench-0.30.tar.bz2 b/interbench/interbench-0.30.tar.bz2 new file mode 100644 index 000000000..275d99bab Binary files /dev/null and b/interbench/interbench-0.30.tar.bz2 differ diff --git a/interbench/interbench.py b/interbench/interbench.py new file mode 100644 index 000000000..bcad9b5f0 --- /dev/null +++ b/interbench/interbench.py @@ -0,0 +1,30 @@ +import test +from autotest_utils import * + +class interbench(test.test): + version = 1 + + # http://www.kernel.org/pub/linux/kernel/people/ck/apps/interbench/interbench-0.30.tar.bz2 + def setup(self, tarball = 'interbench-0.30.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, iterations = 1, args = ''): + os.chdir(self.tmpdir) + args += " -c" + + for i in range(1, iterations+1): + system("%s/interbench -m 'run #%s' %s" % \ + (self.srcdir, i, args)) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system("%s/interbench -m 'profile run' %s" % \ + (self.srcdir, args)) + profilers.stop(self) + profilers.report(self) diff --git a/iozone/control b/iozone/control new file mode 100644 index 000000000..c84b1b0b0 --- /dev/null +++ b/iozone/control @@ -0,0 +1 @@ +job.runtest(None, 'iozone','/mnt') diff --git a/iozone/iozone.py b/iozone/iozone.py new file mode 100644 index 000000000..1f5f8bc13 --- /dev/null +++ b/iozone/iozone.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +import test +from autotest_utils import * + +class iozone(test.test): + version = 1 + + #http://www.iozone.org/src/current/iozone3_263.tar + def setup(self, tarball = 'iozone3_263.tar'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(os.path.join(self.srcdir, 'src/current')) + + arch = get_current_kernel_arch() + if (arch 
== 'ppc'): + system('make linux-powerpc') + elif (arch == 'ppc64'): + system('make linux-powerpc64') + elif (arch == 'x86_64'): + system('make linux-AMD64') + else: + system('make linux') + + def execute(self, dir, args = None): + os.chdir(dir) + if not args: + args = '-a' + system('%s/src/current/iozone %s' % (self.srcdir, args)) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system('%s/src/current/iozone %s' % (self.srcdir, args)) + profilers.stop(self) + profilers.report(self) diff --git a/iozone/iozone3_263.tar b/iozone/iozone3_263.tar new file mode 100644 index 000000000..23e50696b Binary files /dev/null and b/iozone/iozone3_263.tar differ diff --git a/isic/control b/isic/control new file mode 100644 index 000000000..e9bf4c35f --- /dev/null +++ b/isic/control @@ -0,0 +1 @@ +job.runtest(None, 'isic') diff --git a/isic/help b/isic/help new file mode 100644 index 000000000..b573128ce --- /dev/null +++ b/isic/help @@ -0,0 +1,43 @@ +ISIC -- IP Stack Integrity Checker + +Description: +ISIC is a suite of utilities to exercise the stability of an IP stack and its +component stacks (TCP, UDP, ICMP et al.). It generates piles of pseudo-random +packets of the target protocol. The packets can be given tendencies to conform to. +I.e. 50% of the packets generated can have IP Options. 25% of the packets can +be IP fragments... But the percentages are arbitrary and most of the packet +fields have a configurable tendency. + +The packets are then sent against the target machine to either penetrate its +firewall rules or find bugs in the IP stack. + +ISIC also contains a utility to generate raw ether frames to examine hardware +implementations. + +Other Uses: +Other novel uses people have found for ISIC include IDS testing, stack +fingerprinting, breaking sniffers and barraging the IRC kiddie. + + +Warning: +ISIC may break shit, melt your network, knock out your +firewall, or singe the fur off your cat + + +usage: isic [-v] [-D] -s -d + [-p ] [-k ] [-x ] + [-r ] [-m ] + Percentage Opts: [-F frags] [-V ] + [-I ] +notes: + [-D] causes packet info to be printed out -- DEBUGGING + + ex: -s a.b.c.d -d a.b.c.d -F100 + 100% of the packets will be ^^^^ fragments + ex: -s a.b.c.d -d a.b.c.d -p 100 -r 103334 + ex: -s rand -d rand -r 23342 + ^^^^ causes random source addr + ex: -s rand -d rand -k 10000 -p 10001 -r 666 + Will only send the 10001st packet with random seed 666; + this is especially useful if you suspect that packet is + causing a problem with the target stack.
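As with the other wrappers in this series, extra positional arguments in a control file are passed straight through to the test's execute() method, so a different isic run can be requested without editing isic.py. A minimal sketch (the packet count here is an arbitrary illustration, not a recommended value):

    job.runtest(None, 'isic', '-s rand -d 127.0.0.1 -p 500000')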
diff --git a/isic/isic-0.06.tar.bz2 b/isic/isic-0.06.tar.bz2 new file mode 100644 index 000000000..c35e13034 Binary files /dev/null and b/isic/isic-0.06.tar.bz2 differ diff --git a/isic/isic-0.06.tgz b/isic/isic-0.06.tgz new file mode 100644 index 000000000..6a27b1c49 Binary files /dev/null and b/isic/isic-0.06.tgz differ diff --git a/isic/isic-gcc41-fix.patch b/isic/isic-gcc41-fix.patch new file mode 100644 index 000000000..aea33900c --- /dev/null +++ b/isic/isic-gcc41-fix.patch @@ -0,0 +1,56 @@ +diff -uprN isic-old/icmpsic.c isic-new/icmpsic.c +--- isic-old/icmpsic.c 2004-11-06 21:11:11.000000000 +0100 ++++ isic-new/icmpsic.c 2006-05-02 16:43:42.000000000 +0200 +@@ -265,7 +265,8 @@ main(int argc, char **argv) + + payload = (short int *)((u_char *) icmp + 4); + for(cx = 0; cx <= (payload_s >> 1); cx+=1) +- (u_short) payload[cx] = rand() & 0xffff; ++// (u_short) payload[cx] = rand() & 0xffff; ++ payload[cx] = rand() & 0xffff; + + + if ( rand() <= (RAND_MAX * ICMPCksm) ) +diff -uprN isic-old/isic.c isic-new/isic.c +--- isic-old/isic.c 2004-11-06 21:11:14.000000000 +0100 ++++ isic-new/isic.c 2006-05-02 16:39:51.000000000 +0200 +@@ -229,8 +229,11 @@ main(int argc, char **argv) + + payload = (short int *)(buf + IP_H); + for(cx = 0; cx <= (payload_s >> 1); cx+=1) +- (u_int16_t) payload[cx] = rand() & 0xffff; +- (u_int16_t) payload[payload_s] = rand() & 0xffff; ++/* (u_int16_t) payload[cx] = rand() & 0xffff; ++ (u_int16_t) payload[payload_s] = rand() & 0xffff;*/ ++ payload[cx] = rand() & 0xffff; ++ payload[payload_s] = rand() & 0xffff; ++ + + if ( printout ) { + printf("%s ->", +diff -uprN isic-old/tcpsic.c isic-new/tcpsic.c +--- isic-old/tcpsic.c 2004-11-06 21:11:16.000000000 +0100 ++++ isic-new/tcpsic.c 2006-05-02 16:41:31.000000000 +0200 +@@ -317,7 +317,8 @@ main(int argc, char **argv) + + payload = (short int *)((u_char *) tcp + 20); + for(cx = 0; cx <= (payload_s >> 1); cx+=1) +- (u_int16_t) payload[cx] = rand() & 0xffff; ++// (u_int16_t) payload[cx] = rand() & 0xffff; ++ payload[cx] = rand() & 0xffff; + + if ( rand() <= (RAND_MAX * TCPCksm) ) + libnet_do_checksum(l, (u_int8_t *)buf, IPPROTO_TCP, (tcp->th_off << 2) +diff -uprN isic-old/udpsic.c isic-new/udpsic.c +--- isic-old/udpsic.c 2004-11-06 21:11:20.000000000 +0100 ++++ isic-new/udpsic.c 2006-05-02 16:42:55.000000000 +0200 +@@ -292,7 +292,8 @@ main(int argc, char **argv) + + payload = (short int *)((u_char *) udp + UDP_H); + for(cx = 0; cx <= (payload_s >> 1); cx+=1) +- (u_int16_t) payload[cx] = rand() & 0xffff; ++// (u_int16_t) payload[cx] = rand() & 0xffff; ++ payload[cx] = rand() & 0xffff; + + if ( printout ) { + printf("%s,%i ->", diff --git a/isic/isic.py b/isic/isic.py new file mode 100644 index 000000000..54427e80c --- /dev/null +++ b/isic/isic.py @@ -0,0 +1,20 @@ +import test, os_dep +from autotest_utils import * + +class isic(test.test): + version = 1 + + # http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz + # + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch + + def setup(self, tarball = 'isic-0.06.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + os_dep.library('libnet.so') + system('./configure') + system('make') + + def execute(self, args = '-s rand -d 127.0.0.1 -p 10000000'): + system(self.srcdir + '/isic ' + args) diff --git a/kernbench/config b/kernbench/config new file mode 100644 index 000000000..619913807 --- /dev/null +++ b/kernbench/config @@ -0,0 +1,1441 @@ +# +# Automatically generated make config: 
don't edit +# Linux kernel version: 2.6.11.6 +# Thu Mar 31 21:15:11 2005 +# +CONFIG_X86=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_IOMAP=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_HOTPLUG=y +CONFIG_KOBJECT_UEVENT=y +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SHMEM=y +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +# CONFIG_TINY_SHMEM is not set + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# Processor type and features +# +CONFIG_X86_PC=y +# CONFIG_X86_ELAN is not set +# CONFIG_X86_VOYAGER is not set +# CONFIG_X86_NUMAQ is not set +# CONFIG_X86_SUMMIT is not set +# CONFIG_X86_BIGSMP is not set +# CONFIG_X86_VISWS is not set +# CONFIG_X86_GENERICARCH is not set +# CONFIG_X86_ES7000 is not set +# CONFIG_M386 is not set +# CONFIG_M486 is not set +CONFIG_M586=y +# CONFIG_M586TSC is not set +# CONFIG_M586MMX is not set +# CONFIG_M686 is not set +# CONFIG_MPENTIUMII is not set +# CONFIG_MPENTIUMIII is not set +# CONFIG_MPENTIUMM is not set +# CONFIG_MPENTIUM4 is not set +# CONFIG_MK6 is not set +# CONFIG_MK7 is not set +# CONFIG_MK8 is not set +# CONFIG_MCRUSOE is not set +# CONFIG_MEFFICEON is not set +# CONFIG_MWINCHIPC6 is not set +# CONFIG_MWINCHIP2 is not set +# CONFIG_MWINCHIP3D is not set +# CONFIG_MCYRIXIII is not set +# CONFIG_MVIAC3_2 is not set +# CONFIG_X86_GENERIC is not set +CONFIG_X86_CMPXCHG=y +CONFIG_X86_XADD=y +CONFIG_X86_L1_CACHE_SHIFT=5 +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_X86_PPRO_FENCE=y +CONFIG_X86_F00F_BUG=y +CONFIG_X86_WP_WORKS_OK=y +CONFIG_X86_INVLPG=y +CONFIG_X86_BSWAP=y +CONFIG_X86_POPAD_OK=y +CONFIG_X86_ALIGNMENT_16=y +# CONFIG_HPET_TIMER is not set +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_X86_UP_APIC is not set +# CONFIG_X86_MCE is not set +# CONFIG_TOSHIBA is not set +# CONFIG_I8K is not set +# CONFIG_MICROCODE is not set +# CONFIG_X86_MSR is not set +# CONFIG_X86_CPUID is not set + +# +# Firmware Drivers +# +# CONFIG_EDD is not set +CONFIG_NOHIGHMEM=y +# CONFIG_HIGHMEM4G is not set +# CONFIG_HIGHMEM64G is not set +# CONFIG_MATH_EMULATION is not set +# CONFIG_MTRR is not set +# CONFIG_REGPARM is not set + +# +# Power management options (ACPI, APM) +# +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_SOFTWARE_SUSPEND is not set + +# +# ACPI (Advanced Configuration and Power Interface) Support +# +# CONFIG_ACPI is not set + +# +# APM (Advanced Power Management) BIOS Support +# +CONFIG_APM=y +# CONFIG_APM_IGNORE_USER_SUSPEND is not set +# CONFIG_APM_DO_ENABLE is not set +# CONFIG_APM_CPU_IDLE is not set +# CONFIG_APM_DISPLAY_BLANK is not set +# CONFIG_APM_RTC_IS_GMT is not set +# CONFIG_APM_ALLOW_INTS is not set +# CONFIG_APM_REAL_MODE_POWER_OFF is not set + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# 
+CONFIG_PCI=y +# CONFIG_PCI_GOBIOS is not set +# CONFIG_PCI_GOMMCONFIG is not set +# CONFIG_PCI_GODIRECT is not set +CONFIG_PCI_GOANY=y +CONFIG_PCI_BIOS=y +CONFIG_PCI_DIRECT=y +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y +CONFIG_ISA=y +# CONFIG_EISA is not set +# CONFIG_MCA is not set +# CONFIG_SCx200 is not set + +# +# PCCARD (PCMCIA/CardBus) support +# +# CONFIG_PCCARD is not set + +# +# PC-card bridges +# +CONFIG_PCMCIA_PROBE=y + +# +# PCI Hotplug Support +# +# CONFIG_HOTPLUG_PCI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=y +CONFIG_BINFMT_MISC=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +CONFIG_PARPORT=y +CONFIG_PARPORT_PC=y +CONFIG_PARPORT_PC_CML1=y +# CONFIG_PARPORT_SERIAL is not set +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +# CONFIG_PARPORT_OTHER is not set +# CONFIG_PARPORT_1284 is not set + +# +# Plug and Play support +# +# CONFIG_PNP is not set + +# +# Block devices +# +CONFIG_BLK_DEV_FD=y +# CONFIG_BLK_DEV_XD is not set +# CONFIG_PARIDE is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_UB is not set +# CONFIG_BLK_DEV_RAM is not set +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_LBD is not set +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_ATA_OVER_ETH is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +# CONFIG_BLK_DEV_IDE_SATA is not set +# CONFIG_BLK_DEV_HD_IDE is not set +CONFIG_BLK_DEV_IDEDISK=y +CONFIG_IDEDISK_MULTI_MODE=y +CONFIG_BLK_DEV_IDECD=y +# CONFIG_BLK_DEV_IDETAPE is not set +CONFIG_BLK_DEV_IDEFLOPPY=y +CONFIG_BLK_DEV_IDESCSI=y +# CONFIG_IDE_TASK_IOCTL is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_CMD640=y +# CONFIG_BLK_DEV_CMD640_ENHANCED is not set +CONFIG_BLK_DEV_IDEPCI=y +CONFIG_IDEPCI_SHARE_IRQ=y +# CONFIG_BLK_DEV_OFFBOARD is not set +# CONFIG_BLK_DEV_GENERIC is not set +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_RZ1000=y +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_IDEDMA_FORCED is not set +CONFIG_IDEDMA_PCI_AUTO=y +# CONFIG_IDEDMA_ONLYDISK is not set +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +# CONFIG_BLK_DEV_ATIIXP is not set +# CONFIG_BLK_DEV_CMD64X is not set +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_CY82C693 is not set +# CONFIG_BLK_DEV_CS5520 is not set +# CONFIG_BLK_DEV_CS5530 is not set +# CONFIG_BLK_DEV_HPT34X is not set +# CONFIG_BLK_DEV_HPT366 is not set +# CONFIG_BLK_DEV_SC1200 is not set +CONFIG_BLK_DEV_PIIX=y +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +CONFIG_BLK_DEV_SIS5513=y +# CONFIG_BLK_DEV_SLC90E66 
is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_IDE_ARM is not set +# CONFIG_IDE_CHIPSETS is not set +CONFIG_BLK_DEV_IDEDMA=y +# CONFIG_IDEDMA_IVB is not set +CONFIG_IDEDMA_AUTO=y +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +CONFIG_SCSI=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set + +# +# SCSI Transport Attributes +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set + +# +# SCSI low-level drivers +# +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_7000FASST is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AHA152X is not set +# CONFIG_SCSI_AHA1542 is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +CONFIG_SCSI_AIC7XXX_OLD=y +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_DPT_I2O is not set +# CONFIG_SCSI_IN2000 is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_DTC3280 is not set +# CONFIG_SCSI_EATA is not set +# CONFIG_SCSI_EATA_PIO is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_GENERIC_NCR5380 is not set +# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_NCR53C406A is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_PAS16 is not set +# CONFIG_SCSI_PSI240I is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +CONFIG_SCSI_QLOGIC_ISP=y +# CONFIG_SCSI_QLOGIC_FC is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA2XXX=y +# CONFIG_SCSI_QLA21XX is not set +# CONFIG_SCSI_QLA22XX is not set +# CONFIG_SCSI_QLA2300 is not set +# CONFIG_SCSI_QLA2322 is not set +# CONFIG_SCSI_QLA6312 is not set +# CONFIG_SCSI_SYM53C416 is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_DC390T is not set +# CONFIG_SCSI_T128 is not set +# CONFIG_SCSI_U14_34F is not set +# CONFIG_SCSI_ULTRASTOR is not set +# CONFIG_SCSI_NSP32 is not set +# CONFIG_SCSI_DEBUG is not set + +# +# Old CD-ROM drivers (not SCSI, not IDE) +# +# CONFIG_CD_NO_IDESCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# 
CONFIG_INET_TUNNEL is not set +CONFIG_IP_TCPDIAG=y +# CONFIG_IP_TCPDIAG_IPV6 is not set + +# +# IP: Virtual Server Configuration +# +# CONFIG_IP_VS is not set +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=y +# CONFIG_IP_NF_CT_ACCT is not set +# CONFIG_IP_NF_CONNTRACK_MARK is not set +# CONFIG_IP_NF_CT_PROTO_SCTP is not set +# CONFIG_IP_NF_FTP is not set +# CONFIG_IP_NF_IRC is not set +# CONFIG_IP_NF_TFTP is not set +# CONFIG_IP_NF_AMANDA is not set +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=y +# CONFIG_IP_NF_MATCH_LIMIT is not set +# CONFIG_IP_NF_MATCH_IPRANGE is not set +# CONFIG_IP_NF_MATCH_MAC is not set +# CONFIG_IP_NF_MATCH_PKTTYPE is not set +# CONFIG_IP_NF_MATCH_MARK is not set +# CONFIG_IP_NF_MATCH_MULTIPORT is not set +# CONFIG_IP_NF_MATCH_TOS is not set +# CONFIG_IP_NF_MATCH_RECENT is not set +# CONFIG_IP_NF_MATCH_ECN is not set +# CONFIG_IP_NF_MATCH_DSCP is not set +# CONFIG_IP_NF_MATCH_AH_ESP is not set +# CONFIG_IP_NF_MATCH_LENGTH is not set +# CONFIG_IP_NF_MATCH_TTL is not set +# CONFIG_IP_NF_MATCH_TCPMSS is not set +# CONFIG_IP_NF_MATCH_HELPER is not set +CONFIG_IP_NF_MATCH_STATE=y +# CONFIG_IP_NF_MATCH_CONNTRACK is not set +# CONFIG_IP_NF_MATCH_OWNER is not set +# CONFIG_IP_NF_MATCH_ADDRTYPE is not set +# CONFIG_IP_NF_MATCH_REALM is not set +# CONFIG_IP_NF_MATCH_SCTP is not set +# CONFIG_IP_NF_MATCH_COMMENT is not set +# CONFIG_IP_NF_MATCH_HASHLIMIT is not set +CONFIG_IP_NF_FILTER=y +# CONFIG_IP_NF_TARGET_REJECT is not set +# CONFIG_IP_NF_TARGET_LOG is not set +# CONFIG_IP_NF_TARGET_ULOG is not set +# CONFIG_IP_NF_TARGET_TCPMSS is not set +# CONFIG_IP_NF_NAT is not set +# CONFIG_IP_NF_MANGLE is not set +# CONFIG_IP_NF_RAW is not set +# CONFIG_IP_NF_ARPTABLES is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +CONFIG_NET_VENDOR_3COM=y +# CONFIG_EL1 is not set +# CONFIG_EL2 is not set +# CONFIG_ELPLUS is not set +# CONFIG_EL16 is not set +CONFIG_EL3=y +# CONFIG_3C515 is not set +CONFIG_VORTEX=y +# CONFIG_TYPHOON is not set +# CONFIG_LANCE is not set +# CONFIG_NET_VENDOR_SMC is not set +# CONFIG_NET_VENDOR_RACAL is not set + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_AT1700 is not set +# CONFIG_DEPCA is not set +# CONFIG_HP100 is not set 
+CONFIG_NET_ISA=y +# CONFIG_E2100 is not set +# CONFIG_EWRK3 is not set +# CONFIG_EEXPRESS is not set +# CONFIG_EEXPRESS_PRO is not set +# CONFIG_HPLAN_PLUS is not set +# CONFIG_HPLAN is not set +# CONFIG_LP486E is not set +# CONFIG_ETH16I is not set +# CONFIG_NE2000 is not set +# CONFIG_ZNET is not set +# CONFIG_SEEQ8005 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +CONFIG_ADAPTEC_STARFIRE=y +# CONFIG_ADAPTEC_STARFIRE_NAPI is not set +# CONFIG_AC3200 is not set +# CONFIG_APRICOT is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_CS89x0 is not set +# CONFIG_DGRS is not set +CONFIG_EEPRO100=y +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +CONFIG_NE2K_PCI=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +CONFIG_SIS900=y +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set +# CONFIG_NET_POCKET is not set + +# +# Ethernet (1000 Mbit) +# +CONFIG_ACENIC=y +# CONFIG_ACENIC_OMIT_TIGON_I is not set +# CONFIG_DL2K is not set +CONFIG_E1000=y +# CONFIG_E1000_NAPI is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_VIA_VELOCITY is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PLIP is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NET_FC is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_INPORT is not set +# CONFIG_MOUSE_LOGIBM is not set +# CONFIG_MOUSE_PC110PAD is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_CONSOLE is not set +CONFIG_SERIAL_8250_NR_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y 
+CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_PRINTER=y +# CONFIG_LP_CONSOLE is not set +# CONFIG_PPDEV is not set +# CONFIG_TIPAR is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# CONFIG_SONYPI is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +CONFIG_AGP=y +# CONFIG_AGP_ALI is not set +# CONFIG_AGP_ATI is not set +# CONFIG_AGP_AMD is not set +# CONFIG_AGP_AMD64 is not set +CONFIG_AGP_INTEL=y +# CONFIG_AGP_INTEL_MCH is not set +# CONFIG_AGP_NVIDIA is not set +CONFIG_AGP_SIS=y +# CONFIG_AGP_SWORKS is not set +# CONFIG_AGP_VIA is not set +# CONFIG_AGP_EFFICEON is not set +# CONFIG_DRM is not set +# CONFIG_MWAVE is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_HANGCHECK_TIMER is not set + +# +# I2C support +# +CONFIG_I2C=y +# CONFIG_I2C_CHARDEV is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_ELEKTOR is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_I810 is not set +# CONFIG_I2C_ISA is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_PARPORT is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_PROSAVAGE is not set +# CONFIG_I2C_SAVAGE4 is not set +# CONFIG_SCx200_ACB is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_VOODOO3 is not set +# CONFIG_I2C_PCA_ISA is not set + +# +# Hardware Sensors Chip support +# +# CONFIG_I2C_SENSOR is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set + +# +# Other I2C Chip support +# +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_RTC8564 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Dallas's 1-wire bus +# +# CONFIG_W1 is not set + +# +# Misc devices +# +# CONFIG_IBM_ASM is not set + +# +# Multimedia devices +# +CONFIG_VIDEO_DEV=y + +# +# Video For Linux +# + +# +# Video Adapters +# +# CONFIG_VIDEO_BT848 is 
not set +# CONFIG_VIDEO_PMS is not set +# CONFIG_VIDEO_BWQCAM is not set +# CONFIG_VIDEO_CQCAM is not set +# CONFIG_VIDEO_CPIA is not set +# CONFIG_VIDEO_SAA5246A is not set +# CONFIG_VIDEO_SAA5249 is not set +# CONFIG_TUNER_3036 is not set +# CONFIG_VIDEO_STRADIS is not set +# CONFIG_VIDEO_ZORAN is not set +# CONFIG_VIDEO_SAA7134 is not set +# CONFIG_VIDEO_MXB is not set +# CONFIG_VIDEO_DPC is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_CX88 is not set +# CONFIG_VIDEO_OVCAMCHIP is not set + +# +# Radio Adapters +# +# CONFIG_RADIO_CADET is not set +# CONFIG_RADIO_RTRACK is not set +# CONFIG_RADIO_RTRACK2 is not set +# CONFIG_RADIO_AZTECH is not set +# CONFIG_RADIO_GEMTEK is not set +# CONFIG_RADIO_GEMTEK_PCI is not set +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_MAESTRO is not set +# CONFIG_RADIO_SF16FMI is not set +# CONFIG_RADIO_SF16FMR2 is not set +# CONFIG_RADIO_TERRATEC is not set +# CONFIG_RADIO_TRUST is not set +# CONFIG_RADIO_TYPHOON is not set +# CONFIG_RADIO_ZOLTRIX is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set +# CONFIG_VIDEO_SELECT is not set + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +CONFIG_SOUND=y + +# +# Advanced Linux Sound Architecture +# +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +CONFIG_SND_RAWMIDI=y +CONFIG_SND_SEQUENCER=y +# CONFIG_SND_SEQ_DUMMY is not set +CONFIG_SND_OSSEMUL=y +CONFIG_SND_MIXER_OSS=y +CONFIG_SND_PCM_OSS=y +CONFIG_SND_SEQUENCER_OSS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set + +# +# Generic devices +# +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_VIRMIDI is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set + +# +# ISA devices +# +# CONFIG_SND_AD1848 is not set +# CONFIG_SND_CS4231 is not set +# CONFIG_SND_CS4232 is not set +# CONFIG_SND_CS4236 is not set +# CONFIG_SND_ES1688 is not set +# CONFIG_SND_ES18XX is not set +# CONFIG_SND_GUSCLASSIC is not set +# CONFIG_SND_GUSEXTREME is not set +# CONFIG_SND_GUSMAX is not set +# CONFIG_SND_INTERWAVE is not set +# CONFIG_SND_INTERWAVE_STB is not set +# CONFIG_SND_OPTI92X_AD1848 is not set +# CONFIG_SND_OPTI92X_CS4231 is not set +# CONFIG_SND_OPTI93X is not set +# CONFIG_SND_SB8 is not set +# CONFIG_SND_SB16 is not set +# CONFIG_SND_SBAWE is not set +# CONFIG_SND_WAVEFRONT is not set +# CONFIG_SND_CMI8330 is not set +# CONFIG_SND_OPL3SA2 is not set +# CONFIG_SND_SGALAXY is not set +# CONFIG_SND_SSCAPE is not set + +# +# PCI devices +# +CONFIG_SND_AC97_CODEC=y +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_ATIIXP is not set +# CONFIG_SND_ATIIXP_MODEM is not set +# CONFIG_SND_AU8810 is not set +# CONFIG_SND_AU8820 is not set +# CONFIG_SND_AU8830 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_BT87X is not set +CONFIG_SND_CS46XX=y +# CONFIG_SND_CS46XX_NEW_DSP is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_EMU10K1X is not set +# CONFIG_SND_CA0106 is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_MIXART is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_TRIDENT is not set +# CONFIG_SND_YMFPCI is not set +# CONFIG_SND_ALS4000 is not set +# CONFIG_SND_CMIPCI is not set 
+CONFIG_SND_ENS1370=y +CONFIG_SND_ENS1371=y +# CONFIG_SND_ES1938 is not set +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +CONFIG_SND_INTEL8X0=y +# CONFIG_SND_INTEL8X0M is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VIA82XX_MODEM is not set +# CONFIG_SND_VX222 is not set + +# +# USB devices +# +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_USX2Y is not set + +# +# Open Sound System +# +# CONFIG_SOUND_PRIME is not set + +# +# USB support +# +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +# CONFIG_USB_BANDWIDTH is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_SUSPEND is not set +# CONFIG_USB_OTG is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y + +# +# USB Host Controller Drivers +# +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_SPLIT_ISO is not set +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_BLUETOOTH_TTY is not set +# CONFIG_USB_MIDI is not set +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set + +# +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_RW_DETECT is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_DPCM is not set +# CONFIG_USB_STORAGE_HP8200e is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set + +# +# USB Input Devices +# +# CONFIG_USB_HID is not set + +# +# USB HID Boot Protocol drivers +# +# CONFIG_USB_KBD is not set +# CONFIG_USB_MOUSE is not set +# CONFIG_USB_AIPTEK is not set +# CONFIG_USB_WACOM is not set +# CONFIG_USB_KBTAB is not set +# CONFIG_USB_POWERMATE is not set +# CONFIG_USB_MTOUCH is not set +# CONFIG_USB_EGALAX is not set +# CONFIG_USB_XPAD is not set +# CONFIG_USB_ATI_REMOTE is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB Multimedia devices +# +# CONFIG_USB_DABUSB is not set +# CONFIG_USB_VICAM is not set +# CONFIG_USB_DSBR is not set +# CONFIG_USB_IBMCAM is not set +# CONFIG_USB_KONICAWC is not set +# CONFIG_USB_OV511 is not set +# CONFIG_USB_SE401 is not set +# CONFIG_USB_SN9C102 is not set +# CONFIG_USB_STV680 is not set + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_USBNET is not set + +# +# USB port drivers +# +# CONFIG_USB_USS720 is not set + +# +# USB Serial Converter support +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +# CONFIG_USB_SERIAL_EMPEG is not set +# CONFIG_USB_SERIAL_FTDI_SIO is not set +CONFIG_USB_SERIAL_VISOR=y +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# 
CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_PL2303 is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OMNINET is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_AUERSWALD is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_PHIDGETKIT is not set +# CONFIG_USB_PHIDGETSERVO is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_TEST is not set + +# +# USB ATM/DSL drivers +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# MMC/SD Card support +# +# CONFIG_MMC is not set + +# +# InfiniBand support +# +# CONFIG_INFINIBAND is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set + +# +# XFS support +# +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +CONFIG_AUTOFS4_FS=y + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +# CONFIG_ZISOFS is not set +CONFIG_UDF_FS=y +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_TMPFS_XATTR is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V4 is not set +# CONFIG_NFSD_TCP is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=y +# CONFIG_SMB_NLS_DEFAULT is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set 
+# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Profiling support +# +CONFIG_PROFILING=y +CONFIG_OPROFILE=y + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_FS is not set +CONFIG_FRAME_POINTER=y +CONFIG_EARLY_PRINTK=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_KPROBES is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_4KSTACKS=y + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Hardware crypto devices +# + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_X86_BIOS_REBOOT=y +CONFIG_PC=y diff --git a/kernbench/control b/kernbench/control new file mode 100755 index 000000000..daf862abb --- /dev/null +++ b/kernbench/control @@ -0,0 +1 @@ +job.runtest(None, 'kernbench', 2, 5) diff --git a/kernbench/control.readprofile b/kernbench/control.readprofile new file mode 100755 index 000000000..f18f3773c --- /dev/null +++ b/kernbench/control.readprofile @@ -0,0 +1,3 @@ +job.profilers.add('readprofile') +job.runtest(None, 'kernbench', 0, 5) +job.profilers.delete('readprofile') diff --git a/kernbench/kernbench.old b/kernbench/kernbench.old new file mode 100755 index 000000000..504f53792 --- /dev/null +++ b/kernbench/kernbench.old @@ -0,0 +1,48 @@ +print "I AM KERNBENCH!" 
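+# Legacy standalone driver: parse -j/-i/-c/-k options, build the kernel 'iterations' times with 'threads' make jobs, then collect the elapsed build times into one log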
+ +threads = 2 * count_cpus() +kernelver = autodir + '/src/linux-2.6.14.tar.bz2' # "2.6.7" +config = autodir + "/bin/tests/kernbench/config" +iterations = 1 + +def usage(): + print "kernbench [-j threads] [-i iterations] [-c config] [-k kernel]" + + +def getopts(): + try: + opts, args = getopt.getopt(argv, "hj:i:c:k:", ["help"]) + except getopt.GetoptError: + usage() + sys.exit(2) + + global threads,iterations,config,kernelver + + for o, a in opts: + if o == "-j": + threads = int(a) + if (threads == 0): + threads = "" + if o == "-i": + iterations = int(a) + if o == "-c": + config = a + if o == "-k": + kernelver = a + if o in ("-h", "--help"): + usage() + sys.exit() + + +getopts() +print "kernbench -j %d -i %d -c %s -k %s" % (threads, iterations, config, kernelver) + +top_dir = system.tmpdir+'/kernbench' +testkernel = kernel.kernel(system, top_dir, kernelver, '', config, None) + +testkernel.build_timed(threads) # warmup run +for i in range(1, iterations+1): + testkernel.build_timed(threads, '../log/time.%d' % i) + +os.chdir(top_dir + '/log') +os.system("grep elapsed time.* > time") diff --git a/kernbench/kernbench.py b/kernbench/kernbench.py new file mode 100755 index 000000000..733c9333a --- /dev/null +++ b/kernbench/kernbench.py @@ -0,0 +1,39 @@ +import test, pickle +from autotest_utils import * + +class kernbench(test.test): + version = 1 + + def setup(self): + # http://kernel.org/pub/linux/kernel/v2.6/linux-2.6.14.tar.bz2 + if (os.path.exists(self.bindir + '/linux-2.6.14.tar.bz2')): + tarball = self.bindir + '/linux-2.6.14.tar.bz2' + else: + tarball = '/usr/local/src/linux-2.6.14.tar.bz2' + kernel = self.job.kernel(self.srcdir, tarball) + kernel.config('') + # have to save this off, as we might use it in another run + kernel.pickle_dump(self.srcdir + '/.pickle') + + + def execute(self, iterations = 1, threads = 2 * count_cpus()): + kernel = pickle.load(open(self.srcdir + '/.pickle', 'r')) + print "kernbench x %d: %d threads" % (iterations, threads) + + kernel.build_timed(threads) # warmup run + for i in range(1, iterations+1): + logfile = self.resultsdir+'/time.%d' % i + kernel.build_timed(threads, logfile) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + logfile = self.resultsdir+'/time.profile' + kernel.build_timed(threads, logfile) + profilers.stop(self) + profilers.report(self) + + kernel.clean() # Don't leave litter lying around + os.chdir(self.resultsdir) + system("grep elapsed time.* > time") diff --git a/libhugetlbfs/control b/libhugetlbfs/control new file mode 100644 index 000000000..394aa675f --- /dev/null +++ b/libhugetlbfs/control @@ -0,0 +1 @@ +job.runtest(None, 'libhugetlbfs', '/mnt') diff --git a/libhugetlbfs/libhugetlbfs-1.0-pre4-1.tar.gz b/libhugetlbfs/libhugetlbfs-1.0-pre4-1.tar.gz new file mode 100644 index 000000000..beb01719a Binary files /dev/null and b/libhugetlbfs/libhugetlbfs-1.0-pre4-1.tar.gz differ diff --git a/libhugetlbfs/libhugetlbfs.py b/libhugetlbfs/libhugetlbfs.py new file mode 100644 index 000000000..43adc0144 --- /dev/null +++ b/libhugetlbfs/libhugetlbfs.py @@ -0,0 +1,48 @@ +import test, re +from autotest_utils import * + +class libhugetlbfs(test.test): + version = 1 + + # http://prdownloads.sourceforge.net/libhugetlbfs/libhugetlbfs-1.0-pre4-1.tar.gz?download + def setup(self, tarball = 'libhugetlbfs-1.0-pre4-1.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def 
execute(self, dir, pages_requested = 20): + # Check kernel version, should >= 2.6.16 + version = system_output('uname -r') + (major, minor, sub) = re.split(r'[.-]', version)[0:3] + if int(major) < 2 or int(minor) < 6 or int(sub) < 16: + raise TestError('Kernel version %s < 2.6.16' % version) + + # Check huge page number + system('echo %d > /proc/sys/vm/nr_hugepages' % pages_requested, 1) + pages_available = 0 + if os.path.exists('/proc/sys/vm/nr_hugepages'): + pages_available = int(open('/proc/sys/vm/nr_hugepages', 'r').readline()) + # if pages == 0: + # raise TestError('No huge pages allocated, exiting test') + if pages_available < pages_requested: + raise TestError('%d huge pages available, < %d pages requested' % (pages_available, pages_requested)) + + # Check if hugetlbfs has been mounted + if not file_contains_pattern('/proc/mounts', 'hugetlbfs'): + system('mount -t hugetlbfs none %s' % dir) + + os.chdir(self.srcdir) + system('make check') + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + os.chdir(self.srcdir) + system('make check') + profilers.stop(self) + profilers.report(self) + + system('umount %s' % dir) diff --git a/lmbench/control b/lmbench/control new file mode 100644 index 000000000..0d0dfade3 --- /dev/null +++ b/lmbench/control @@ -0,0 +1 @@ +job.runtest(None, 'lmbench') diff --git a/lmbench/lmbench.py b/lmbench/lmbench.py new file mode 100755 index 000000000..452acae4b --- /dev/null +++ b/lmbench/lmbench.py @@ -0,0 +1,44 @@ +# This will need more work on the configuration stuff before it will function +import test +from autotest_utils import * + +class lmbench(test.test): + version = 2 + + def setup(self, tarball = 'lmbench3.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + # http://www.bitmover.com/lm/lmbench/lmbench3.tar.gz + # + lmbench3.diff + # removes Makefile references to bitkeeper + # default mail to no, fix job placement defaults (masouds) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + + def execute(self, iterations = 1, mem = '', fastmem = 'NO', + slowfs = 'NO', disks = '', disks_desc = '', + mhz = '', remote = '', enough = '5000', sync_max = '1', + fsdir = None, file = None): + if not fsdir: + fsdir = self.tmpdir + if not file: + file = self.tmpdir+'XXX' + + os.chdir(self.srcdir) + cmd = "yes '' | make rerun" + for i in range(1, iterations+1): + system(cmd) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(cmd) + profilers.stop(self) + profilers.report(self) + # Get the results: + outputdir = self.srcdir + "/results" + results = self.resultsdir + "/summary.txt" + system("make -C " + outputdir + " summary > " + results) diff --git a/lmbench/lmbench3.diff b/lmbench/lmbench3.diff new file mode 100644 index 000000000..05819c5aa --- /dev/null +++ b/lmbench/lmbench3.diff @@ -0,0 +1,90 @@ +--- lmbench3.old/src/Makefile 2005-08-22 17:19:54.000000000 -0700 ++++ lmbench3.new/src/Makefile 2006-04-22 13:10:59.000000000 -0700 +@@ -165,41 +165,6 @@ debug: + assembler: + @env CFLAGS=-O MAKE="$(MAKE)" MAKEFLAGS="$(MAKEFLAGS)" CC="${CC}" OS="${OS}" ../scripts/build asm + +-bk.ver: ../SCCS/s.ChangeSet +- rm -f bk.ver +- -echo `bk prs -hr+ -d'$$if(:SYMBOL:){:SYMBOL: }:UTC:' ../ChangeSet;` > bk.ver +- touch bk.ver +- +-dist: bk.ver +- @if [ "X`cd ..; bk sfiles -c`" != "X" ]; then \ +- echo "modified files!"; \ +- false; \ +- fi +- @if [ "X`cd ..; bk pending`" != "X" 
]; then \ +- echo "pending changes!"; \ +- false; \ +- fi +- cd ..; \ +- SRCDIR=`pwd`; \ +- DIR=`basename $${SRCDIR}`; \ +- VERSION=`cat src/bk.ver| awk '{print $$1;}' | sed -e 's/Version-//g'`; \ +- cd ..; \ +- bk clone $${DIR} /tmp/lmbench-$${VERSION}; \ +- cd /tmp/lmbench-$${VERSION}; \ +- bk sfiles | xargs touch; \ +- sleep 5; \ +- bk get -s; \ +- for d in doc results scripts src; do \ +- cd $$d; bk get -s; cd ..; \ +- done; \ +- bk sfiles -U -g | xargs touch; \ +- cd src; \ +- make bk.ver; \ +- cd /tmp; \ +- tar czf $${SRCDIR}/../lmbench-$${VERSION}.tgz \ +- lmbench-$${VERSION}; \ +- rm -rf /tmp/lmbench-$${VERSION}; +- + get $(SRCS): + -get -s $(SRCS) + +@@ -228,9 +193,9 @@ testmake: $(SRCS) $(UTILS) # used by scr + install install-target dist get edit get-e clean clobber \ + share depend testmake + +-$O/lmbench : ../scripts/lmbench bk.ver ++$O/lmbench : ../scripts/lmbench + rm -f $O/lmbench +- sed -e "s//`cat bk.ver`/g" < ../scripts/lmbench > $O/lmbench ++ sed -e "s//666/g" < ../scripts/lmbench > $O/lmbench + chmod +x $O/lmbench + + $O/lmbench.a: $(LIBOBJS) +--- src/scripts/config-run.old 2006-06-07 12:46:15.000000000 -0700 ++++ src/scripts/config-run 2006-06-07 12:52:07.000000000 -0700 +@@ -115,9 +115,12 @@ + three benchmark processes reading data and doing the measurements. + + EOF +- echo $ECHON "Job placement selection: $ECHOC" ++ echo $ECHON "Job placement selection [DEFAULT: 1]: $ECHOC" + read LMBENCH_SCHED + AGAIN=N ++ if [ "$LMBENCH_SCHED" == "" ]; then ++ LMBENCH_SCHED=1 ++ fi + case "$LMBENCH_SCHED" in + 1) LMBENCH_SCHED=DEFAULT;; + 2) LMBENCH_SCHED=BALANCED;; +@@ -657,13 +660,13 @@ + + EOF + +-echo $ECHON "Mail results [default yes] $ECHOC" ++echo $ECHON "Mail results [default no] $ECHOC" + read MAIL + case $MAIL in +- [Nn]*) MAIL=no +- echo OK, no results mailed. ++ [Yy]*) MAIL=yes ++ echo OK, results will be mailed. + ;; +- *) MAIL=yes ++ *) MAIL=no + ;; + esac + diff --git a/lmbench/lmbench3.tar.bz2 b/lmbench/lmbench3.tar.bz2 new file mode 100644 index 000000000..fd05690c8 Binary files /dev/null and b/lmbench/lmbench3.tar.bz2 differ diff --git a/ltp/control b/ltp/control new file mode 100644 index 000000000..0dc5fe97a --- /dev/null +++ b/ltp/control @@ -0,0 +1 @@ +job.runtest(None, 'ltp') diff --git a/ltp/control.ballista b/ltp/control.ballista new file mode 100644 index 000000000..5b7428c0a --- /dev/null +++ b/ltp/control.ballista @@ -0,0 +1 @@ +job.runtest(None, 'ltp', '-f ballista') diff --git a/ltp/ltp-diff.py b/ltp/ltp-diff.py new file mode 100644 index 000000000..5b821495e --- /dev/null +++ b/ltp/ltp-diff.py @@ -0,0 +1,119 @@ +#!/usr/bin/python +# (C) Copyright IBM Corp. 2006 +# Author: Dustin Kirkland +# Description: +# Input: Two or more files containing results from different executions of +# the LTP. The input can either be file names or the url location +# of the ltp.results file. +# Output: A report on the following: +# - The total number of tests executed in each run +# - The testname, sequence number, and output of each run +# where the results of those runs differ +# Return: +# 0 if all runs had identical results +# Non-zero if results differ, or bad input + + +import sys +import string +import re +import urllib + +def usage(): + print "\nUsage: \n\ + ltp-diff results1 results2 ... locationN \n\ + Note: location[1,2,N] may be local files or URLs of LTP results\n" + sys.exit(1) + +def get_results(results_files): + """ + Download the results if needed. + Return results of each run in a numerically-indexed dictionary + of dictionaries keyed on testnames. 
+ Return dictionary keyed on unique testnames across all runs. + """ + r = re.compile('(\S+\s+\S+)\s+(\S+)\s+:') + i = 0 + runs = {} + testnames = {} + for file in results_files: + runs[i] = {} + try: + fh = urllib.urlopen(file) + results = fh.readlines() + fh.close() + except: + print "ERROR: reading results resource [%s]" % (file) + usage() + for line in results: + try: + s = r.match(line) + testname = s.group(1) + status = s.group(2) + runs[i][testname] = status + testnames[testname] = 1 + except: + pass + i += 1 + return (runs, testnames) + + + +def compare_results(runs): + """ + Loop through all testnames alphabetically. + Print any testnames with differing results across runs. + Return 1 if any test results across runs differ. + Return 0 if all test results match. + """ + rc = 0 + print "LTP Test Results to Compare" + for i in range(len(runs)): + print " Run[%d]: %d" % (i, len(runs[i].keys())) + print "" + header = 0 + all_testnames = testnames.keys() + all_testnames.sort() + for testname in all_testnames: + differ = 0 + for i in range(1,len(runs)): + # Must handle testcases that executed in one run + # but not another by setting status to "null" + if not runs[i].has_key(testname): + runs[i][testname] = "null" + if not runs[i-1].has_key(testname): + runs[i-1][testname] = "null" + # Check for result inconsistencies + if runs[i][testname] != runs[i-1][testname]: + differ = 1 + if differ: + if header == 0: + # Print the differences header only once + print "Tests with Inconsistent Results across Runs" + print " %-35s:\t%s" % ("Testname,Sequence", "Run Results") + header = 1 + + # Print info if results differ + rc = 1 + testname_cleaned = re.sub('\s+', ',', testname) + print " %-35s:\t" % (testname_cleaned), + all_results = "" + for i in range(len(runs)): + all_results += runs[i][testname] + if i+1 + # -e + # -i + workfile = os.path.join('data', workfile) + args = "-f %s -s %d -e %d -i %d" %(workfile,start,end,increment) + config = os.path.join(self.srcdir, 'reaim.config') + system('cp -f %s/reaim.config %s' % (self.bindir, config)) + args += ' -c ./reaim.config' + open(config, 'a+').write("DISKDIR %s\n" % (tmpdir)) + os.chdir(self.srcdir) + print os.getcwd() + cmd = self.ldlib + ' ./reaim ' + args + ' ' + extra_args + + for i in range(1, iterations+1): + system(cmd) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(cmd) + profilers.stop(self) + profilers.report(self) diff --git a/rmaptest/rmap-test.c b/rmaptest/rmap-test.c new file mode 100644 index 000000000..77594d234 --- /dev/null +++ b/rmaptest/rmap-test.c @@ -0,0 +1,255 @@ +/* + * Create lots of VMA's mapped by lots of tasks. To tickle objrmap and the + * virtual scan.
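+ *
+ * Example invocation (illustrative values only; binary name assumed from this source file):
+ *   ./rmap-test -l -i2 -n50 -s16 -t20 /tmp/rmap-test-file
+ * i.e. 2 linear iterations over 50 VMAs of 16 pages each, touched by 20 tasks.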
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +char *progname; +char *filename; +void *mapped_mem; + +int niters; +int ntasks = 100; +int nvmas = 100; +int vmasize = 1024*1024; +int vmas_to_do = -1; +int pagesize; +int fd; +char **vma_addresses; +volatile int *nr_children_running; +int verbose; + +enum access_pattern { + ap_random, + ap_linear, + ap_half +} access_pattern = ap_linear; + +void open_file() +{ + fd = open(filename, O_RDWR|O_TRUNC|O_CREAT, 0666); + if (fd < 0) { + fprintf(stderr, "%s: Cannot open `%s': %s\n", + progname, filename, strerror(errno)); + exit(1); + } +} + +void usage(void) +{ + fprintf(stderr, "Usage: %s [-hlrvV] [-iN] [-nN] [-sN] [-tN] filename\n", + progname); + fprintf(stderr, " -h: Pattern: half of memory is busy\n"); + fprintf(stderr, " -l: Pattern: linear\n"); + fprintf(stderr, " -r: Pattern: random\n"); + fprintf(stderr, " -iN: Number of iterations\n"); + fprintf(stderr, " -nN: Number of VMAs\n"); + fprintf(stderr, " -sN: VMA size (pages)\n"); + fprintf(stderr, " -tN: Run N tasks\n"); + fprintf(stderr, " -VN: Number of VMAs to process\n"); + fprintf(stderr, " -v: Verbose\n"); + exit(1); +} + +void touch_pages(int nr_vmas) +{ + int i; + + for (i = 0; i < nr_vmas; i++) { + char *p = vma_addresses[i]; + int page; + + for (page = 0; page < vmasize; page++) + p[page * pagesize]++; + } +} + +void msync_file(int nr_vmas) +{ + int i; + + for (i = 0; i < nr_vmas; i++) { + char *p = vma_addresses[i]; + + msync(p, vmasize * pagesize, MS_ASYNC); + } +} + +void touch_random_pages(void) +{ + int vma; + int page; + + srand(getpid() * time(0)); + + for (vma = 0; vma < vmas_to_do; vma++) { + for (page = 0; page < vmasize; page++) { + int rand_vma; + int rand_page; + char *p; + + rand_vma = rand() % nvmas; + rand_page = rand() % vmasize; + p = vma_addresses[rand_vma] + rand_page * pagesize; + (*p)++; + } + if (verbose > 1) + printf("vma %d/%d done\n", vma, nvmas); + } +} + +void child(int childno) +{ + int iter; + + sleep(1); + if (access_pattern == ap_half && childno == 0) { + while (*nr_children_running > 1) { + touch_pages(nvmas / 2); + } + return; + } + + for (iter = 0; iter < niters; iter++) { + if (access_pattern == ap_random) { + touch_random_pages(); + } else if (access_pattern == ap_linear) { + touch_pages(nvmas); + } else if (access_pattern == ap_half) { + touch_pages(nvmas); + } + if (verbose > 0) + printf("%d/%d\n", iter, niters); + } +} + +int main(int argc, char *argv[]) +{ + int c; + int i; + loff_t offset; + loff_t file_size; + int childno; + + progname = argv[0]; + + while ((c = getopt(argc, argv, "vrlhi:n:s:t:V:")) != -1) { + switch (c) { + case 'h': + access_pattern = ap_half; + break; + case 'l': + access_pattern = ap_linear; + break; + case 'r': + access_pattern = ap_random; + break; + case 'i': + niters = strtol(optarg, NULL, 10); + break; + case 'n': + nvmas = strtol(optarg, NULL, 10); + break; + case 's': + vmasize = strtol(optarg, NULL, 10); + break; + case 't': + ntasks = strtol(optarg, NULL, 10); + break; + case 'V': + vmas_to_do = strtol(optarg, NULL, 10); + break; + case 'v': + verbose++; + break; + } + } + + if (optind == argc) + usage(); + filename = argv[optind++]; + if (optind != argc) + usage(); + + if (vmas_to_do == -1) + vmas_to_do = nvmas; + + pagesize = getpagesize(); + open_file(); + + file_size = nvmas; + file_size *= vmasize; + file_size += nvmas - 1; + file_size *= pagesize; + + printf("Total file size: %lldk, Total memory: %lldk\n", + file_size / 1024, + ((long 
long)nvmas * vmasize * pagesize) / 1024); + + if (ftruncate(fd, file_size) < 0) { + perror("ftruncate"); + exit(1); + } + + vma_addresses = malloc(nvmas * sizeof(*vma_addresses)); + nr_children_running = (int *)mmap(0, sizeof(*nr_children_running), + PROT_READ|PROT_WRITE, + MAP_SHARED|MAP_ANONYMOUS, + -1, + 0); + if (nr_children_running == MAP_FAILED) { + perror("mmap1"); + exit(1); + } + + offset = 0; + + for (i = 0; i < nvmas; i++) { + char *p; + + p = mmap(0, vmasize * pagesize, PROT_READ|PROT_WRITE, + MAP_SHARED, fd, offset); + if (p == MAP_FAILED) { + perror("mmap"); + exit(1); + } + vma_addresses[i] = p; + offset += vmasize * pagesize + pagesize; + } + + touch_pages(nvmas); + msync_file(nvmas); + *nr_children_running = ntasks; + + for (childno = 0; childno < ntasks; childno++) { + if (fork() == 0) { + child(childno); + exit(0); + } + } + + signal(SIGINT, SIG_IGN); + + for (i = 0; i < ntasks; i++) { + pid_t pid; + int status; + + /* Catch each child error status and report. */ + pid = wait3(&status, 0, 0); + if (pid < 0) /* No more children? */ + break; + (*nr_children_running)--; + } + exit(0); +} diff --git a/scrashme/control b/scrashme/control new file mode 100644 index 000000000..11387e308 --- /dev/null +++ b/scrashme/control @@ -0,0 +1 @@ +job.runtest(None, 'scrashme') diff --git a/scrashme/scrashme-2006-08-29.tar.gz b/scrashme/scrashme-2006-08-29.tar.gz new file mode 100644 index 000000000..a452f6931 Binary files /dev/null and b/scrashme/scrashme-2006-08-29.tar.gz differ diff --git a/scrashme/scrashme.py b/scrashme/scrashme.py new file mode 100644 index 000000000..d01f86936 --- /dev/null +++ b/scrashme/scrashme.py @@ -0,0 +1,30 @@ +import test +from autotest_utils import * + +class scrashme(test.test): + version = 1 + + # http://www.codemonkey.org.uk/projects/git-snapshots/scrashme/scrashme-2006-08-29.tar.gz + def setup(self, tarball = 'scrashme-2006-08-29.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, iterations = 1, args_list = ''): + if len(args_list) != 0: + args = '' + args_list + else: + args = '-c100 -z' + + for i in range(1, iterations+1): + system(self.srcdir + '/scrashme ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(self.srcdir + '/scrashme ' + args) + profilers.stop(self) + profilers.report(self) diff --git a/selftest/selftest.py b/selftest/selftest.py new file mode 100644 index 000000000..f3e5dc828 --- /dev/null +++ b/selftest/selftest.py @@ -0,0 +1,51 @@ +import test +from autotest_utils import * +from error import * + +class selftest(test.test): + version = 1 + + def setup(self): + name = self.job.resultdir + '/sequence' + if (not os.path.exists(name)): + fd = file(name, 'w') + fd.write('0') + fd.close() + + def __mark(self, checkpoint): + name = self.job.resultdir + '/sequence' + fd = file(name, 'r') + current = int(fd.readline()) + fd.close() + + current += 1 + fd = file(name + '.new', 'w') + fd.write('%d' % current) + fd.close() + + os.rename(name + '.new', name) + + print "checkpoint %d %d" % (current, checkpoint) + + if (current != checkpoint): + raise JobError("selftest: sequence was " + + "%d when %d expected" % (current, checkpoint)) + + def __throw(self): + __does_not_exist = __does_not_exist_either + + def __print(self, msg): + sys.stdout.write(msg) + + def __warn(self, msg): + sys.stderr.write(msg) + + def execute(self, cmd, *args): 
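+		# Dispatch the requested self-test action: 'mark' verifies the checkpoint sequence, 'throw' deliberately raises an error, 'print'/'warn' write to stdout/stderr.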
+ if cmd == 'mark': + self.__mark(*args) + elif cmd == 'throw': + self.__throw(*args) + elif cmd == 'print': + self.__print(*args) + elif cmd == 'warn': + self.__warn(*args) diff --git a/sleeptest/control b/sleeptest/control new file mode 100644 index 000000000..439dded5c --- /dev/null +++ b/sleeptest/control @@ -0,0 +1 @@ +job.runtest(None, 'sleeptest') diff --git a/sleeptest/sleeptest.py b/sleeptest/sleeptest.py new file mode 100755 index 000000000..6c5df713f --- /dev/null +++ b/sleeptest/sleeptest.py @@ -0,0 +1,12 @@ +import test, time +from autotest_utils import * + +class sleeptest(test.test): + version = 1 + + def execute(self, seconds = 1): + profilers = self.job.profilers + profilers.start(self) + time.sleep(seconds) + profilers.stop(self) + profilers.report(self) diff --git a/sparse/control b/sparse/control new file mode 100755 index 000000000..e99e0e114 --- /dev/null +++ b/sparse/control @@ -0,0 +1,2 @@ +job.runtest(None, 'sparse', '/usr/local/src/linux-2.6.14.tar.bz2', '/usr/local/src/patch-2.6.14-git6.bz2', 'http://ftp.kernel.org/pub/linux/kernel/people/mbligh/config/config.up') + diff --git a/sparse/sparse-2006-04-28.tar.gz b/sparse/sparse-2006-04-28.tar.gz new file mode 100644 index 000000000..9df9d693f Binary files /dev/null and b/sparse/sparse-2006-04-28.tar.gz differ diff --git a/sparse/sparse.py b/sparse/sparse.py new file mode 100755 index 000000000..52047afe0 --- /dev/null +++ b/sparse/sparse.py @@ -0,0 +1,25 @@ +import test +from autotest_utils import * + +class sparse(test.test): + version = 1 + + # http://www.codemonkey.org.uk/projects/git-snapshots/sparse/sparse-2006-04-28.tar.gz + def setup(self, tarball = 'sparse-2006-04-28.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + system('ln check sparse') + + self.top_dir = self.job.tmpdir+'/sparse' + + def execute(self, base_tree, patches, config, config_list = None): + kernel = self.job.kernel(self.job.tmpdir+'/sparse', base_tree) + kernel.patch(patches) + kernel.config(config, config_list) + + os.environ['PATH'] = self.srcdir + ':' + os.environ['PATH'] + results = os.path.join (self.resultsdir, 'sparse') + kernel.build(make_opts = 'C=1', logfile = results) diff --git a/stress/control b/stress/control new file mode 100644 index 000000000..a40d66efd --- /dev/null +++ b/stress/control @@ -0,0 +1 @@ +job.runtest(None, 'stress') diff --git a/stress/stress-0.18.8.tar.gz b/stress/stress-0.18.8.tar.gz new file mode 100644 index 000000000..47a8abb9a Binary files /dev/null and b/stress/stress-0.18.8.tar.gz differ diff --git a/stress/stress.py b/stress/stress.py new file mode 100644 index 000000000..dbeaee1a1 --- /dev/null +++ b/stress/stress.py @@ -0,0 +1,56 @@ +import test +from autotest_utils import * + +class stress(test.test): + version = 1 + + # http://weather.ou.edu/~apw/projects/stress/stress-0.18.8.tar.gz + def setup(self, tarball = 'stress-0.18.8.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('./configure') + system('make') + + + def execute(self, iterations = 1, args = ''): + if not args: + threads = 2*count_cpus() + args = '-c %d -i %d -m %d -d %d -t 60 -v' % \ + (threads, threads, threads, threads) + + for i in range(1, iterations+1): + system(self.srcdir + '/src/stress ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + 
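+			# Repeat the same stress command once while the profilers are attached.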
system(self.srcdir + '/src/stress ' + args) + profilers.stop(self) + profilers.report(self) + +# -v Turn up verbosity. +# -q Turn down verbosity. +# -n Show what would have been done (dry-run) +# -t secs Time out after secs seconds. +# --backoff usecs Wait for factor of usecs microseconds before starting +# -c forks Spawn forks processes each spinning on sqrt(). +# -i forks Spawn forks processes each spinning on sync(). +# -m forks Spawn forks processes each spinning on malloc(). +# --vm-bytes bytes Allocate bytes number of bytes. The default is 1. +# --vm-hang Instruct each vm hog process to go to sleep after +# allocating memory. This contrasts with their normal +# behavior, which is to free the memory and reallocate +# ad infinitum. This is useful for simulating low memory +# conditions on a machine. For example, the following +# command allocates 256M of RAM and holds it until killed. +# +# % stress --vm 2 --vm-bytes 128M --vm-hang +# -d forks Spawn forks processes each spinning on write(). +# --hdd-bytes bytes Write bytes number of bytes. The default is 1GB. +# --hdd-noclean Do not unlink file(s) to which random data is written. +# +# Note: Suffixes may be s,m,h,d,y (time) or k,m,g (size). + diff --git a/tbench/control b/tbench/control new file mode 100644 index 000000000..d2842d474 --- /dev/null +++ b/tbench/control @@ -0,0 +1 @@ +job.runtest(None, 'tbench') diff --git a/tbench/dbench-3.04.tar.gz b/tbench/dbench-3.04.tar.gz new file mode 100644 index 000000000..c0bb2e21e Binary files /dev/null and b/tbench/dbench-3.04.tar.gz differ diff --git a/tbench/tbench.py b/tbench/tbench.py new file mode 100755 index 000000000..0fe1d8c6f --- /dev/null +++ b/tbench/tbench.py @@ -0,0 +1,41 @@ +import test,time,os,signal +from autotest_utils import * + +class tbench(test.test): + version = 1 + + # http://samba.org/ftp/tridge/dbench/dbench-3.04.tar.gz + def setup(self, tarball = 'dbench-3.04.tar.gz'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('./configure') + system('make') + + def execute(self, iterations = 1, args = '1'): + # only supports combined server+client model at the moment + # should support separate I suppose, but nobody uses it + for i in range(1, iterations+1): + self.run_tbench(args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + self.run_tbench(args) + profilers.stop(self) + profilers.report(self) + + + def run_tbench(self, args): + pid = os.fork() + if pid: # parent + time.sleep(1) + client = self.srcdir + '/client.txt' + args = '-c ' + client + ' ' + args + system(self.srcdir + '/tbench ' + args) + os.kill(pid, signal.SIGTERM) # clean up the server + else: # child + server = self.srcdir + '/tbench_srv' + os.execlp(server, server) diff --git a/tiobench/control b/tiobench/control new file mode 100644 index 000000000..f8e57455a --- /dev/null +++ b/tiobench/control @@ -0,0 +1 @@ +job.runtest(None, 'tiobench','/mnt') diff --git a/tiobench/tiobench-0.3.3.tar.bz2 b/tiobench/tiobench-0.3.3.tar.bz2 new file mode 100644 index 000000000..39721baba Binary files /dev/null and b/tiobench/tiobench-0.3.3.tar.bz2 differ diff --git a/tiobench/tiobench.py b/tiobench/tiobench.py new file mode 100644 index 000000000..06708654d --- /dev/null +++ b/tiobench/tiobench.py @@ -0,0 +1,27 @@ +import test +from autotest_utils import * + +class tiobench(test.test): + version = 1 + + # 
http://prdownloads.sourceforge.net/tiobench/tiobench-0.3.3.tar.gz + def setup(self, tarball = 'tiobench-0.3.3.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, dir, args = None): + os.chdir(self.srcdir) + if not args: + args = '--block=4096 --block=8192 --threads=10 --size=1024 --numruns=2' + system('./tiobench.pl --dir %s %s' %(dir, args)) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system('./tiobench.pl --dir %s %s' %(dir, args)) + profilers.stop(self) + profilers.report(self) diff --git a/unixbench/control b/unixbench/control new file mode 100644 index 000000000..9f7643a26 --- /dev/null +++ b/unixbench/control @@ -0,0 +1 @@ +job.runtest(None, 'unixbench') diff --git a/unixbench/unixbench-4.1.0.tar.bz2 b/unixbench/unixbench-4.1.0.tar.bz2 new file mode 100644 index 000000000..e5a934c07 Binary files /dev/null and b/unixbench/unixbench-4.1.0.tar.bz2 differ diff --git a/unixbench/unixbench.diff b/unixbench/unixbench.diff new file mode 100644 index 000000000..a3613936d --- /dev/null +++ b/unixbench/unixbench.diff @@ -0,0 +1,19 @@ +--- src/Run.old 2006-05-17 14:15:36.000000000 -0700 ++++ src/Run 2006-05-17 14:16:24.000000000 -0700 +@@ -114,16 +114,6 @@ + SCRPDIR=`pwd` + cd $_WD + +-TMPDIR=${HOMEDIR}/tmp +-cd $TMPDIR +-TMPDIR=`pwd` +-cd $_WD +- +-RESULTDIR=${RESULTDIR-${HOMEDIR}/results} +-cd $RESULTDIR +-RESULTDIR=`pwd` +-cd $_WD +- + TIMEACCUM=${TIMEACCUM-${RESULTDIR}/times} + + TESTDIR=${TESTDIR-${HOMEDIR}/testdir} diff --git a/unixbench/unixbench.py b/unixbench/unixbench.py new file mode 100755 index 000000000..10c3d9e60 --- /dev/null +++ b/unixbench/unixbench.py @@ -0,0 +1,27 @@ +import test +from autotest_utils import * + +class unixbench(test.test): + version = 1 + + # http://www.tux.org/pub/tux/niemi/unixbench/unixbench-4.1.0.tgz + def setup(self, tarball = 'unixbench-4.1.0.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('make') + + def execute(self, iterations = 1, args = ''): + for i in range(1, iterations+1): + os.chdir(self.srcdir) + vars = 'TMPDIR=\"%s\" RESULTDIR=\"%s\"' % (self.tmpdir, self.resultsdir) + system(vars + ' ./Run ' + args) + + # Do a profiling run if necessary + profilers = self.job.profilers + if profilers.present(): + profilers.start(self) + system(vars + ' ./Run ' + args) + profilers.stop(self) + profilers.report(self) diff --git a/xmtest/control b/xmtest/control new file mode 100644 index 000000000..a06ce29d4 --- /dev/null +++ b/xmtest/control @@ -0,0 +1 @@ +job.runtest(None, 'xmtest', '-e nobody@nowhere.org -d xmtest') diff --git a/xmtest/xm-test.tar.bz2 b/xmtest/xm-test.tar.bz2 new file mode 100644 index 000000000..759eb2422 Binary files /dev/null and b/xmtest/xm-test.tar.bz2 differ diff --git a/xmtest/xmtest.py b/xmtest/xmtest.py new file mode 100644 index 000000000..d4327523b --- /dev/null +++ b/xmtest/xmtest.py @@ -0,0 +1,31 @@ +# (C) Copyright IBM Corp. 
2006 +# Author: Paul Larson +# Description: +# Autotest script for running Xen xm-test +# This should be run from a Xen domain0 + +import test +from autotest_utils import * + +class xmtest(test.test): + version = 1 + + # This test expects just the xm-test directory, as a tarball + # from the Xen source tree + # hg clone http://xenbits.xensource.com/xen-unstable.hg + # or wget http://www.cl.cam.ac.uk/Research/SRG/netos/xen/downloads/xen-unstable-src.tgz + # cd tools + # tar -czf xm-test.tgz xm-test + def setup(self, tarball = 'xm-test.tar.bz2'): + tarball = unmap_url(self.bindir, tarball, self.tmpdir) + extract_tarball_to_dir(tarball, self.srcdir) + os.chdir(self.srcdir) + + system('./autogen') + system('./configure') + system('make existing') + + def execute(self, args = ''): + os.chdir(self.srcdir) + system('./runtest.sh ' + args) + system('mv xmtest.* ' + self.resultsdir)