/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <asm/local.h>

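/*
 * Mirrors the layout of a ring buffer data page as returned by
 * ring_buffer_read_page(): a timestamp, a commit counter, then the
 * event data filling out the rest of a (typically 4K) page.
 */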
struct rb_page {
	u64 ts;
	local_t commit;
	char data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME 10ULL
#define SLEEP_TIME 10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static DECLARE_COMPLETION(read_start);
static DECLARE_COMPLETION(read_done);

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static unsigned int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static unsigned int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = MAX_NICE;
static int consumer_nice = MAX_NICE;

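/* A negative fifo priority (the default) means use the nice value instead */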
static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, int, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, int, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, int, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int test_error;

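/*
 * Latch only the first failure: set test_error and emit a single
 * WARN_ON() backtrace so both threads can wind down via break_test().
 */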
#define TEST_ERROR()				\
	do {					\
		if (!test_error) {		\
			test_error = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static bool break_test(void)
{
	return test_error || kthread_should_stop();
}

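/*
 * Consume one event from @cpu and check that its payload holds the id
 * of the CPU that wrote it.
 */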
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		TEST_ERROR();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(bpage))
		return EVENT_DROPPED;

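	/*
	 * ring_buffer_read_page() swaps a page of events into bpage;
	 * a negative return means no data was transferred.
	 */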
	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !test_error; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				TEST_ERROR();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
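			/*
			 * Step over the event: padding and type_len 0 events
			 * store the data length in array[0] (plus the 4 byte
			 * header), a time extend is a fixed 8 bytes, and any
			 * other type_len encodes the size as (type_len + 1) * 4.
			 * inc stays -1 if a case fails to set a size.
			 */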
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					TEST_ERROR();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				if (!event->array[0]) {
					TEST_ERROR();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					TEST_ERROR();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (test_error)
				break;

			if (inc <= 0) {
				TEST_ERROR();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, cpu, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

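/*
 * Drain the buffer until the producer asks us to finish, alternating
 * between the event and page read paths on successive runs.
 */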
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	/*
	 * Continue running until the producer specifically asks to stop
	 * and is ready for the completion.
	 */
	while (!READ_ONCE(reader_finish)) {
		int found = 1;

		while (found && !test_error) {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (test_error)
					break;

				if (stat == EVENT_FOUND)
					found = 1;
			}
		}

		/*
		 * Wait till the producer wakes us up when there is more data
		 * available or when the producer wants us to finish reading.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);
	reader_finish = 0;
	complete(&read_done);
}

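/*
 * Hammer the buffer with 10 byte events for RUN_TIME seconds, waking
 * the consumer every wakeup_interval iterations, then report the results.
 */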
static void ring_buffer_producer(void)
{
	ktime_t start_time, end_time, timeout;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	start_time = ktime_get();
	timeout = ktime_add_ns(start_time, RUN_TIME * NSEC_PER_SEC);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		end_time = ktime_get();

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non-preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif
	} while (ktime_before(end_time, timeout) && !break_test());
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = ktime_us_delta(end_time, start_time);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (test_error)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time: %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read: (reader disabled)\n");
	else
		trace_printk("Read: %ld (by %s)\n", read,
			     read_events ? "events" : "pages");
	trace_printk("Entries: %lld\n", entries);
	trace_printk("Total: %lld\n", entries + overruns + read);
	trace_printk("Missed: %ld\n", missed);
	trace_printk("Hit: %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

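/* Park the thread until kthread_stop() is called */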
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

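/*
 * Signal read_start so the producer knows the consumer is ready, then
 * drain the buffer until told to finish or stop.
 */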
static int ring_buffer_consumer_thread(void *arg)
{
	while (!break_test()) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

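/*
 * Alternate RUN_TIME second production runs with SLEEP_TIME second
 * sleeps until the module is unloaded or an error is hit.
 */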
static int ring_buffer_producer_thread(void *arg)
{
	while (!break_test()) {
		ring_buffer_reset(buffer);

		if (consumer) {
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();
		if (break_test())
			goto out_kill;

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		if (break_test())
			goto out_kill;
		schedule_timeout(HZ * SLEEP_TIME);
	}

out_kill:
	__set_current_state(TASK_RUNNING);
	if (!kthread_should_stop())
		wait_to_die();

	return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

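	/*
	 * kthread_create() leaves the consumer sleeping; the producer
	 * thread wakes it once the buffer has been reset.
	 */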
	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");