/* kernel/rtmutex-tester.c */

/*
 * RT-Mutex-tester: scriptable tester for rt mutexes
 *
 * started by Thomas Gleixner:
 *
 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 */
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>

#include "rtmutex.h"
19
#define MAX_RT_TEST_THREADS 8
#define MAX_RT_TEST_MUTEXES 8

/* Protects the multi-part status read-out in sysfs_test_status(). */
static spinlock_t rttest_lock;
/* Global sequence counter; every state transition draws a unique number. */
static atomic_t rttest_event;

/* Per-thread tester state, exported to user space via one sysdev node. */
struct test_thread_data {
    int opcode;                        /* pending command (enum test_opcodes); <= 0 when idle */
    int opdata;                        /* command argument (lock index, priority, ...) */
    int mutexes[MAX_RT_TEST_MUTEXES];  /* per-lock state: 0 free, 1-3 lock in progress, 4 held */
    int bkl;                           /* BKL state: 0 unlocked, 1 acquiring, 4 held */
    int event;                         /* rttest_event value at this thread's last transition */
    struct sys_device sysdev;          /* sysfs handle; .id doubles as the thread index */
};

static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
38
/* Commands accepted through the sysfs "command" file, format "opcode:data". */
enum test_opcodes {
    RTTEST_NOP = 0,
    RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
    RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
    RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
    RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
    RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
    RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
    RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
    RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
    RTTEST_LOCKBKL, /* 9 Lock BKL */
    RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
    RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
    RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
    RTTEST_RESET = 99, /* 99 Reset all pending operations */
};
55
/*
 * Execute the command pending in @td for one tester thread.
 *
 * @td:         per-thread state; td->opcode selects the command and
 *              td->opdata carries its argument (lock index etc.).
 * @lockwakeup: non-zero when called from schedule_rt_mutex_test() while
 *              the thread is parked inside the rtmutex slow path; in that
 *              context only the commands of the first switch are allowed.
 *
 * Returns 0 on success, -EINVAL for unknown/rejected commands and
 * -EINTR when an interruptible lock attempt was interrupted.
 *
 * State encoding in td->mutexes[id]: 0 = free, 1 = lock initiated,
 * 2/3 = intermediate wakeup states (set in schedule_rt_mutex_test()),
 * 4 = held.  td->bkl uses the same 0/1/4 encoding for the BKL.
 * Each transition stores a fresh rttest_event sequence number in
 * td->event so the controlling script can verify global ordering.
 */
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
    int i, id, ret = -EINVAL;

    switch(td->opcode) {

    case RTTEST_NOP:
        return 0;

    case RTTEST_LOCKCONT:
        /* Release the thread parked in schedule_rt_mutex_test() */
        td->mutexes[td->opdata] = 1;
        td->event = atomic_add_return(1, &rttest_event);
        return 0;

    case RTTEST_RESET:
        /* Drop every rt mutex this thread currently holds */
        for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
            if (td->mutexes[i] == 4) {
                rt_mutex_unlock(&mutexes[i]);
                td->mutexes[i] = 0;
            }
        }

        /* BKL may not be released from inside the wakeup path */
        if (!lockwakeup && td->bkl == 4) {
            unlock_kernel();
            td->bkl = 0;
        }
        return 0;

    case RTTEST_RESETEVENT:
        atomic_set(&rttest_event, 0);
        return 0;

    default:
        /* All other commands are invalid while inside the wakeup path */
        if (lockwakeup)
            return ret;
    }

    switch(td->opcode) {

    case RTTEST_LOCK:
    case RTTEST_LOCKNOWAIT:
        id = td->opdata;
        if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
            return ret;

        td->mutexes[id] = 1;
        td->event = atomic_add_return(1, &rttest_event);
        rt_mutex_lock(&mutexes[id]);
        td->event = atomic_add_return(1, &rttest_event);
        td->mutexes[id] = 4;
        return 0;

    case RTTEST_LOCKINT:
    case RTTEST_LOCKINTNOWAIT:
        id = td->opdata;
        if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
            return ret;

        td->mutexes[id] = 1;
        td->event = atomic_add_return(1, &rttest_event);
        ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
        td->event = atomic_add_return(1, &rttest_event);
        /* Interrupted attempts (via RTTEST_SIGNAL) roll back to free */
        td->mutexes[id] = ret ? 0 : 4;
        return ret ? -EINTR : 0;

    case RTTEST_UNLOCK:
        id = td->opdata;
        /* Only locks this thread actually holds (state 4) may be dropped */
        if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
            return ret;

        td->event = atomic_add_return(1, &rttest_event);
        rt_mutex_unlock(&mutexes[id]);
        td->event = atomic_add_return(1, &rttest_event);
        td->mutexes[id] = 0;
        return 0;

    case RTTEST_LOCKBKL:
        /* BKL is not recursive here: ignore if already taken */
        if (td->bkl)
            return 0;
        td->bkl = 1;
        lock_kernel();
        td->bkl = 4;
        return 0;

    case RTTEST_UNLOCKBKL:
        if (td->bkl != 4)
            break;
        unlock_kernel();
        td->bkl = 0;
        return 0;

    default:
        break;
    }
    return ret;
}
152
/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have finegrained control over the event flow.
 *
 * Called from the rtmutex slow path instead of schedule(); it advances
 * the per-lock state machine (1 -> 2 -> 3 or back to 1) and, for the
 * blocking LOCK/LOCKINT variants, parks the thread so the controlling
 * script can inject further commands until RTTEST_LOCKCONT.
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
    int tid, op, dat;
    struct test_thread_data *td;

    /* We have to lookup the task */
    for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
        if (threads[tid] == current)
            break;
    }

    /* Only tester threads may ever get here (PF_MUTEX_TESTER) */
    BUG_ON(tid == MAX_RT_TEST_THREADS);

    td = &thread_data[tid];

    op = td->opcode;
    dat = td->opdata;

    switch (op) {
    case RTTEST_LOCK:
    case RTTEST_LOCKINT:
    case RTTEST_LOCKNOWAIT:
    case RTTEST_LOCKINTNOWAIT:
        if (mutex != &mutexes[dat])
            break;

        if (td->mutexes[dat] != 1)
            break;

        /* State 2: lock attempt is now blocked on the mutex */
        td->mutexes[dat] = 2;
        td->event = atomic_add_return(1, &rttest_event);
        break;

    case RTTEST_LOCKBKL:
    default:
        break;
    }

    schedule();


    switch (op) {
    case RTTEST_LOCK:
    case RTTEST_LOCKINT:
        if (mutex != &mutexes[dat])
            return;

        if (td->mutexes[dat] != 2)
            return;

        /* State 3: woken up, wait below for RTTEST_LOCKCONT */
        td->mutexes[dat] = 3;
        td->event = atomic_add_return(1, &rttest_event);
        break;

    case RTTEST_LOCKNOWAIT:
    case RTTEST_LOCKINTNOWAIT:
        if (mutex != &mutexes[dat])
            return;

        if (td->mutexes[dat] != 2)
            return;

        /* "nowait" variants skip the delay loop and retry immediately */
        td->mutexes[dat] = 1;
        td->event = atomic_add_return(1, &rttest_event);
        return;

    case RTTEST_LOCKBKL:
        return;
    default:
        return;
    }

    /* Clear the command slot so the script can post follow-up commands */
    td->opcode = 0;

    /*
     * Park here and execute commands on the script's behalf until
     * RTTEST_LOCKCONT lets the lock acquisition continue.
     */
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);

        if (td->opcode > 0) {
            int ret;

            set_current_state(TASK_RUNNING);
            ret = handle_op(td, 1);
            set_current_state(TASK_INTERRUPTIBLE);
            if (td->opcode == RTTEST_LOCKCONT)
                break;
            td->opcode = ret;
        }

        /* Wait for the next command to be executed */
        schedule();
    }

    /* Restore previous command and data */
    td->opcode = op;
    td->opdata = dat;
}
256
/*
 * Main loop of each tester kthread.
 *
 * Sleeps until woken by sysfs_test_command(), executes the posted
 * command via handle_op() and stores the result back into td->opcode
 * (<= 0 signals "idle, ready for the next command").  Runs until
 * kthread_stop().
 */
static int test_func(void *data)
{
    struct test_thread_data *td = data;
    int ret;

    /* Route this thread through schedule_rt_mutex_test() */
    current->flags |= PF_MUTEX_TESTER;
    set_freezable();
    /* RTTEST_SIGNAL uses SIGHUP to interrupt interruptible lock waits */
    allow_signal(SIGHUP);

    for(;;) {

        set_current_state(TASK_INTERRUPTIBLE);

        if (td->opcode > 0) {
            set_current_state(TASK_RUNNING);
            ret = handle_op(td, 0);
            set_current_state(TASK_INTERRUPTIBLE);
            td->opcode = ret;
        }

        /* Wait for the next command to be executed */
        schedule();
        try_to_freeze();

        /* Discard the SIGHUP that interrupted a lock wait */
        if (signal_pending(current))
            flush_signals(current);

        if(kthread_should_stop())
            break;
    }
    return 0;
}
289
290/**
291 * sysfs_test_command - interface for test commands
292 * @dev: thread reference
293 * @buf: command for actual step
294 * @count: length of buffer
295 *
296 * command syntax:
297 *
298 * opcode:data
299 */
300static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
301                  const char *buf, size_t count)
302{
303    struct sched_param schedpar;
304    struct test_thread_data *td;
305    char cmdbuf[32];
306    int op, dat, tid, ret;
307
308    td = container_of(dev, struct test_thread_data, sysdev);
309    tid = td->sysdev.id;
310
311    /* strings from sysfs write are not 0 terminated! */
312    if (count >= sizeof(cmdbuf))
313        return -EINVAL;
314
315    /* strip of \n: */
316    if (buf[count-1] == '\n')
317        count--;
318    if (count < 1)
319        return -EINVAL;
320
321    memcpy(cmdbuf, buf, count);
322    cmdbuf[count] = 0;
323
324    if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
325        return -EINVAL;
326
327    switch (op) {
328    case RTTEST_SCHEDOT:
329        schedpar.sched_priority = 0;
330        ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
331        if (ret)
332            return ret;
333        set_user_nice(current, 0);
334        break;
335
336    case RTTEST_SCHEDRT:
337        schedpar.sched_priority = dat;
338        ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
339        if (ret)
340            return ret;
341        break;
342
343    case RTTEST_SIGNAL:
344        send_sig(SIGHUP, threads[tid], 0);
345        break;
346
347    default:
348        if (td->opcode > 0)
349            return -EBUSY;
350        td->opdata = dat;
351        td->opcode = op;
352        wake_up_process(threads[tid]);
353    }
354
355    return count;
356}
357
/**
 * sysfs_test_status - sysfs interface for rt tester
 * @dev: thread to query
 * @attr: sysdev attribute (unused)
 * @buf: char buffer to be filled with thread status info
 *
 * Emits one line: opcode, event count, task state, (normalized) prio
 * and normal_prio, pi_blocked_on pointer, BKL state, the per-mutex
 * state digits (highest index first), task pointer and mutex owner.
 */
static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
                 char *buf)
{
    struct test_thread_data *td;
    struct task_struct *tsk;
    char *curr = buf;
    int i;

    td = container_of(dev, struct test_thread_data, sysdev);
    tsk = threads[td->sysdev.id];

    /* Keep opcode/event/mutex states consistent while formatting */
    spin_lock(&rttest_lock);

    curr += sprintf(curr,
        "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
        td->opcode, td->event, tsk->state,
            (MAX_RT_PRIO - 1) - tsk->prio,
            (MAX_RT_PRIO - 1) - tsk->normal_prio,
        tsk->pi_blocked_on, td->bkl);

    /* One state digit per mutex, highest index first */
    for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
        curr += sprintf(curr, "%d", td->mutexes[i]);

    spin_unlock(&rttest_lock);

    curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
            mutexes[td->sysdev.id].owner);

    return curr - buf;
}
393
/* Per-thread sysfs attributes: "status" (read) and "command" (write). */
static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);

/* All tester devices appear under /sys/devices/system/rttest/ */
static struct sysdev_class rttest_sysclass = {
    .name = "rttest",
};
400
401static int init_test_thread(int id)
402{
403    thread_data[id].sysdev.cls = &rttest_sysclass;
404    thread_data[id].sysdev.id = id;
405
406    threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
407    if (IS_ERR(threads[id]))
408        return PTR_ERR(threads[id]);
409
410    return sysdev_register(&thread_data[id].sysdev);
411}
412
413static int init_rttest(void)
414{
415    int ret, i;
416
417    spin_lock_init(&rttest_lock);
418
419    for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
420        rt_mutex_init(&mutexes[i]);
421
422    ret = sysdev_class_register(&rttest_sysclass);
423    if (ret)
424        return ret;
425
426    for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
427        ret = init_test_thread(i);
428        if (ret)
429            break;
430        ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
431        if (ret)
432            break;
433        ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
434        if (ret)
435            break;
436    }
437
438    printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
439
440    return ret;
441}
442
443device_initcall(init_rttest);
444
