#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/slab.h>
#define USE_CHECK_FNS
#define PROC_DIR_NAME "lockdoc"
#define PROC_FILE_CONTROL_NAME "control"
#define PROC_FILE_ITER_NAME "iterations"
#define DEFAULT_ITERATIONS CONFIG_LOCKDOC_TEST_ITERATIONS
/*
* For some reasons, our ring buffer (aka BSB ring buffer)
* can only hold size - 1 elements.
* If we want to store DEFAULT_ITERATIONS elements, as desired,
* the buffer must be one element larger.
* Hence, RING_BUFFER_SIZE_REAL is used for allocating the actual buffer
* and used for the size member.
* In contrast, RING_BUFFER_SIZE_VIRT is used when assigning a new value
* for iterations in procfile_iter_write.
*/
#define RING_BUFFER_SIZE_REAL (DEFAULT_ITERATIONS + 1)
#define RING_BUFFER_SIZE_VIRT (RING_BUFFER_SIZE_REAL - 1)
#define RING_BUFFER_STORAGE_TYPE int
#define MK_STRING(x) #x
#define START_AND_WAIT_THREAD(x) start_and_wait_thread(MK_STRING(x), x)
/* Protects the ring buffer indices/data themselves. */
DEFINE_SPINLOCK(rb_lock);
/* Extra locks taken by the test threads so LockDoc can observe different lock sets. */
DEFINE_SPINLOCK(consumer_lock);
DEFINE_SPINLOCK(producer_lock);
/* /proc/lockdoc directory and its two control files. */
static struct proc_dir_entry *proc_dir, *proc_control, *proc_iter;
/* Number of produce/consume rounds each thread performs; settable via /proc/lockdoc/iterations. */
static int iterations = DEFAULT_ITERATIONS;
static struct task_struct *control_thread = NULL;
/*
 * Classic bounded ring buffer: one slot is sacrificed to distinguish
 * "full" from "empty" (see comment above RING_BUFFER_SIZE_REAL).
 */
struct lockdoc_ring_buffer {
int next_in;
int next_out;
int size;
RING_BUFFER_STORAGE_TYPE data[RING_BUFFER_SIZE_REAL];
};
/* Allocated/freed by control_thread_work(); shared by all worker threads. */
static struct lockdoc_ring_buffer *ring_buffer = NULL;
/*
 * Return non-zero if the buffer cannot take another element.
 * One slot is always kept free, so "full" means the write index is
 * directly behind the read index. Caller is responsible for locking.
 */
static noinline int is_full(volatile struct lockdoc_ring_buffer *buffer)
{
	int next_slot = (buffer->next_in + 1) % buffer->size;

	return next_slot == buffer->next_out;
}
/*
 * Return non-zero if there is nothing to consume, i.e. the read index
 * has caught up with the write index. Caller is responsible for locking.
 */
static noinline int is_empty(volatile struct lockdoc_ring_buffer *buffer)
{
	return buffer->next_in == buffer->next_out;
}
/*
 * Store one element at the current write position and advance it.
 * Returns 0 on success, -1 if the buffer is full.
 * Caller is responsible for holding the appropriate lock(s).
 */
static noinline int produce(volatile struct lockdoc_ring_buffer *buffer, RING_BUFFER_STORAGE_TYPE data) {
	if (!is_full(buffer)) {
		buffer->data[buffer->next_in] = data;
		buffer->next_in = (buffer->next_in + 1) % buffer->size;
		return 0;
	}
	return -1;
}
/*
 * Remove and return the element at the current read position.
 * Returns -1 if the buffer is empty; note -1 is also a legal payload,
 * so the test threads print the result rather than branching on it.
 * Caller is responsible for holding the appropriate lock(s).
 */
static noinline RING_BUFFER_STORAGE_TYPE consume(volatile struct lockdoc_ring_buffer *buffer) {
	RING_BUFFER_STORAGE_TYPE value;

	if (is_empty(buffer))
		return -1;

	value = buffer->data[buffer->next_out];
	buffer->next_out = (buffer->next_out + 1) % buffer->size;
	return value;
}
/*
 * Producer half of the "properly locked" test pair: every buffer access
 * happens under rb_lock, with producer_lock additionally held around
 * produce(). The exact lock sets taken here are the observable input
 * for the LockDoc analysis — do not "clean them up".
 */
static int producer_thread_work(void *data) {
int i, ret;
/*
* Produce 'iterations' elements.
* This fills every element in the ring buffer.
* The 'iterations'+1 call to produce() would fail due to a full buffer.
* The consumer thread will completely empty the buffer.
*/
for (i = 0; i < iterations; i++) {
#ifdef USE_CHECK_FNS
/* Read-only check under rb_lock only. */
spin_lock(&rb_lock);
ret = is_full(ring_buffer);
spin_unlock(&rb_lock);
if (ret) {
printk("%s: Ring buffer is full\n", __func__);
}
#endif
spin_lock(&producer_lock);
spin_lock(&rb_lock);
ret = produce(ring_buffer, i + 30);
/* NOTE(review): unlocked in acquisition order, not reverse order — presumably intentional test input; confirm. */
spin_unlock(&producer_lock);
spin_unlock(&rb_lock);
printk("%s-%03d: Produced(%d): %03d\n", __func__, i, ret, i + 30);
msleep(100);
}
return 0;
}
/*
 * Consumer half of the "properly locked" test pair: mirrors
 * producer_thread_work(), taking consumer_lock + rb_lock around
 * consume(). Lock choreography is deliberate LockDoc test input.
 */
static int consumer_thread_work(void *data) {
int i, ret;
for (i = 0; i < iterations; i++) {
#ifdef USE_CHECK_FNS
/* Read-only check under rb_lock only. */
spin_lock(&rb_lock);
ret = is_empty(ring_buffer);
spin_unlock(&rb_lock);
if (ret) {
printk("%s: Ring buffer is empty\n", __func__);
}
#endif
spin_lock(&consumer_lock);
spin_lock(&rb_lock);
ret = consume(ring_buffer);
/* NOTE(review): unlocked in acquisition order, not reverse order — presumably intentional test input; confirm. */
spin_unlock(&consumer_lock);
spin_unlock(&rb_lock);
printk("%s-%03d: Consumed: %03d\n", __func__, i, ret);
msleep(100);
}
return 0;
}
/*
 * Intentionally "dirty" worker: touches the shared ring buffer with NO
 * locks at all. This is deliberate — it gives LockDoc an example of
 * completely unprotected accesses. Do not add locking here.
 */
static int dirty_nolocks_thread_work(void *data) {
int i = 0, ret;
/*
* Wait a bit. Otherwise the call to kthread_stop() in control_thread_work() will fail,
* because this has terminated and the corresponding task_struct has gone away.
* However, the control thread still holds a reference to the task_struct.
*/
msleep(500);
#ifdef USE_CHECK_FNS
/* Unlocked read — intentional. */
ret = is_full(ring_buffer);
if (ret) {
printk("%s: Ring buffer is full\n", __func__);
}
#endif
/* Unlocked write — intentional. */
ret = produce(ring_buffer, i - 1);
printk("%s-%03d: Produced(%d): %03d\n", __func__, i, ret, i - 1);
#ifdef USE_CHECK_FNS
ret = is_empty(ring_buffer);
if (ret) {
printk("%s: Ring buffer is empty\n",__func__);
}
#endif
/* Unlocked read-modify-write — intentional. */
ret = consume(ring_buffer);
printk("%s-%03d: Consumed: %03d\n", __func__, i, ret);
return 0;
}
/*
 * Intentionally "dirty" worker: takes only rb_lock, omitting the
 * producer_lock/consumer_lock that the clean threads also hold.
 * This deliberately presents LockDoc with a smaller lock set for the
 * same accesses. Do not add the missing locks.
 */
static int dirty_fewlocks_thread_work(void *data) {
int i = 0, ret;
/*
* Wait a bit. Otherwise the call to kthread_stop() in control_thread_work() will fail,
* because this has terminated and the corresponding task_struct has gone away.
* However, the control thread still holds a reference to the task_struct.
*/
msleep(500);
#ifdef USE_CHECK_FNS
spin_lock(&rb_lock);
ret = is_full(ring_buffer);
spin_unlock(&rb_lock);
if (ret) {
printk("%s: Ring buffer is full\n", __func__);
}
#endif
spin_lock(&rb_lock);
ret = produce(ring_buffer, i - 1);
spin_unlock(&rb_lock);
printk("%s-%03d: Produced(%d): %03d\n", __func__, i, ret, i - 1);
#ifdef USE_CHECK_FNS
spin_lock(&rb_lock);
ret = is_empty(ring_buffer);
spin_unlock(&rb_lock);
if (ret) {
printk("%s: Ring buffer is empty\n", __func__);
}
#endif
spin_lock(&rb_lock);
ret = consume(ring_buffer);
spin_unlock(&rb_lock);
printk("%s-%03d: Consumed: %03d\n", __func__, i, ret);
return 0;
}
/*
 * Intentionally "dirty" worker: takes ALL three locks (producer,
 * consumer, rb) around every access, i.e. a strictly larger lock set
 * than the clean threads use. Deliberate LockDoc test input — do not
 * reduce the locking.
 */
static int dirty_alllocks_thread_work(void *data) {
int i = 0, ret;
/*
* Wait a bit. Otherwise the call to kthread_stop() in control_thread_work() will fail,
* because this has terminated and the corresponding task_struct has gone away.
* However, the control thread still holds a reference to the task_struct.
*/
msleep(500);
#ifdef USE_CHECK_FNS
spin_lock(&producer_lock);
spin_lock(&consumer_lock);
spin_lock(&rb_lock);
ret = is_full(ring_buffer);
spin_unlock(&rb_lock);
spin_unlock(&consumer_lock);
spin_unlock(&producer_lock);
if (ret) {
printk("%s: Ring buffer is full\n", __func__);
}
#endif
spin_lock(&producer_lock);
spin_lock(&consumer_lock);
spin_lock(&rb_lock);
ret = produce(ring_buffer, i - 1);
spin_unlock(&rb_lock);
spin_unlock(&consumer_lock);
spin_unlock(&producer_lock);
printk("%s-%03d: Produced(%d): %03d\n", __func__, i, ret, i - 1);
#ifdef USE_CHECK_FNS
spin_lock(&producer_lock);
spin_lock(&consumer_lock);
spin_lock(&rb_lock);
ret = is_empty(ring_buffer);
spin_unlock(&rb_lock);
spin_unlock(&consumer_lock);
spin_unlock(&producer_lock);
if (ret) {
printk("%s: Ring buffer is empty\n", __func__);
}
#endif
spin_lock(&producer_lock);
spin_lock(&consumer_lock);
spin_lock(&rb_lock);
ret = consume(ring_buffer);
spin_unlock(&rb_lock);
spin_unlock(&consumer_lock);
spin_unlock(&producer_lock);
printk("%s-%03d: Consumed: %03d\n", __func__, i, ret);
return 0;
}
/*
 * Intentionally "dirty" worker: acquires the locks in the OPPOSITE
 * order to the clean threads (rb_lock first, then producer/consumer
 * lock). This deliberately feeds LockDoc an inconsistent lock-order
 * example. Do not "fix" the ordering.
 */
static int dirty_order_thread_work(void *data) {
int i = 0, ret;
/*
* Wait a bit. Otherwise the call to kthread_stop() in control_thread_work() will fail,
* because this has terminated and the corresponding task_struct has gone away.
* However, the control thread still holds a reference to the task_struct.
*/
msleep(500);
spin_lock(&rb_lock);
spin_lock(&producer_lock);
ret = produce(ring_buffer, i - 1);
spin_unlock(&producer_lock);
spin_unlock(&rb_lock);
printk("%s-%03d: Produced(%d): %03d\n", __func__, i, ret, i - 1);
spin_lock(&rb_lock);
spin_lock(&consumer_lock);
ret = consume(ring_buffer);
spin_unlock(&consumer_lock);
spin_unlock(&rb_lock);
printk("%s-%03d: Consumed: %03d\n", __func__, i, ret);
return 0;
}
/*
 * Create a kthread running work_fn (named "lockdoc-<fn_name>"), give it
 * time to start and do its work, then reap it with kthread_stop().
 *
 * BUG FIX: the error check previously tested the unrelated global
 * 'control_thread' instead of the thread just created in 'temp', so a
 * failed kthread_create() would go unnoticed and 'temp' (an ERR_PTR)
 * would be passed to wake_up_process().
 */
static void start_and_wait_thread(const char *fn_name, int (*work_fn)(void*)) {
	struct task_struct *temp = NULL;

	printk("%s: Starting %s thread...\n", __func__, fn_name);
	temp = kthread_create(work_fn, NULL, "lockdoc-%s", fn_name);
	if (IS_ERR(temp)) {
		return;
	}
	wake_up_process(temp);
	/*
	 * Wait for the thread to start up.
	 * Otherwise, kthread_stop() will cleanup the thread we've just created before it gets started.
	 */
	msleep(200);
	printk("%s: Waiting for %s thread to terminate...\n", __func__, fn_name);
	kthread_stop(temp);
}
/*
 * Orchestrates one full test run: allocates the shared ring buffer,
 * registers it with LockDoc via log_memory(), runs each worker thread
 * in sequence (clean producer/consumer first, then the dirty variants),
 * then unregisters and frees the buffer.
 *
 * BUG FIX: sizeof() yields size_t, which must be printed with %zu, not
 * %u (undefined behavior on 64-bit where size_t != unsigned int).
 */
static int control_thread_work(void *data) {
	ring_buffer = kzalloc(sizeof(*ring_buffer), GFP_KERNEL);
	if (!ring_buffer) {
		printk("Cannot allocate %zu bytes for ring buffer\n", sizeof(*ring_buffer));
		return 0;
	}
	ring_buffer->size = RING_BUFFER_SIZE_REAL;
	log_memory(1, "lockdoc_ring_buffer", ring_buffer, sizeof(*ring_buffer));
	START_AND_WAIT_THREAD(producer_thread_work);
	START_AND_WAIT_THREAD(consumer_thread_work);
	START_AND_WAIT_THREAD(dirty_nolocks_thread_work);
	START_AND_WAIT_THREAD(dirty_fewlocks_thread_work);
	START_AND_WAIT_THREAD(dirty_alllocks_thread_work);
	START_AND_WAIT_THREAD(dirty_order_thread_work);
	log_memory(0, "lockdoc_ring_buffer", ring_buffer, sizeof(*ring_buffer));
	kfree(ring_buffer);
	return 0;
}
/*
 * Write handler for /proc/lockdoc/control. Writing "1" starts a test
 * run and blocks the caller until all worker threads have finished.
 * Any other value is rejected with -EINVAL.
 *
 * BUG FIX: the buffer returned by memdup_user_nul() was never freed on
 * any path, leaking 'count' bytes per write. It is now freed right
 * after parsing, before any early return.
 */
static ssize_t procfile_control_write(struct file *file, const char __user *user_buffer,
size_t count, loff_t *ppos) {
	unsigned long value = 0;
	char *buffer;
	int err;

	buffer = memdup_user_nul(user_buffer, count);
	if (IS_ERR(buffer)) {
		return PTR_ERR(buffer);
	}
	/* parse input; buffer is no longer needed afterwards */
	err = kstrtoul(buffer, 10, &value);
	kfree(buffer);
	if (err) {
		printk("%s: Could not parse input\n", __func__);
		return -EINVAL;
	}
	if (value == 1) {
		control_thread = kthread_create(control_thread_work, NULL, "lockdoc-control");
		if (IS_ERR(control_thread)) {
			return -EFAULT;
		}
		wake_up_process(control_thread);
		/*
		 * Wait for the thread to start up.
		 * Otherwise, kthread_stop() will cleanup the thread we've just created before it gets started.
		 */
		msleep(200);
		printk("%s: Waiting for control_thread to terminate...\n", __func__);
		// This will block the caller until all threads terminated
		kthread_stop(control_thread);
	} else {
		return -EINVAL;
	}
	return count;
}
/*
 * Read handler for /proc/lockdoc/iterations: reports the current
 * iteration count as a decimal string.
 *
 * BUG FIX: 'iterations' is a signed int, so it must be printed with %d,
 * not %u (format/argument type mismatch). Also use sizeof(iter_buffer)
 * instead of repeating the magic length 20.
 */
static ssize_t procfile_iter_read(struct file *fil, char __user *user_buffer, size_t user_count, loff_t *ppos) {
	char iter_buffer[20];
	int ret = snprintf(iter_buffer, sizeof(iter_buffer), "%d\n", iterations);
	return simple_read_from_buffer(user_buffer, user_count, ppos, iter_buffer, ret);
}
/*
 * Write handler for /proc/lockdoc/iterations: sets how many
 * produce/consume rounds the producer and consumer threads perform.
 * Values larger than the usable ring-buffer capacity are rejected.
 *
 * BUG FIX: the buffer returned by memdup_user_nul() was never freed on
 * any path, leaking 'count' bytes per write. It is now freed right
 * after parsing, before any early return.
 */
static ssize_t procfile_iter_write(struct file *file, const char __user *user_buffer,
size_t count, loff_t *ppos) {
	unsigned long value = 0;
	char *buffer;
	int err;

	buffer = memdup_user_nul(user_buffer, count);
	if (IS_ERR(buffer)) {
		return PTR_ERR(buffer);
	}
	/* parse input; buffer is no longer needed afterwards */
	err = kstrtoul(buffer, 10, &value);
	kfree(buffer);
	if (err) {
		printk("%s: Could not parse input\n", __func__);
		return -EINVAL;
	}
	/*
	 * Iterations cannot be larger than the buffer size.
	 * If 'iterations' is larger than the buffer size,
	 * the consumer/producer thread will execute different code paths (iterations - RING_BUFFER_SIZE_VIRT) times.
	 * We want to the consumer and producer thread to execute the same code 'iterations' times.
	 */
	if (value > RING_BUFFER_SIZE_VIRT) {
		printk("%s: Desired iterations (%lu) is larger than buffer size (%d)\n", __func__, value, RING_BUFFER_SIZE_VIRT);
		return -EINVAL;
	}
	iterations = value;
	printk("Setting iterations to %d\n", iterations);
	return count;
}
/* /proc/lockdoc/control: write-only trigger for a test run. */
static const struct file_operations proc_control_ops = {
.write = procfile_control_write
};
/* /proc/lockdoc/iterations: read/write access to the iteration count. */
static const struct file_operations proc_iter_ops = {
.write = procfile_iter_write,
.read = procfile_iter_read
};
/*
 * Module init: create /proc/lockdoc with the "control" and "iterations"
 * files, both owned by root (uid/gid 0).
 *
 * BUG FIX 1: the NULL check after creating 'proc_iter' tested
 * 'proc_control' again (copy/paste error), so a failed creation of the
 * iterations file went undetected.
 * BUG FIX 2: on partial failure, previously created procfs entries were
 * leaked; they are now removed before returning -ENOMEM.
 */
static int __init lockdoc_test_module_init(void)
{
	kuid_t fileUID;
	kgid_t fileGID;

	fileUID.val = 0;
	fileGID.val = 0;
	proc_dir = proc_mkdir(PROC_DIR_NAME, NULL);
	if (proc_dir == NULL) {
		printk("Could not create /proc/%s\n", PROC_DIR_NAME);
		return -ENOMEM;
	}
	proc_control = proc_create(PROC_FILE_CONTROL_NAME, 0644, proc_dir, &proc_control_ops);
	if (proc_control == NULL) {
		printk("Could not create /proc/%s/%s\n", PROC_DIR_NAME, PROC_FILE_CONTROL_NAME);
		proc_remove(proc_dir);
		return -ENOMEM;
	}
	proc_set_user(proc_control, fileUID, fileGID);
	proc_set_size(proc_control, 0);
	proc_iter = proc_create(PROC_FILE_ITER_NAME, 0644, proc_dir, &proc_iter_ops);
	if (proc_iter == NULL) {
		printk("Could not create /proc/%s/%s\n", PROC_DIR_NAME, PROC_FILE_ITER_NAME);
		proc_remove(proc_control);
		proc_remove(proc_dir);
		return -ENOMEM;
	}
	proc_set_user(proc_iter, fileUID, fileGID);
	proc_set_size(proc_iter, 8);
	return 0;
}
/*
 * Module exit: tear down the procfs entries. The files are removed
 * before their parent directory.
 */
static void __exit lockdoc_test_module_exit(void)
{
proc_remove(proc_control);
proc_remove(proc_iter);
proc_remove(proc_dir);
}
module_init(lockdoc_test_module_init);
module_exit(lockdoc_test_module_exit);
MODULE_LICENSE("GPL");