Add support for rw semaphore changes under PREEMPT_RT_FULL
clefru committed Dec 17, 2016
1 parent 0636e96 commit 3d96ef4
Showing 3 changed files with 52 additions and 3 deletions.
9 changes: 7 additions & 2 deletions include/linux/rwsem_compat.h
@@ -27,7 +27,10 @@

#include <linux/rwsem.h>

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#if defined(CONFIG_PREEMPT_RT_FULL)
#define SPL_RWSEM_SINGLE_READER_VALUE (1)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (0)
#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define SPL_RWSEM_SINGLE_READER_VALUE (1)
#define SPL_RWSEM_SINGLE_WRITER_VALUE (-1)
#else
@@ -36,7 +39,9 @@
#endif

/* Linux 3.16 changed activity to count for rwsem-spinlock */
#if defined(HAVE_RWSEM_ACTIVITY)
#if defined(CONFIG_PREEMPT_RT_FULL)
#define RWSEM_COUNT(sem) sem->read_depth
#elif defined(HAVE_RWSEM_ACTIVITY)
#define RWSEM_COUNT(sem) sem->activity
/* Linux 4.8 changed count to an atomic_long_t for !rwsem-spinlock */
#elif defined(HAVE_RWSEM_ATOMIC_LONG_COUNT)
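Note on the header change above: defining RWSEM_COUNT() and the SPL_RWSEM_SINGLE_*_VALUE constants per configuration lets callers test the lock state without caring which rwsem implementation the kernel was built with. The snippet below is a minimal sketch of such a caller, not part of this commit; the helper name spl_rwsem_single_reader() is hypothetical, and the caller is assumed to already hold whatever lock serializes updates to the rwsem count.

#include <linux/rwsem.h>
#include <linux/rwsem_compat.h>

/*
 * Hypothetical helper (illustration only): with the macros above, the
 * "held by exactly one reader" check reads the same whether the kernel
 * uses rwsem-spinlock, an atomic count, or the PREEMPT_RT_FULL
 * rt_mutex-based rwsem.
 */
static inline int
spl_rwsem_single_reader(struct rw_semaphore *rwsem)
{
	/* Caller must serialize against concurrent count updates. */
	return (RWSEM_COUNT(rwsem) == SPL_RWSEM_SINGLE_READER_VALUE);
}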
30 changes: 29 additions & 1 deletion module/spl/spl-rwlock.c
@@ -32,7 +32,35 @@

#define DEBUG_SUBSYSTEM S_RWLOCK

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#if defined(CONFIG_PREEMPT_RT_FULL)

#include <linux/rtmutex.h>

static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
	ASSERT(rt_mutex_owner(&rwsem->lock) == current);

	/*
	 * Under the realtime patch series, rwsem is implemented as a
	 * single mutex held by readers and writers alike.  However, this
	 * implementation would prevent a thread from taking a read lock
	 * twice, as the mutex would already be locked on the second
	 * attempt.  The implementation therefore allows a single thread
	 * to take a rwsem as a read lock multiple times, tracking that
	 * nesting in the read_depth counter.
	 */
	if (rwsem->read_depth <= 1) {
		/*
		 * If the current thread has taken the lock as a read
		 * lock no more than once, the upgrade to a write lock
		 * is allowed.  rwsem_rt.h represents a write lock as
		 * read_depth == 0.
		 */
		rwsem->read_depth = 0;
		return (1);
	}
	return (0);
}
#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
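For context, the upgrade path added above serves the usual SPL read-then-upgrade pattern sketched below. This is illustrative only and not part of this commit: rw_tryupgrade() is the SPL interface that this helper appears to back, and example_update() and its krwlock_t argument are assumptions made for the sketch.

#include <sys/rwlock.h>

/* Hypothetical caller showing the common rw_tryupgrade() pattern. */
static void
example_update(krwlock_t *lock)
{
	rw_enter(lock, RW_READER);
	/* ... inspect the protected state under the read lock ... */
	if (!rw_tryupgrade(lock)) {
		/* Upgrade failed: drop the lock, re-take as a writer. */
		rw_exit(lock);
		rw_enter(lock, RW_WRITER);
		/* ... revalidate anything observed as a reader ... */
	}
	/* ... modify the protected state as a writer ... */
	rw_exit(lock);
}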
16 changes: 16 additions & 0 deletions module/splat/splat-rwlock.c
@@ -106,6 +106,15 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
	rwp->rw_type = 0;
}

#if defined(CONFIG_PREEMPT_RT_FULL)
static int
splat_rwlock_test1(struct file *file, void *arg)
{
	/*
	 * This test exercises multiple concurrent readers, which can
	 * never succeed under PREEMPT_RT_FULL because the lock can only
	 * be held by a single thread, so it is stubbed out here.
	 */
	return 0;
}
#else
static int
splat_rwlock_wr_thr(void *arg)
{
@@ -297,6 +306,7 @@ splat_rwlock_test1(struct file *file, void *arg)

	return rc;
}
#endif

static void
splat_rwlock_test2_func(void *arg)
@@ -518,7 +528,13 @@ splat_rwlock_test4(struct file *file, void *arg)
	rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
	rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
	rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
#if defined(CONFIG_PREEMPT_RT_FULL)
	/*
	 * Under PREEMPT_RT_FULL a second read lock can only be taken by
	 * the thread that already holds the rwsem, so a reader in
	 * another thread is expected to fail with -EBUSY.
	 */
	rc4 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_READER);
#else
	rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER);
#endif
	rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER);
	rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER);

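The changed expectation for rc4 can be read as the two-thread scenario below. This is illustrative only and not part of this commit; the shared krwlock_t named lock and the thread labels are assumptions, and the exact way splat_rwlock_test4_type() maps a failed attempt to -EBUSY is not shown in this diff.

	/* Thread A: holds the lock as a reader. */
	rw_enter(&lock, RW_READER);

	/* Thread B: a non-blocking read attempt from another thread. */
	if (!rw_tryenter(&lock, RW_READER)) {
		/*
		 * Under PREEMPT_RT_FULL this branch is taken, because the
		 * rwsem is backed by a single rt_mutex owned by thread A;
		 * the splat test encodes that as an expected -EBUSY.  On a
		 * non-RT kernel both readers succeed and rc4 expects 0.
		 */
	}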
