From b5822c2a0a4240ac51251467d859b0fb91b36292 Mon Sep 17 00:00:00 2001
From: Sebastian Huber
Date: Fri, 17 Nov 2017 10:49:31 +0100
Subject: Revert "SX(9): Implement with reader/writer lock"

This was accidentally committed.

This reverts commit cc7a8d87e7307db738bf39ab1ca3ce1053f1c163.
---
 freebsd/sys/sys/_sx.h            |   4 +-
 freebsd/sys/sys/sx.h             |   3 +
 rtemsbsd/rtems/rtems-kernel-sx.c | 121 +++++++++------------------------
 3 files changed, 33 insertions(+), 95 deletions(-)

diff --git a/freebsd/sys/sys/_sx.h b/freebsd/sys/sys/_sx.h
index b3bcd275..b07ac47a 100644
--- a/freebsd/sys/sys/_sx.h
+++ b/freebsd/sys/sys/_sx.h
@@ -31,7 +31,7 @@
 #ifndef _SYS__SX_H_
 #define _SYS__SX_H_
 #ifdef __rtems__
-#include <machine/rtems-bsd-rwlock.h>
+#include <machine/rtems-bsd-mutex.h>
 #endif /* __rtems__ */
 
 /*
@@ -42,7 +42,7 @@ struct sx {
 #ifndef __rtems__
 	volatile uintptr_t	sx_lock;
 #else /* __rtems__ */
-	rtems_bsd_rwlock rwlock;
+	rtems_bsd_mutex mutex;
 #endif /* __rtems__ */
 };
 
diff --git a/freebsd/sys/sys/sx.h b/freebsd/sys/sys/sx.h
index f8eba5e1..0c95df16 100644
--- a/freebsd/sys/sys/sx.h
+++ b/freebsd/sys/sys/sx.h
@@ -44,6 +44,9 @@
 
 #ifdef __rtems__
 #define SX_NOINLINE 1
+#define _sx_slock _bsd__sx_xlock
+#define sx_try_slock_ _bsd_sx_try_xlock_
+#define _sx_sunlock _bsd__sx_xunlock
 #endif /* __rtems__ */
 /*
  * In general, the sx locks and rwlocks use very similar algorithms.
diff --git a/rtemsbsd/rtems/rtems-kernel-sx.c b/rtemsbsd/rtems/rtems-kernel-sx.c
index ded58f5f..7f47bd71 100644
--- a/rtemsbsd/rtems/rtems-kernel-sx.c
+++ b/rtemsbsd/rtems/rtems-kernel-sx.c
@@ -7,7 +7,7 @@
  */
 
 /*
- * Copyright (c) 2009, 2017 embedded brains GmbH.  All rights reserved.
+ * Copyright (c) 2009-2015 embedded brains GmbH.  All rights reserved.
 *
  *  embedded brains GmbH
  *  Dornierstr. 4
@@ -15,9 +15,6 @@
  *  Germany
  *
 *
- * Copyright (c) 2006 John Baldwin
- * All rights reserved.
- *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -41,7 +38,7 @@
  */
 
 #include <machine/rtems-bsd-kernel-space.h>
-#include <machine/rtems-bsd-rwlockimpl.h>
+#include <machine/rtems-bsd-muteximpl.h>
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -62,9 +59,9 @@ struct lock_class lock_class_sx = {
 	.lc_unlock = unlock_sx,
 };
 
-#define sx_xholder(sx) rtems_bsd_rwlock_wowner(&(sx)->rwlock)
+#define sx_xholder(sx) rtems_bsd_mutex_owner(&(sx)->mutex)
 
-#define sx_recursed(sx) rtems_bsd_rwlock_recursed(&(sx)->rwlock)
+#define sx_recursed(sx) rtems_bsd_mutex_recursed(&(sx)->mutex)
 
 void
 assert_sx(const struct lock_object *lock, int what)
@@ -76,29 +73,16 @@ assert_sx(const struct lock_object *lock, int what)
 void
 lock_sx(struct lock_object *lock, uintptr_t how)
 {
-	struct sx *sx;
 
-	sx = (struct sx *)lock;
-	if (how)
-		sx_slock(sx);
-	else
-		sx_xlock(sx);
+	sx_xlock((struct sx *)lock);
 }
 
 uintptr_t
 unlock_sx(struct lock_object *lock)
 {
-	struct sx *sx;
 
-	sx = (struct sx *)lock;
-	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
-	if (sx->rwlock.readers > 0) {
-		sx_sunlock(sx);
-		return (1);
-	} else {
-		sx_xunlock(sx);
-		return (0);
-	}
+	sx_xunlock((struct sx *)lock);
+	return (0);
 }
 
 void
@@ -118,7 +102,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
 	if (opts & SX_RECURSE)
 		flags |= LO_RECURSABLE;
 
-	rtems_bsd_rwlock_init(&sx->lock_object, &sx->rwlock, &lock_class_sx,
+	rtems_bsd_mutex_init(&sx->lock_object, &sx->mutex, &lock_class_sx,
 	    description, NULL, flags);
 }
 
@@ -126,65 +110,39 @@ void
 sx_destroy(struct sx *sx)
 {
 
-	rtems_bsd_rwlock_destroy(&sx->lock_object, &sx->rwlock);
+	rtems_bsd_mutex_destroy(&sx->lock_object, &sx->mutex);
 }
 
 int
 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
 {
+	rtems_bsd_mutex_lock(&sx->lock_object, &sx->mutex);
 
-	rtems_bsd_rwlock_wlock(&sx->lock_object, &sx->rwlock);
 	return (0);
 }
 
 int
 sx_try_xlock_(struct sx *sx, const char *file, int line)
 {
-
-	return (rtems_bsd_rwlock_try_wlock(&sx->lock_object, &sx->rwlock));
+	return (rtems_bsd_mutex_trylock(&sx->lock_object, &sx->mutex));
 }
 
 void
 _sx_xunlock(struct sx *sx, const char *file, int line)
 {
-
-	rtems_bsd_rwlock_wunlock(&sx->rwlock);
+	rtems_bsd_mutex_unlock(&sx->mutex);
 }
 
 int
 sx_try_upgrade_(struct sx *sx, const char *file, int line)
 {
-
-	return (rtems_bsd_rwlock_try_upgrade(&sx->rwlock));
+	return (1);
 }
 
 void
 sx_downgrade_(struct sx *sx, const char *file, int line)
 {
-
-	rtems_bsd_rwlock_downgrade(&sx->rwlock);
-}
-
-int
-_sx_slock(struct sx *sx, int opts, const char *file, int line)
-{
-
-	rtems_bsd_rwlock_rlock(&sx->lock_object, &sx->rwlock);
-	return (0);
-}
-
-int
-sx_try_slock_(struct sx *sx, const char *file, int line)
-{
-
-	return (rtems_bsd_rwlock_try_rlock(&sx->lock_object, &sx->rwlock));
-}
-
-void
-_sx_sunlock(struct sx *sx, const char *file, int line)
-{
-
-	rtems_bsd_rwlock_runlock(&sx->rwlock);
+	/* Do nothing */
 }
 
 #ifdef INVARIANT_SUPPORT
@@ -196,64 +154,42 @@ _sx_sunlock(struct sx *sx, const char *file, int line)
 void
 _sx_assert(const struct sx *sx, int what, const char *file, int line)
 {
-	const char *name = rtems_bsd_rwlock_name(&sx->rwlock);
-	int slocked = 0;
+	const char *name = rtems_bsd_mutex_name(&sx->mutex);
 
 	switch (what) {
 	case SA_SLOCKED:
 	case SA_SLOCKED | SA_NOTRECURSED:
 	case SA_SLOCKED | SA_RECURSED:
-		slocked = 1;
-		/* FALLTHROUGH */
 	case SA_LOCKED:
 	case SA_LOCKED | SA_NOTRECURSED:
 	case SA_LOCKED | SA_RECURSED:
-		/*
-		 * If some other thread has an exclusive lock or we
-		 * have one and are asserting a shared lock, fail.
-		 * Also, if no one has a lock at all, fail.
-		 */
-		if ((sx->rwlock.readers == 0 && sx_xholder(sx) == NULL) ||
-		    (sx->rwlock.readers == 0 && (slocked ||
-		    sx_xholder(sx) != _Thread_Get_executing())))
-			panic("Lock %s not %slocked @ %s:%d\n",
-			    name, slocked ? "share " : "",
-			    file, line);
-
-		if (sx->rwlock.readers == 0) {
-			if (sx_recursed(sx)) {
-				if (what & SA_NOTRECURSED)
-					panic("Lock %s recursed @ %s:%d\n",
-					    name, file,
-					    line);
-			} else if (what & SA_RECURSED)
-				panic("Lock %s not recursed @ %s:%d\n",
-				    name, file, line);
-		}
-		break;
 	case SA_XLOCKED:
 	case SA_XLOCKED | SA_NOTRECURSED:
 	case SA_XLOCKED | SA_RECURSED:
 		if (sx_xholder(sx) != _Thread_Get_executing())
-			panic("Lock %s not exclusively locked @ %s:%d\n",
-			    name, file, line);
+			panic("Lock %s not exclusively locked @ %s:%d\n", name,
+			    file, line);
 		if (sx_recursed(sx)) {
 			if (what & SA_NOTRECURSED)
-				panic("Lock %s recursed @ %s:%d\n",
-				    name, file, line);
+				panic("Lock %s recursed @ %s:%d\n", name, file,
+				    line);
 		} else if (what & SA_RECURSED)
-			panic("Lock %s not recursed @ %s:%d\n",
-			    name, file, line);
+			panic("Lock %s not recursed @ %s:%d\n", name, file,
+			    line);
 		break;
 	case SA_UNLOCKED:
+#ifdef WITNESS
+		witness_assert(&sx->lock_object, what, file, line);
+#else
 		/*
 		 * If we hold an exclusve lock fail.  We can't
 		 * reliably check to see if we hold a shared lock or
 		 * not.
 		 */
 		if (sx_xholder(sx) == _Thread_Get_executing())
-			panic("Lock %s exclusively locked @ %s:%d\n",
-			    name, file, line);
+			panic("Lock %s exclusively locked @ %s:%d\n", name,
+			    file, line);
+#endif
 		break;
 	default:
 		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
@@ -265,6 +201,5 @@ _sx_assert(const struct sx *sx, int what, const char *file, int line)
 int
 sx_xlocked(struct sx *sx)
 {
-
-	return (rtems_bsd_rwlock_wowned(&sx->rwlock));
+	return (rtems_bsd_mutex_owned(&sx->mutex));
 }
-- 
cgit v1.2.3
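Usage note (illustrative, not part of the patch): after this revert, the RTEMS port backs sx(9) with a single rtems_bsd_mutex again. sx.h aliases the shared-lock entry points (_sx_slock, sx_try_slock_, _sx_sunlock) to the exclusive ones, sx_try_upgrade_() always succeeds, and sx_downgrade_() is a no-op. The sketch below shows typical sx(9) usage that this implementation supports; it is a minimal example assuming the usual rtems-libbsd kernel-space compilation environment, and the identifiers example_lock, example_counter and the example_*() functions are hypothetical and do not appear in the tree.

/*
 * Illustrative sketch only, not part of this patch.  The names
 * example_lock, example_counter and example_*() are hypothetical.
 */
#include <machine/rtems-bsd-kernel-space.h>

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_lock;
static int example_counter;

static void
example_setup(void)
{
	/* sx_init() ends up in sx_init_flags() and thus rtems_bsd_mutex_init(). */
	sx_init(&example_lock, "example");
}

static int
example_read(void)
{
	int v;

	/* Aliased to the exclusive lock on RTEMS, so readers serialize. */
	sx_slock(&example_lock);
	v = example_counter;
	sx_sunlock(&example_lock);
	return (v);
}

static void
example_write(int v)
{
	sx_xlock(&example_lock);
	example_counter = v;
	sx_xunlock(&example_lock);
}

static void
example_teardown(void)
{
	sx_destroy(&example_lock);
}

Because the shared operations map onto the same mutex, concurrent readers serialize; code that uses sx(9) purely for mutual exclusion behaves the same, which is the trade-off this revert restores.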