A new version of herd7 provides a -lkmmv2 switch which overrides the old
herd7 behavior of simply ignoring any softcoded tags in the .def and
.bell files.  We port LKMM to this version of herd7 by providing the
switch in linux-kernel.cfg and reporting an error if LKMM is used
without this switch.

To preserve the semantics of LKMM, we also softcode the Noreturn tag on
atomic RMW operations that do not return a value and define
atomic_add_unless with an Mb tag in linux-kernel.def.  We update
herd-representation.txt accordingly and clarify some of the resulting
combinations.

Co-developed-by: Hernan Ponce de Leon <hernan.poncedeleon@huaweicloud.com>
Signed-off-by: Hernan Ponce de Leon <hernan.poncedeleon@huaweicloud.com>
Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Tested-by: Boqun Feng <boqun.feng@gmail.com>
Tested-by: Akira Yokosawa <akiyks@gmail.com> # herdtools7 7.58

// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appeared in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which appeared in ASPLOS 2018.

// ONCE
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }

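// Illustrative sketch, not part of the model: once-tagged accesses are
// ordered only by per-location coherence.  Assuming herd7 is run with
// linux-kernel.cfg, it should report the outcome below as never
// happening, because two READ_ONCE() calls of the same variable cannot
// observe its writes out of order (the test name here is made up):
//
//	C CoRR+once-sketch
//
//	{}
//
//	P0(int *x)
//	{
//		WRITE_ONCE(*x, 1);
//	}
//
//	P1(int *x)
//	{
//		int r0;
//		int r1;
//
//		r0 = READ_ONCE(*x);
//		r1 = READ_ONCE(*x);
//	}
//
//	exists (1:r0=1 /\ 1:r1=0)
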
// Release Acquire and friends
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }

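// A minimal message-passing sketch, modeled on the kernel's
// MP+pooncerelease+poacquireonce litmus test: pairing a release store
// with an acquire load forbids observing the flag without the payload.
//
//	C MP+release-acquire-sketch
//
//	{}
//
//	P0(int *buf, int *flag)
//	{
//		WRITE_ONCE(*buf, 1);
//		smp_store_release(flag, 1);
//	}
//
//	P1(int *buf, int *flag)
//	{
//		int r0;
//		int r1;
//
//		r0 = smp_load_acquire(flag);
//		r1 = READ_ONCE(*buf);
//	}
//
//	exists (1:r0=1 /\ 1:r1=0)
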
// Fences
smp_mb() { __fence{mb}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }

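// For example, smp_mb() forbids the store-buffering outcome in which
// each thread misses the other's store; a sketch along the lines of the
// kernel's SB+fencembonceonces litmus test:
//
//	C SB+mb-sketch
//
//	{}
//
//	P0(int *x, int *y)
//	{
//		int r0;
//
//		WRITE_ONCE(*x, 1);
//		smp_mb();
//		r0 = READ_ONCE(*y);
//	}
//
//	P1(int *x, int *y)
//	{
//		int r0;
//
//		WRITE_ONCE(*y, 1);
//		smp_mb();
//		r0 = READ_ONCE(*x);
//	}
//
//	exists (0:r0=0 /\ 1:r0=0)
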
// Exchange
xchg(X,V) __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)

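// The mb-tagged forms behave as if surrounded by smp_mb(), so xchg()
// also forbids the store-buffering outcome shown above, whereas
// xchg_relaxed() would allow it.  A hypothetical sketch:
//
//	C SB+xchg-sketch
//
//	{}
//
//	P0(int *x, int *y)
//	{
//		int r0;
//		int r1;
//
//		r0 = xchg(x, 1);
//		r1 = READ_ONCE(*y);
//	}
//
//	P1(int *x, int *y)
//	{
//		int r0;
//		int r1;
//
//		r0 = xchg(y, 1);
//		r1 = READ_ONCE(*x);
//	}
//
//	exists (0:r1=0 /\ 1:r1=0)
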
// Spinlocks
spin_lock(X) { __lock(X); }
spin_unlock(X) { __unlock(X); }
spin_trylock(X) __trylock(X)
spin_is_locked(X) __islocked(X)

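// Critical sections on the same lock exclude one another, so a reader
// holding the lock can never observe an intermediate value written
// inside another critical section.  A hypothetical sketch:
//
//	C lock-mutual-exclusion-sketch
//
//	{}
//
//	P0(int *x, spinlock_t *l)
//	{
//		spin_lock(l);
//		WRITE_ONCE(*x, 1);
//		WRITE_ONCE(*x, 2);
//		spin_unlock(l);
//	}
//
//	P1(int *x, spinlock_t *l)
//	{
//		int r0;
//
//		spin_lock(l);
//		r0 = READ_ONCE(*x);
//		spin_unlock(l);
//	}
//
//	exists (1:r0=1)
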
// RCU
rcu_read_lock() { __fence{rcu-lock}; }
rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }

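// The grace-period guarantee: a reader whose critical section observes
// a write that follows synchronize_rcu() must also observe the writes
// that precede it.  A hypothetical sketch:
//
//	C RCU-gp-sketch
//
//	{}
//
//	P0(int *x, int *y)
//	{
//		int r0;
//		int r1;
//
//		rcu_read_lock();
//		r0 = READ_ONCE(*y);
//		r1 = READ_ONCE(*x);
//		rcu_read_unlock();
//	}
//
//	P1(int *x, int *y)
//	{
//		WRITE_ONCE(*x, 1);
//		synchronize_rcu();
//		WRITE_ONCE(*y, 1);
//	}
//
//	exists (0:r0=1 /\ 0:r1=0)
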
// SRCU
srcu_read_lock(X) __load{srcu-lock}(*X)
srcu_read_unlock(X,Y) { __store{srcu-unlock}(*X,Y); }
srcu_down_read(X) __load{srcu-lock}(*X)
srcu_up_read(X,Y) { __store{srcu-unlock}(*X,Y); }
synchronize_srcu(X) { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }

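// SRCU gives the same grace-period guarantee per srcu_struct.  A
// hypothetical sketch, assuming the struct srcu_struct parameter syntax
// accepted by herd7:
//
//	C SRCU-gp-sketch
//
//	{}
//
//	P0(int *x, int *y, struct srcu_struct *s)
//	{
//		int r0;
//		int r1;
//		int idx;
//
//		idx = srcu_read_lock(s);
//		r0 = READ_ONCE(*y);
//		r1 = READ_ONCE(*x);
//		srcu_read_unlock(s, idx);
//	}
//
//	P1(int *x, int *y, struct srcu_struct *s)
//	{
//		WRITE_ONCE(*x, 1);
//		synchronize_srcu(s);
//		WRITE_ONCE(*y, 1);
//	}
//
//	exists (0:r0=1 /\ 0:r1=0)
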
// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }

atomic_add(V,X) { __atomic_op{noreturn}(X,+,V); }
atomic_sub(V,X) { __atomic_op{noreturn}(X,-,V); }
atomic_and(V,X) { __atomic_op{noreturn}(X,&,V); }
atomic_or(V,X) { __atomic_op{noreturn}(X,|,V); }
atomic_xor(V,X) { __atomic_op{noreturn}(X,^,V); }
atomic_inc(X) { __atomic_op{noreturn}(X,+,1); }
atomic_dec(X) { __atomic_op{noreturn}(X,-,1); }
atomic_andnot(V,X) { __atomic_op{noreturn}(X,&~,V); }

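// These void-returning operations carry the (now softcoded) Noreturn
// tag and provide no ordering by themselves; smp_mb__before_atomic()
// and smp_mb__after_atomic() upgrade them to full barriers.  A
// hypothetical sketch:
//
//	C MP+atomic-inc-sketch
//
//	{}
//
//	P0(int *x, atomic_t *v)
//	{
//		WRITE_ONCE(*x, 1);
//		smp_mb__before_atomic();
//		atomic_inc(v);
//	}
//
//	P1(int *x, atomic_t *v)
//	{
//		int r0;
//		int r1;
//
//		r0 = atomic_read(v);
//		smp_rmb();
//		r1 = READ_ONCE(*x);
//	}
//
//	exists (1:r0=1 /\ 1:r1=0)
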
atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)

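// The mb-tagged value-returning forms are fully ordered by themselves,
// so atomic_fetch_add() can stand in for the writer-side
// smp_mb__before_atomic() of the previous sketch, whereas
// atomic_fetch_add_relaxed() could not:
//
//	C MP+fetch-add-sketch
//
//	{}
//
//	P0(int *x, atomic_t *v)
//	{
//		int r0;
//
//		WRITE_ONCE(*x, 1);
//		r0 = atomic_fetch_add(1, v);
//	}
//
//	P1(int *x, atomic_t *v)
//	{
//		int r0;
//		int r1;
//
//		r0 = atomic_read(v);
//		smp_rmb();
//		r1 = READ_ONCE(*x);
//	}
//
//	exists (1:r0=1 /\ 1:r1=0)
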
atomic_fetch_and(V,X) __atomic_fetch_op{mb}(X,&,V)
atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{once}(X,&,V)
atomic_fetch_and_acquire(V,X) __atomic_fetch_op{acquire}(X,&,V)
atomic_fetch_and_release(V,X) __atomic_fetch_op{release}(X,&,V)

atomic_fetch_or(V,X) __atomic_fetch_op{mb}(X,|,V)
atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{once}(X,|,V)
atomic_fetch_or_acquire(V,X) __atomic_fetch_op{acquire}(X,|,V)
atomic_fetch_or_release(V,X) __atomic_fetch_op{release}(X,|,V)

atomic_fetch_xor(V,X) __atomic_fetch_op{mb}(X,^,V)
atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{once}(X,^,V)
atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{acquire}(X,^,V)
atomic_fetch_xor_release(V,X) __atomic_fetch_op{release}(X,^,V)

atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)

atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)

atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)

atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)

atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
atomic_add_negative_relaxed(V,X) __atomic_op_return{once}(X,+,V) < 0
atomic_add_negative_acquire(V,X) __atomic_op_return{acquire}(X,+,V) < 0
atomic_add_negative_release(V,X) __atomic_op_return{release}(X,+,V) < 0

atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)

atomic_add_unless(X,V,W) __atomic_add_unless{mb}(X,V,W)

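// Note: as described in the commit message above, atomic_add_unless()
// is defined with a softcoded Mb tag, and herd7 must be invoked with
// the -lkmmv2 switch (supplied by linux-kernel.cfg) for softcoded tags
// such as this one and Noreturn to take effect.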