From: Hans Petter Selasky <hselasky@FreeBSD.org>
Date: Wed, 11 May 2016 07:58:43 +0000 (UTC)
To: src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject: svn commit: r299415 - head/sys/compat/linuxkpi/common/include/asm
Message-Id: <201605110758.u4B7wh3x069455@repo.freebsd.org>

Author: hselasky
Date: Wed May 11 07:58:43 2016
New Revision: 299415
URL: https://svnweb.freebsd.org/changeset/base/299415

Log:
  Add more atomic LinuxKPI functions.

  Obtained from:	kmacy @
  MFC after:	1 week
  Sponsored by:	Mellanox Technologies
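For context, the new atomic_add_and_test(), atomic_set_mask() and
atomic_clear_mask() follow the usual Linux contracts: add_and_test()
reports whether the addition drove the counter to exactly zero, and the
mask functions atomically set or clear bits in the counter. Below is a
minimal userland C11 model of those contracts; it is an illustration
only, not the LinuxKPI code itself (which builds against the FreeBSD
kernel's atomic(9) primitives), and the *_model names are invented for
this sketch.

/*
 * Userland C11 model of the new 32-bit helpers; illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_t;

/* atomic_add_and_test(): true when the addition yields exactly zero. */
static int
add_and_test_model(int i, atomic_t *v)
{
	return (atomic_fetch_add(&v->counter, i) + i == 0);
}

/* atomic_set_mask(): atomically OR bits into the counter. */
static void
set_mask_model(unsigned int mask, atomic_t *v)
{
	atomic_fetch_or(&v->counter, mask);
}

/* atomic_clear_mask(): atomically clear the given bits. */
static void
clear_mask_model(unsigned int mask, atomic_t *v)
{
	atomic_fetch_and(&v->counter, ~mask);
}

int
main(void)
{
	atomic_t v = { .counter = -4 };

	printf("%d\n", add_and_test_model(4, &v));	/* 1: hit zero */
	set_mask_model(0x0f, &v);
	clear_mask_model(0x03, &v);
	printf("0x%02x\n", (unsigned)atomic_load(&v.counter));	/* 0x0c */
	return (0);
}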
Modified:
  head/sys/compat/linuxkpi/common/include/asm/atomic.h
  head/sys/compat/linuxkpi/common/include/asm/atomic64.h

Modified: head/sys/compat/linuxkpi/common/include/asm/atomic.h
==============================================================================
--- head/sys/compat/linuxkpi/common/include/asm/atomic.h	Wed May 11 07:50:35 2016	(r299414)
+++ head/sys/compat/linuxkpi/common/include/asm/atomic.h	Wed May 11 07:58:43 2016	(r299415)
@@ -49,6 +49,7 @@ typedef struct {
 #define	atomic_sub(i, v)		atomic_sub_return((i), (v))
 #define	atomic_inc_return(v)		atomic_add_return(1, (v))
 #define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
+#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
 #define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
 #define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
@@ -73,6 +74,12 @@ atomic_set(atomic_t *v, int i)
 	atomic_store_rel_int(&v->counter, i);
 }
 
+static inline void
+atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_set_int(&v->counter, mask);
+}
+
 static inline int
 atomic_read(atomic_t *v)
 {
@@ -106,4 +113,82 @@ atomic_add_unless(atomic_t *v, int a, in
 	return (c != u);
 }
 
+static inline void
+atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_clear_int(&v->counter, mask);
+}
+
+static inline int
+atomic_xchg(atomic_t *v, int i)
+{
+#if defined(__i386__) || defined(__amd64__) || \
+    defined(__arm__) || defined(__aarch64__)
+	return (atomic_swap_int(&v->counter, i));
+#else
+	int ret;
+	for (;;) {
+		ret = atomic_load_acq_int(&v->counter);
+		if (atomic_cmpset_int(&v->counter, ret, i))
+			break;
+	}
+	return (ret);
+#endif
+}
+
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret = old;
+
+	for (;;) {
+		if (atomic_cmpset_int(&v->counter, old, new))
+			break;
+		ret = atomic_load_acq_int(&v->counter);
+		if (ret != old)
+			break;
+	}
+	return (ret);
+}
+
+#define	cmpxchg(ptr, old, new) ({					\
+	__typeof(*(ptr)) __ret = (old);					\
+	CTASSERT(sizeof(__ret) == 4 || sizeof(__ret) == 8);		\
+	for (;;) {							\
+		if (sizeof(__ret) == 4) {				\
+			if (atomic_cmpset_int((volatile int *)		\
+			    (ptr), (old), (new)))			\
+				break;					\
+			__ret = atomic_load_acq_int(			\
+			    (volatile int *)(ptr));			\
+			if (__ret != (old))				\
+				break;					\
+		} else {						\
+			if (atomic_cmpset_64(				\
+			    (volatile int64_t *)(ptr),			\
+			    (old), (new)))				\
+				break;					\
+			__ret = atomic_load_acq_64(			\
+			    (volatile int64_t *)(ptr));			\
+			if (__ret != (old))				\
+				break;					\
+		}							\
+	}								\
+	__ret;								\
+})
+
+#define	LINUX_ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int c, old;							\
+									\
+	c = v->counter;							\
+	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+LINUX_ATOMIC_OP(or, |)
+LINUX_ATOMIC_OP(and, &)
+LINUX_ATOMIC_OP(xor, ^)
+
 #endif					/* _ASM_ATOMIC_H_ */
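The LINUX_ATOMIC_OP() macro above generates atomic_or(), atomic_and()
and atomic_xor() from a cmpxchg retry loop: read the counter, compute
the updated value, and retry whenever another thread changed the
counter in between. A sketch of the same pattern in portable C11
follows; the MODEL_ prefix and model_ names are invented for
illustration, and this is not the kernel implementation.

/*
 * Userland C11 sketch of the LINUX_ATOMIC_OP() retry-loop pattern.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_t;

#define	MODEL_ATOMIC_OP(op, c_op)				\
static void							\
model_atomic_##op(int i, atomic_t *v)				\
{								\
	int c = atomic_load(&v->counter);			\
								\
	/* On failure, 'c' is reloaded with the current value	\
	 * and the operation is recomputed and retried. */	\
	while (!atomic_compare_exchange_weak(&v->counter,	\
	    &c, c c_op i))					\
		;						\
}

MODEL_ATOMIC_OP(or, |)
MODEL_ATOMIC_OP(and, &)
MODEL_ATOMIC_OP(xor, ^)

int
main(void)
{
	atomic_t v = { .counter = 0x0f };

	model_atomic_or(0xf0, &v);	/* 0xff */
	model_atomic_and(0x3c, &v);	/* 0x3c */
	model_atomic_xor(0x3c, &v);	/* 0x00 */
	printf("0x%02x\n", (unsigned)atomic_load(&v.counter));
	return (0);
}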
Modified: head/sys/compat/linuxkpi/common/include/asm/atomic64.h
==============================================================================
--- head/sys/compat/linuxkpi/common/include/asm/atomic64.h	Wed May 11 07:50:35 2016	(r299414)
+++ head/sys/compat/linuxkpi/common/include/asm/atomic64.h	Wed May 11 07:58:43 2016	(r299415)
@@ -44,6 +44,7 @@ typedef struct {
 #define	atomic64_sub(i, v)		atomic64_sub_return((i), (v))
 #define	atomic64_inc_return(v)		atomic64_add_return(1, (v))
 #define	atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+#define	atomic64_add_and_test(i, v)	(atomic64_add_return((i), (v)) == 0)
 #define	atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
 #define	atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
 #define	atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
@@ -101,4 +102,36 @@ atomic64_add_unless(atomic64_t *v, int64
 	return (c != u);
 }
 
+static inline int64_t
+atomic64_xchg(atomic64_t *v, int64_t i)
+{
+#if defined(__i386__) || defined(__amd64__) || \
+    defined(__arm__) || defined(__aarch64__)
+	return (atomic_swap_64(&v->counter, i));
+#else
+	int64_t ret;
+	for (;;) {
+		ret = atomic_load_acq_64(&v->counter);
+		if (atomic_cmpset_64(&v->counter, ret, i))
+			break;
+	}
+	return (ret);
+#endif
+}
+
+static inline int64_t
+atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
+{
+	int64_t ret = old;
+
+	for (;;) {
+		if (atomic_cmpset_64(&v->counter, old, new))
+			break;
+		ret = atomic_load_acq_64(&v->counter);
+		if (ret != old)
+			break;
+	}
+	return (ret);
+}
+
 #endif					/* _ASM_ATOMIC64_H_ */
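As with the 32-bit versions, atomic64_xchg() unconditionally swaps in
the new value and returns the previous one, while atomic64_cmpxchg()
swaps only when the counter still holds the expected value and in all
cases returns what it observed. A userland C11 model of those two
contracts follows; the *_model names are invented for illustration and
this is not the kernel code.

/*
 * Userland C11 model of the 64-bit xchg/cmpxchg contracts.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { _Atomic int64_t counter; } atomic64_t;

static int64_t
atomic64_xchg_model(atomic64_t *v, int64_t i)
{
	/* Always succeeds; returns the previous counter value. */
	return (atomic_exchange(&v->counter, i));
}

static int64_t
atomic64_cmpxchg_model(atomic64_t *v, int64_t old, int64_t new)
{
	int64_t expected = old;

	/* Swaps only if the counter still equals 'old'; on failure,
	 * C11 stores the observed value back into 'expected'. */
	atomic_compare_exchange_strong(&v->counter, &expected, new);
	return (expected);	/* Equals 'old' exactly on success. */
}

int
main(void)
{
	atomic64_t v = { .counter = 1 };

	printf("%" PRId64 "\n", atomic64_xchg_model(&v, 2));	  /* 1 */
	printf("%" PRId64 "\n", atomic64_cmpxchg_model(&v, 2, 3)); /* 2 */
	printf("%" PRId64 "\n", atomic64_cmpxchg_model(&v, 9, 4)); /* 3: no swap */
	return (0);
}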