Skip site navigation (1) | Skip section navigation (2)
Date:      Fri, 1 Aug 2014 22:56:41 +0000 (UTC)
From:      Ian Lepore <ian@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r269405 - head/sys/arm/include
Message-ID:  <201408012256.s71Muf47061544@svn.freebsd.org>

next in thread | raw e-mail | index | archive | help
Author: ian
Date: Fri Aug  1 22:56:41 2014
New Revision: 269405
URL: http://svnweb.freebsd.org/changeset/base/269405

Log:
  Add 64-bit atomic ops for armv4, only for kernel code, mostly so that we
  don't need any #ifdef stuff to use atomic_load/store_64() elsewhere in
  the kernel.  For armv4 the atomics are trivial to implement for kernel
  code (just disable interrupts), less so for user mode, so this only has
  the kernel mode implementations for now.

Modified:
  head/sys/arm/include/atomic.h

Modified: head/sys/arm/include/atomic.h
==============================================================================
--- head/sys/arm/include/atomic.h	Fri Aug  1 22:33:23 2014	(r269404)
+++ head/sys/arm/include/atomic.h	Fri Aug  1 22:56:41 2014	(r269405)
@@ -729,11 +729,23 @@ atomic_set_32(volatile uint32_t *address
 }
 
 static __inline void
+atomic_set_64(volatile uint64_t *address, uint64_t setmask)
+{
+	__with_interrupts_disabled(*address |= setmask);
+}
+
+static __inline void
 atomic_clear_32(volatile uint32_t *address, uint32_t clearmask)
 {
 	__with_interrupts_disabled(*address &= ~clearmask);
 }
 
+static __inline void
+atomic_clear_64(volatile uint64_t *address, uint64_t clearmask)
+{
+	__with_interrupts_disabled(*address &= ~clearmask);
+}
+
 static __inline u_int32_t
 atomic_cmpset_32(volatile u_int32_t *p, volatile u_int32_t cmpval, volatile u_int32_t newval)
 {
@@ -751,6 +763,23 @@ atomic_cmpset_32(volatile u_int32_t *p, 
 	return (ret);
 }
 
+static __inline u_int64_t
+atomic_cmpset_64(volatile u_int64_t *p, volatile u_int64_t cmpval, volatile u_int64_t newval)
+{
+	int ret;
+	
+	__with_interrupts_disabled(
+	 {
+	    	if (*p == cmpval) {
+			*p = newval;
+			ret = 1;
+		} else {
+			ret = 0;
+		}
+	});
+	return (ret);
+}
+
 static __inline void
 atomic_add_32(volatile u_int32_t *p, u_int32_t val)
 {
@@ -758,11 +787,23 @@ atomic_add_32(volatile u_int32_t *p, u_i
 }
 
 static __inline void
+atomic_add_64(volatile u_int64_t *p, u_int64_t val)
+{
+	__with_interrupts_disabled(*p += val);
+}
+
+static __inline void
 atomic_subtract_32(volatile u_int32_t *p, u_int32_t val)
 {
 	__with_interrupts_disabled(*p -= val);
 }
 
+static __inline void
+atomic_subtract_64(volatile u_int64_t *p, u_int64_t val)
+{
+	__with_interrupts_disabled(*p -= val);
+}
+
 static __inline uint32_t
 atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
 {
@@ -776,6 +817,34 @@ atomic_fetchadd_32(volatile uint32_t *p,
 	return (value);
 }
 
+static __inline uint64_t
+atomic_fetchadd_64(volatile uint64_t *p, uint64_t v)
+{
+	uint64_t value;
+
+	__with_interrupts_disabled(
+	{
+	    	value = *p;
+		*p += v;
+	});
+	return (value);
+}
+
+static __inline uint64_t
+atomic_load_64(volatile uint64_t *p)
+{
+	uint64_t value;
+
+	__with_interrupts_disabled(value = *p);
+	return (value);
+}
+
+static __inline void
+atomic_store_64(volatile uint64_t *p, uint64_t value)
+{
+	__with_interrupts_disabled(*p = value);
+}
+
 #else /* !_KERNEL */
 
 static __inline u_int32_t



Want to link to this message? Use this URL: <https://mail-archive.FreeBSD.org/cgi/mid.cgi?201408012256.s71Muf47061544>