Date:      Wed, 21 Oct 2015 10:04:36 +0000 (UTC)
From:      Roger Pau Monné <royger@FreeBSD.org>
To:        src-committers@freebsd.org, svn-src-all@freebsd.org, svn-src-head@freebsd.org
Subject:   svn commit: r289685 - in head/sys: amd64/include/xen i386/include/xen x86/include/xen
Message-ID:  <201510211004.t9LA4ala013630@repo.freebsd.org>

Author: royger
Date: Wed Oct 21 10:04:35 2015
New Revision: 289685
URL: https://svnweb.freebsd.org/changeset/base/289685

Log:
  x86/xen: Consolidate xen-os.h in a single place
  
  The amd64 and i386 platform code contain very similar copies of xen/xen-os.h.
  
  The only differences are:
   - Functions/variables/types which were unused in i386/xen/xen-os.h:
      * xen_xchg
      * __xchg_dummy
      * __xg
      * __xchg
      * atomic_t
      * atomic_inc
      * rdtscll
  
  The functions/variables/types unused in i386/xen/xen-os.h can be dropped,
  after which there are no remaining differences between amd64 and i386.
  
  The new header is placed in x86/include/xen, and each platform will have a
  dummy header that simply includes x86/xen/*.h (see the sketch below). This
  makes it possible to include machine/xen/*.h in the PV drivers.
  
  Submitted by:		Julien Grall <julien.grall@citrix.com>
  Reviewed by:		royger
  Differential Revision:	https://reviews.freebsd.org/D3880
  Sponsored by:		Citrix Systems R&D
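
  As a minimal sketch of the dummy-header pattern described above, each
  per-platform header reduces to a single include; this matches the amd64
  and i386 changes in the diff below (the comment is illustrative only):

      /* e.g. head/sys/amd64/include/xen/xen-os.h; i386 is identical */
      #include <x86/xen/xen-os.h>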

Added:
  head/sys/x86/include/xen/
  head/sys/x86/include/xen/xen-os.h   (contents, props changed)
Modified:
  head/sys/amd64/include/xen/xen-os.h
  head/sys/i386/include/xen/xen-os.h

Modified: head/sys/amd64/include/xen/xen-os.h
==============================================================================
--- head/sys/amd64/include/xen/xen-os.h	Wed Oct 21 09:44:50 2015	(r289684)
+++ head/sys/amd64/include/xen/xen-os.h	Wed Oct 21 10:04:35 2015	(r289685)
@@ -1,132 +1,6 @@
-/******************************************************************************
- * amd64/xen/xen-os.h
- * 
- * Random collection of macros and definition
- *
- * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
- * DEALINGS IN THE SOFTWARE.
- *
- * $FreeBSD$
+/*-
+ * This file is in the public domain.
  */
+/* $FreeBSD$ */
 
-#ifndef _MACHINE_XEN_XEN_OS_H_
-#define _MACHINE_XEN_XEN_OS_H_
-
-#ifdef PAE
-#define CONFIG_X86_PAE
-#endif
-
-/* Everything below this point is not included by assembler (.S) files. */
-#ifndef __ASSEMBLY__
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
-}
-#define cpu_relax() rep_nop()
-
-/* This is a barrier for the compiler only, NOT the processor! */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-#define LOCK_PREFIX ""
-#define LOCK ""
-#define ADDR (*(volatile long *) addr)
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
- */
-static __inline int test_and_clear_bit(int nr, volatile void * addr)
-{
-        int oldbit;
-
-        __asm__ __volatile__( LOCK_PREFIX
-                "btrl %2,%1\n\tsbbl %0,%0"
-                :"=r" (oldbit),"=m" (ADDR)
-                :"Ir" (nr) : "memory");
-        return oldbit;
-}
-
-static __inline int constant_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline int variable_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    
-    __asm__ __volatile__(
-        "btl %2,%1\n\tsbbl %0,%0"
-        :"=r" (oldbit)
-        :"m" (ADDR),"Ir" (nr));
-    return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
-        __asm__ __volatile__( LOCK_PREFIX
-                "btsl %1,%0"
-                :"=m" (ADDR)
-                :"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
-        __asm__ __volatile__( LOCK_PREFIX
-                "btrl %1,%0"
-                :"=m" (ADDR)
-                :"Ir" (nr));
-}
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _MACHINE_XEN_XEN_OS_H_ */
+#include <x86/xen/xen-os.h>

Modified: head/sys/i386/include/xen/xen-os.h
==============================================================================
--- head/sys/i386/include/xen/xen-os.h	Wed Oct 21 09:44:50 2015	(r289684)
+++ head/sys/i386/include/xen/xen-os.h	Wed Oct 21 10:04:35 2015	(r289685)
@@ -1,188 +1,6 @@
-/*****************************************************************************
- * i386/xen/xen-os.h
- * 
- * Random collection of macros and definition
- *
- * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
- * DEALINGS IN THE SOFTWARE.
- *
- * $FreeBSD$
+/*-
+ * This file is in the public domain.
  */
+/* $FreeBSD$ */
 
-#ifndef _MACHINE_XEN_XEN_OS_H_
-#define _MACHINE_XEN_XEN_OS_H_
-
-#ifdef PAE
-#define CONFIG_X86_PAE
-#endif
-
-/* Everything below this point is not included by assembler (.S) files. */
-#ifndef __ASSEMBLY__
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
-}
-#define cpu_relax() rep_nop()
-
-/* This is a barrier for the compiler only, NOT the processor! */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-#define LOCK_PREFIX ""
-#define LOCK ""
-#define ADDR (*(volatile long *) addr)
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-
-#define xen_xchg(ptr,v) \
-        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
-                                   int size)
-{
-    switch (size) {
-    case 1:
-        __asm__ __volatile__("xchgb %b0,%1"
-                             :"=q" (x)
-                             :"m" (*__xg(ptr)), "0" (x)
-                             :"memory");
-        break;
-    case 2:
-        __asm__ __volatile__("xchgw %w0,%1"
-                             :"=r" (x)
-                             :"m" (*__xg(ptr)), "0" (x)
-                             :"memory");
-        break;
-    case 4:
-        __asm__ __volatile__("xchgl %0,%1"
-                             :"=r" (x)
-                             :"m" (*__xg(ptr)), "0" (x)
-                             :"memory");
-        break;
-    }
-    return x;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.  
- * It also implies a memory barrier.
- */
-static __inline int test_and_clear_bit(int nr, volatile void * addr)
-{
-        int oldbit;
-
-        __asm__ __volatile__( LOCK_PREFIX
-                "btrl %2,%1\n\tsbbl %0,%0"
-                :"=r" (oldbit),"=m" (ADDR)
-                :"Ir" (nr) : "memory");
-        return oldbit;
-}
-
-static __inline int constant_test_bit(int nr, const volatile void * addr)
-{
-    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline int variable_test_bit(int nr, volatile void * addr)
-{
-    int oldbit;
-    
-    __asm__ __volatile__(
-        "btl %2,%1\n\tsbbl %0,%0"
-        :"=r" (oldbit)
-        :"m" (ADDR),"Ir" (nr));
-    return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
-        __asm__ __volatile__( LOCK_PREFIX
-                "btsl %1,%0"
-                :"=m" (ADDR)
-                :"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
-        __asm__ __volatile__( LOCK_PREFIX
-                "btrl %1,%0"
-                :"=m" (ADDR)
-                :"Ir" (nr));
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- * 
- * Atomically increments @v by 1.  Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */ 
-static __inline__ void atomic_inc(atomic_t *v)
-{
-        __asm__ __volatile__(
-                LOCK "incl %0"
-                :"=m" (v->counter)
-                :"m" (v->counter));
-}
-
-
-#define rdtscll(val) \
-     __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _MACHINE_XEN_XEN_OS_H_ */
+#include <x86/xen/xen-os.h>

Added: head/sys/x86/include/xen/xen-os.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/x86/include/xen/xen-os.h	Wed Oct 21 10:04:35 2015	(r289685)
@@ -0,0 +1,132 @@
+/*****************************************************************************
+ * x86/xen/xen-os.h
+ *
+ * Random collection of macros and definition
+ *
+ * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_X86_XEN_XEN_OS_H_
+#define _MACHINE_X86_XEN_XEN_OS_H_
+
+#ifdef PAE
+#define CONFIG_X86_PAE
+#endif
+
+/* Everything below this point is not included by assembler (.S) files. */
+#ifndef __ASSEMBLY__
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
+}
+#define cpu_relax() rep_nop()
+
+/* This is a barrier for the compiler only, NOT the processor! */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+#define LOCK_PREFIX ""
+#define LOCK ""
+#define ADDR (*(volatile long *) addr)
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+        int oldbit;
+
+        __asm__ __volatile__( LOCK_PREFIX
+                "btrl %2,%1\n\tsbbl %0,%0"
+                :"=r" (oldbit),"=m" (ADDR)
+                :"Ir" (nr) : "memory");
+        return oldbit;
+}
+
+static __inline int constant_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline int variable_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+
+    __asm__ __volatile__(
+        "btl %2,%1\n\tsbbl %0,%0"
+        :"=r" (oldbit)
+        :"m" (ADDR),"Ir" (nr));
+    return oldbit;
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ constant_test_bit((nr),(addr)) : \
+ variable_test_bit((nr),(addr)))
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+        __asm__ __volatile__( LOCK_PREFIX
+                "btsl %1,%0"
+                :"=m" (ADDR)
+                :"Ir" (nr));
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+        __asm__ __volatile__( LOCK_PREFIX
+                "btrl %1,%0"
+                :"=m" (ADDR)
+                :"Ir" (nr));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _MACHINE_X86_XEN_XEN_OS_H_ */
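
For reference, a minimal usage sketch of the bit helpers defined in the
consolidated header added above (the "pending" bitmap and the bit number 3
are hypothetical, chosen only for illustration):

    /* Assumes <x86/xen/xen-os.h> (via machine/xen/xen-os.h) is included. */
    static unsigned long pending;                   /* hypothetical bitmap */

    static void
    example(void)
    {
            set_bit(3, &pending);                   /* atomically set bit 3 */
            if (test_bit(3, &pending))              /* constant nr: uses constant_test_bit() */
                    clear_bit(3, &pending);         /* atomically clear bit 3 */
    }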


