symbian-qemu-0.9.1-12/qemu-symbian-svp/qemu_spinlock.h
changeset 1 2fb8b9db1c86
/*
 * Atomic operation helper include
 *
 *  Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef QEMU_SPINLOCK_H
#define QEMU_SPINLOCK_H

#if defined(__hppa__)

/* PA-RISC: the ldcw lock word must be 16-byte aligned and is "free" when
   nonzero, so the lock is an array of four ints from which a suitably
   aligned element is picked at run time (see ldcw_align() below).  */
typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

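/* Each testandset() below atomically marks the lock as held and returns
   zero if it was previously free (i.e. the caller now owns it), nonzero
   if it was already held.  */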
       
#if defined(__powerpc__)
/* Load-reserved/store-conditional (lwarx/stwcx.) loop.  */
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
/* lock; cmpxchgl stores 1 only if the lock word is still 0; the previous
   value of the lock word ends up in readval.  */
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
/* Identical to the i386 version: lock; cmpxchgl on the 32-bit lock word.  */
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
/* Compare-and-swap (cs) loop.  */
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
/* Load-locked/store-conditional (ldl_l/stl_c) sequence; retries via the
   out-of-line branch in .subsection 2 if the store fails.  */
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:  mov 1,%2\n"
                          "    ldl_l %0,%1\n"
                          "    stl_c %2,%1\n"
                          "    beq %2,1f\n"
                          ".subsection 2\n"
                          "1:  br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
/* ldstub atomically reads the lock byte and sets it to 0xff.  */
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
/* swp atomically exchanges 1 with the lock word (the classic, now
   deprecated, ARM swap instruction).  */
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
/* tas atomically tests and sets the high bit of the lock byte; sne then
   yields nonzero if it was already set.  */
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}
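/* For illustration (assumed values, not part of the original code): with
   __PA_LDCW_ALIGNMENT == 16, an 8-byte-aligned pointer such as 0x1008
   rounds up as (0x1008 + 15) & ~15 == 0x1010, the next 16-byte boundary,
   which still lies within the 16-byte spinlock_t array.  */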
       
/* ldcw atomically loads the aligned lock word and clears it; a nonzero
   result means the lock was free, hence the inverted return value.  */
static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

/* GCC/icc atomic intrinsic: returns the previous value of *p.  */
static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
/* Load-linked/store-conditional (ll/sc) loop; $1 (the assembler temporary,
   hence .set noat) holds the value being stored.  */
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "       .set push               \n"
        "       .set noat               \n"
        "       .set mips2              \n"
        "1:     li      $1, 1           \n"
        "       ll      %0, %1          \n"
        "       sc      $1, %1          \n"
        "       beqz    $1, 1b          \n"
        "       .set pop                "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation may run guest threads concurrently, so the locks
   are real spinlocks built on testandset().  */
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
/* System emulation in this tree runs single-threaded, so the lock
   operations can be empty.  */
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
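
/*
 * Typical usage (illustrative sketch only; "some_lock" and the critical
 * section are hypothetical and do not appear in this header):
 *
 *     static spinlock_t some_lock = SPIN_LOCK_UNLOCKED;
 *
 *     spin_lock(&some_lock);
 *     ... touch the shared data ...
 *     spin_unlock(&some_lock);
 *
 *     if (spin_trylock(&some_lock)) {
 *         ... lock acquired without spinning ...
 *         spin_unlock(&some_lock);
 *     }
 */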
       
#endif /* QEMU_SPINLOCK_H */