Diffstat (limited to 'erts/include/internal/ppc32')
-rw-r--r--  erts/include/internal/ppc32/atomic.h    209
-rw-r--r--  erts/include/internal/ppc32/ethread.h    34
-rw-r--r--  erts/include/internal/ppc32/rwlock.h    153
-rw-r--r--  erts/include/internal/ppc32/spinlock.h   93
4 files changed, 489 insertions, 0 deletions
diff --git a/erts/include/internal/ppc32/atomic.h b/erts/include/internal/ppc32/atomic.h
new file mode 100644
index 0000000000..fa701c6a92
--- /dev/null
+++ b/erts/include/internal/ppc32/atomic.h
@@ -0,0 +1,209 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread atomics on PowerPC.
+ * Author: Mikael Pettersson.
+ *
+ * Based on the examples in Appendix E of Motorola's
+ * "Programming Environments Manual For 32-Bit Implementations
+ * of the PowerPC Architecture".
+ */
+#ifndef ETHREAD_PPC_ATOMIC_H
+#define ETHREAD_PPC_ATOMIC_H
+
+typedef struct {
+ volatile int counter;
+} ethr_native_atomic_t;
+
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_atomic_init(ethr_native_atomic_t *var, int i)
+{
+ var->counter = i;
+}
+#define ethr_native_atomic_set(v, i) ethr_native_atomic_init((v), (i))
+
+static ETHR_INLINE int
+ethr_native_atomic_read(ethr_native_atomic_t *var)
+{
+ return var->counter;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_add_return(ethr_native_atomic_t *var, int incr)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "add %0,%2,%0\n\t"
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter), "r"(incr)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_add(ethr_native_atomic_t *var, int incr)
+{
+ /* XXX: could use weaker version here w/o eieio+isync */
+ (void)ethr_native_atomic_add_return(var, incr);
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_inc_return(ethr_native_atomic_t *var)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "addic %0,%0,1\n\t" /* due to addi's (rA|0) behaviour */
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_inc(ethr_native_atomic_t *var)
+{
+ /* XXX: could use weaker version here w/o eieio+isync */
+ (void)ethr_native_atomic_inc_return(var);
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_dec_return(ethr_native_atomic_t *var)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "addic %0,%0,-1\n\t"
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE void
+ethr_native_atomic_dec(ethr_native_atomic_t *var)
+{
+ /* XXX: could use weaker version here w/o eieio+isync */
+ (void)ethr_native_atomic_dec_return(var);
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_and_retold(ethr_native_atomic_t *var, int mask)
+{
+ int old, new;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "and %1,%0,%3\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(old), "=&r"(new)
+ : "r"(&var->counter), "r"(mask)
+ : "cc", "memory");
+ return old;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_or_retold(ethr_native_atomic_t *var, int mask)
+{
+ int old, new;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "or %1,%0,%3\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(old), "=&r"(new)
+ : "r"(&var->counter), "r"(mask)
+ : "cc", "memory");
+ return old;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_xchg(ethr_native_atomic_t *var, int val)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "stwcx. %2,0,%1\n\t"
+ "bne- 1b\n\t"
+ "isync"
+ : "=&r"(tmp)
+ : "r"(&var->counter), "r"(val)
+ : "cc", "memory");
+ return tmp;
+}
+
+static ETHR_INLINE int
+ethr_native_atomic_cmpxchg(ethr_native_atomic_t *var, int new, int expected)
+{
+ int old;
+
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%2\n\t"
+ "cmpw 0,%0,%3\n\t"
+ "bne 2f\n\t"
+ "stwcx. %1,0,%2\n\t"
+ "bne- 1b\n\t"
+ "isync\n"
+ "2:"
+ : "=&r"(old)
+ : "r"(new), "r"(&var->counter), "r"(expected)
+ : "cc", "memory");
+
+ return old;
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_PPC_ATOMIC_H */
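The atomic operations above all follow the same PowerPC pattern: an eieio barrier, a lwarx/stwcx. reservation loop that retries until the conditional store succeeds, and a trailing isync, which together give roughly release/acquire ordering around the update. For readers without a PowerPC toolchain, here is a minimal sketch of that compare-and-retry pattern written with GCC's portable __atomic builtins; it is an illustration only, not part of the ethread API, and the demo_* names are invented for this example.

/* Illustrative sketch only: the retry pattern of ethr_native_atomic_add_return()
 * expressed with GCC __atomic builtins instead of lwarx/stwcx.
 * demo_add_return() is an invented name, not an ethread function. */
#include <stdio.h>

static int demo_add_return(volatile int *counter, int incr)
{
    int old, updated;
    do {
        old = __atomic_load_n(counter, __ATOMIC_RELAXED);
        updated = old + incr;
        /* stwcx. fails if the reservation was lost; the CAS below fails in
         * the analogous situation, and we simply retry the whole update. */
    } while (!__atomic_compare_exchange_n(counter, &old, updated, 0,
                                          __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));
    return updated;   /* like the PPC version, the new value is returned */
}

int main(void)
{
    volatile int c = 40;
    printf("%d\n", demo_add_return(&c, 2));   /* prints 42 */
    return 0;
}

Compiled with gcc on any architecture, this behaves like the ppc32 add_return above; the inline-assembly version simply fuses the load, add, and conditional store into one reservation-protected sequence.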
diff --git a/erts/include/internal/ppc32/ethread.h b/erts/include/internal/ppc32/ethread.h
new file mode 100644
index 0000000000..d2a72c3dc1
--- /dev/null
+++ b/erts/include/internal/ppc32/ethread.h
@@ -0,0 +1,34 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Low-level ethread support on PowerPC.
+ * Author: Mikael Pettersson.
+ */
+#ifndef ETHREAD_PPC32_ETHREAD_H
+#define ETHREAD_PPC32_ETHREAD_H
+
+#include "atomic.h"
+#include "spinlock.h"
+#include "rwlock.h"
+
+#define ETHR_HAVE_NATIVE_ATOMICS 1
+#define ETHR_HAVE_NATIVE_LOCKS 1
+
+#endif /* ETHREAD_PPC32_ETHREAD_H */
diff --git a/erts/include/internal/ppc32/rwlock.h b/erts/include/internal/ppc32/rwlock.h
new file mode 100644
index 0000000000..9bdab12826
--- /dev/null
+++ b/erts/include/internal/ppc32/rwlock.h
@@ -0,0 +1,153 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread rwlocks on PowerPC.
+ * Author: Mikael Pettersson.
+ *
+ * Based on the examples in Appendix E of Motorola's
+ * "Programming Environments Manual For 32-Bit Implementations
+ * of the PowerPC Architecture". Uses eieio instead of sync
+ * in the unlock sequence, as suggested in the manual.
+ */
+#ifndef ETHREAD_PPC_RWLOCK_H
+#define ETHREAD_PPC_RWLOCK_H
+
+/* Unlocked if zero, read-locked if negative, write-locked if +1. */
+typedef struct {
+ volatile int lock;
+} ethr_native_rwlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_unlock(ethr_native_rwlock_t *lock)
+{
+ int tmp;
+
+ /* this is eieio + ethr_native_atomic_inc() - isync */
+ __asm__ __volatile__(
+ "eieio\n\t"
+ "1:\t"
+ "lwarx %0,0,%1\n\t"
+ "addic %0,%0,1\n\t"
+ "stwcx. %0,0,%1\n\t"
+ "bne- 1b"
+ : "=&r"(tmp)
+ : "r"(&lock->lock)
+ : "cr0", "memory");
+}
+
+static ETHR_INLINE int
+ethr_native_read_trylock(ethr_native_rwlock_t *lock)
+{
+ int counter;
+
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%1\n\t" /* read lock to counter */
+ "addic. %0,%0,-1\n\t" /* decrement counter */
+ "bge- 2f\n\t" /* bail if >= 0 (write-locked) */
+ "stwcx. %0,0,%1\n\t" /* try to store decremented counter */
+ "bne- 1b\n\t" /* loop if lost reservation */
+ "isync\n\t" /* wait for previous insns to complete */
+ "2:"
+ : "=&r"(counter)
+ : "r"(&lock->lock)
+ : "cr0", "memory"
+#if __GNUC__ > 2
+ ,"xer"
+#endif
+ );
+ return counter < 0;
+}
+
+static ETHR_INLINE int
+ethr_native_read_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock > 0;
+}
+
+static ETHR_INLINE void
+ethr_native_read_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_read_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("":::"memory");
+ } while (ethr_native_read_is_locked(lock));
+ }
+}
+
+static ETHR_INLINE void
+ethr_native_write_unlock(ethr_native_rwlock_t *lock)
+{
+ __asm__ __volatile__("eieio" : : : "memory");
+ lock->lock = 0;
+}
+
+static ETHR_INLINE int
+ethr_native_write_trylock(ethr_native_rwlock_t *lock)
+{
+ int prev;
+
+ /* identical to ethr_native_spin_trylock() */
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%1\n\t" /* read lock to prev */
+ "cmpwi 0,%0,0\n\t"
+ "bne- 2f\n\t" /* bail if non-zero (any lock) */
+ "stwcx. %2,0,%1\n\t" /* try to make the lock positive */
+ "bne- 1b\n\t" /* loop if lost reservation */
+ "isync\n\t" /* wait for previous insns to complete */
+ "2:"
+ : "=&r"(prev)
+ : "r"(&lock->lock), "r"(1)
+ : "cr0", "memory");
+ return prev == 0;
+}
+
+static ETHR_INLINE int
+ethr_native_write_is_locked(ethr_native_rwlock_t *lock)
+{
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_write_lock(ethr_native_rwlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_write_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("":::"memory");
+ } while (ethr_native_write_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_PPC_RWLOCK_H */
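The rwlock above keeps its whole state in one counter: 0 means unlocked, a negative value -N means N readers hold the lock, and +1 means one writer holds it. read_trylock decrements the counter and backs out if the result is non-negative (a writer was present), while write_trylock only succeeds on an exact 0 -> 1 transition. Below is a minimal portable sketch of that counter protocol, assuming GCC __atomic builtins and using invented demo_* names; it illustrates the encoding only and is not the ethread implementation.

/* Illustrative sketch only: the 0 / negative / +1 counter protocol used by
 * the ppc32 rwlock above, written with GCC __atomic builtins.
 * demo_* names are invented for this example. */
#include <stdio.h>

static int demo_read_trylock(volatile int *lock)
{
    int old = __atomic_load_n(lock, __ATOMIC_RELAXED);
    do {
        if (old > 0)                 /* a writer holds the lock: give up */
            return 0;
        /* go one step more negative, i.e. register one more reader */
    } while (!__atomic_compare_exchange_n(lock, &old, old - 1, 0,
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
    return 1;
}

static int demo_write_trylock(volatile int *lock)
{
    int expected = 0;                /* writers need the lock fully free */
    return __atomic_compare_exchange_n(lock, &expected, 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
    volatile int lock = 0;
    printf("read  -> %d\n", demo_read_trylock(&lock));   /* 1, lock is now -1 */
    printf("write -> %d\n", demo_write_trylock(&lock));  /* 0, a reader is active */
    return 0;
}

The ethr_native_read_lock()/ethr_native_write_lock() wrappers above add the same spin structure used by the spinlock: one atomic attempt, then plain reads behind a compiler barrier until the lock looks free again.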
diff --git a/erts/include/internal/ppc32/spinlock.h b/erts/include/internal/ppc32/spinlock.h
new file mode 100644
index 0000000000..034c20c143
--- /dev/null
+++ b/erts/include/internal/ppc32/spinlock.h
@@ -0,0 +1,93 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2005-2009. All Rights Reserved.
+ *
+ * The contents of this file are subject to the Erlang Public License,
+ * Version 1.1, (the "License"); you may not use this file except in
+ * compliance with the License. You should have received a copy of the
+ * Erlang Public License along with this software. If not, it can be
+ * retrieved online at http://www.erlang.org/.
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * %CopyrightEnd%
+ */
+
+/*
+ * Native ethread spinlocks on PowerPC.
+ * Author: Mikael Pettersson.
+ *
+ * Based on the examples in Appendix E of Motorola's
+ * "Programming Environments Manual For 32-Bit Implementations
+ * of the PowerPC Architecture". Uses eieio instead of sync
+ * in the unlock sequence, as suggested in the manual.
+ */
+#ifndef ETHREAD_PPC_SPINLOCK_H
+#define ETHREAD_PPC_SPINLOCK_H
+
+/* Unlocked if zero, locked if non-zero. */
+typedef struct {
+ volatile unsigned int lock;
+} ethr_native_spinlock_t;
+
+#ifdef ETHR_TRY_INLINE_FUNCS
+
+static ETHR_INLINE void
+ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
+{
+ lock->lock = 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
+{
+ __asm__ __volatile__("eieio" : : : "memory");
+ lock->lock = 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_trylock(ethr_native_spinlock_t *lock)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__(
+ "1:\t"
+ "lwarx %0,0,%1\n\t" /* read lock to prev */
+ "cmpwi 0,%0,0\n\t"
+ "bne- 2f\n\t" /* bail if non-zero/locked */
+ "stwcx. %2,0,%1\n\t" /* try to make the lock non-zero */
+ "bne- 1b\n\t" /* loop if lost reservation */
+ "isync\n\t" /* wait for previous insns to complete */
+ "2:"
+ : "=&r"(prev)
+ : "r"(&lock->lock), "r"(1)
+ : "cr0", "memory");
+ return prev == 0;
+}
+
+static ETHR_INLINE int
+ethr_native_spin_is_locked(ethr_native_spinlock_t *lock)
+{
+
+ return lock->lock != 0;
+}
+
+static ETHR_INLINE void
+ethr_native_spin_lock(ethr_native_spinlock_t *lock)
+{
+ for(;;) {
+ if (__builtin_expect(ethr_native_spin_trylock(lock) != 0, 1))
+ break;
+ do {
+ __asm__ __volatile__("":::"memory");
+ } while (ethr_native_spin_is_locked(lock));
+ }
+}
+
+#endif /* ETHR_TRY_INLINE_FUNCS */
+
+#endif /* ETHREAD_PPC_SPINLOCK_H */
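ethr_native_spin_lock() above is a test-and-test-and-set loop: one lwarx/stwcx. attempt to take the lock, then plain loads behind an empty-asm compiler barrier while someone else holds it, so a waiting CPU does not keep stealing the cache line with new reservations. Below is a minimal portable sketch of the same pattern, assuming GCC __atomic builtins; the demo_* names are invented and this is not the ethread implementation.

/* Illustrative sketch only: the test-and-test-and-set pattern of
 * ethr_native_spin_lock()/ethr_native_spin_unlock() above, written with
 * GCC __atomic builtins.  demo_* names are invented for this example. */
static void demo_spin_lock(volatile unsigned int *lock)
{
    for (;;) {
        unsigned int expected = 0;
        /* the atomic attempt, corresponding to ethr_native_spin_trylock() */
        if (__atomic_compare_exchange_n(lock, &expected, 1u, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            return;
        /* wait with plain reads only, like the empty-asm barrier loop above */
        while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
            ;
    }
}

static void demo_spin_unlock(volatile unsigned int *lock)
{
    /* corresponds to eieio followed by a plain store of zero */
    __atomic_store_n(lock, 0u, __ATOMIC_RELEASE);
}

int main(void)
{
    volatile unsigned int lock = 0;
    demo_spin_lock(&lock);       /* lock is now 1 */
    demo_spin_unlock(&lock);     /* lock is back to 0 */
    return 0;
}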