path: root/erts/include/internal/ethr_optimized_fallbacks.h
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2010. All Rights Reserved.
 *
 * The contents of this file are subject to the Erlang Public License,
 * Version 1.1, (the "License"); you may not use this file except in
 * compliance with the License. You should have received a copy of the
 * Erlang Public License along with this software. If not, it can be
 * retrieved online at http://www.erlang.org/.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * %CopyrightEnd%
 */

/*
 * Description: "Optimized" fallbacks used when native ops are missing
 * Author: Rickard Green
 */

#ifndef ETHR_OPTIMIZED_FALLBACKS_H__
#define ETHR_OPTIMIZED_FALLBACKS_H__

#ifdef ETHR_HAVE_NATIVE_ATOMICS
#define ETHR_HAVE_OPTIMIZED_ATOMIC_OPS 1
#endif

#ifdef ETHR_HAVE_NATIVE_SPINLOCKS
#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1
#elif defined(ETHR_HAVE_PTHREAD_SPIN_LOCK)
/* --- Optimized spinlocks using pthread spinlocks -------------------------- */
#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1

typedef pthread_spinlock_t ethr_opt_spinlock_t;

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

static ETHR_INLINE int
ethr_opt_spinlock_init(ethr_opt_spinlock_t *lock)
{
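    /* Second argument (pshared) is 0, i.e. the spinlock is not shared
     * between processes. */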
    return pthread_spin_init((pthread_spinlock_t *) lock, 0);
}

static ETHR_INLINE int
ethr_opt_spinlock_destroy(ethr_opt_spinlock_t *lock)
{
    return pthread_spin_destroy((pthread_spinlock_t *) lock);
}

static ETHR_INLINE int
ethr_opt_spin_unlock(ethr_opt_spinlock_t *lock)
{
    return pthread_spin_unlock((pthread_spinlock_t *) lock);
}

static ETHR_INLINE int
ethr_opt_spin_lock(ethr_opt_spinlock_t *lock)
{
    return pthread_spin_lock((pthread_spinlock_t *) lock);
}

#endif

#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
/* --- Native spinlocks using native atomics -------------------------------- */
#define ETHR_HAVE_NATIVE_SPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_SPINLOCKS 1

typedef ethr_native_atomic_t ethr_native_spinlock_t;

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

static ETHR_INLINE void
ethr_native_spinlock_init(ethr_native_spinlock_t *lock)
{
    ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
}

static ETHR_INLINE void
ethr_native_spin_unlock(ethr_native_spinlock_t *lock)
{
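    /* The lock word must currently be 1 (held); release it by storing 0
     * with release semantics. */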
    ETHR_COMPILER_BARRIER;
    ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) == 1);
    ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
}

static ETHR_INLINE void
ethr_native_spin_lock(ethr_native_spinlock_t *lock)
{
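    /* Busy-wait: try to swing the lock word from 0 (free) to 1 (held)
     * with an acquire cmpxchg; spin while another thread holds it. */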
    while (ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
					   (long) 1, (long) 0) != 0) {
	ETHR_SPIN_BODY;
    }
    ETHR_COMPILER_BARRIER;
}

#endif

#endif


#ifdef ETHR_HAVE_NATIVE_RWSPINLOCKS
#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1
#elif defined(ETHR_HAVE_NATIVE_ATOMICS)
/* --- Native rwspinlocks using native atomics ------------------------------ */
#define ETHR_HAVE_NATIVE_RWSPINLOCKS 1
#define ETHR_HAVE_OPTIMIZED_RWSPINLOCKS 1

typedef ethr_native_atomic_t ethr_native_rwlock_t;

#if defined(ETHR_TRY_INLINE_FUNCS) || defined(ETHR_AUX_IMPL__)

#define ETHR_WLOCK_FLAG__ (((long) 1) << 30)
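/*
 * The rwlock is a single atomic word: the bits below ETHR_WLOCK_FLAG__
 * count active readers, and ETHR_WLOCK_FLAG__ (bit 30) is set while a
 * writer owns, or is acquiring, the lock.
 */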

static ETHR_INLINE void
ethr_native_rwlock_init(ethr_native_rwlock_t *lock)
{
    ethr_native_atomic_init((ethr_native_atomic_t *) lock, 0);
}

static ETHR_INLINE void
ethr_native_read_unlock(ethr_native_rwlock_t *lock)
{
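    /* Drop one reader: decrement the reader count with release semantics. */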
    ETHR_COMPILER_BARRIER;
#ifdef DEBUG
    ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock) >= 0);
#endif
    ethr_native_atomic_dec_relb((ethr_native_atomic_t *) lock);
}

static ETHR_INLINE void
ethr_native_read_lock(ethr_native_rwlock_t *lock)
{
    long act, exp = 0;
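    /* Try to increment the reader count with an acquire cmpxchg. If a
     * writer owns or is acquiring the lock (ETHR_WLOCK_FLAG__ set),
     * retry from an expected value of 0, i.e. wait until it is released;
     * otherwise retry from the reader count we actually observed. */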
    while (1) {
	act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
					      exp+1, exp);
	if (act == exp)
	    break;
	ETHR_SPIN_BODY;
	exp = (act & ETHR_WLOCK_FLAG__) ? 0 : act;
    }
    ETHR_COMPILER_BARRIER;
}

static ETHR_INLINE void
ethr_native_write_unlock(ethr_native_rwlock_t *lock)
{
    ETHR_COMPILER_BARRIER;
    ETHR_ASSERT(ethr_native_atomic_read((ethr_native_atomic_t *) lock)
		== ETHR_WLOCK_FLAG__);
    ethr_native_atomic_set_relb((ethr_native_atomic_t *) lock, 0);
}

static ETHR_INLINE void
ethr_native_write_lock(ethr_native_rwlock_t *lock)
{
    long act, exp = 0;
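    /* Phase 1: set the write-lock flag with an acquire cmpxchg, keeping
     * whatever reader count is currently in the word. Phase 2 (below)
     * then spins until all remaining readers have unlocked. */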
    while (1) {
	act = ethr_native_atomic_cmpxchg_acqb((ethr_native_atomic_t *) lock,
					      exp|ETHR_WLOCK_FLAG__, exp);
	if (act == exp)
	    break;
	ETHR_SPIN_BODY;
	exp = act & ~ETHR_WLOCK_FLAG__;
    }
    act |= ETHR_WLOCK_FLAG__;
    /* Wait for readers to leave */
    while (act != ETHR_WLOCK_FLAG__) {
	ETHR_SPIN_BODY;
	act = ethr_native_atomic_read_acqb((ethr_native_atomic_t *) lock);
    }
    ETHR_COMPILER_BARRIER;
}

#endif

#endif

#endif