erts/include/internal/win/ethr_membar.h
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2011-2016. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

/*
 * Description: Memory barriers for Windows
 * Author: Rickard Green
 */

#if (!defined(ETHR_WIN_MEMBAR_H__) \
     && (defined(_MSC_VER) && _MSC_VER >= 1400) \
     && (defined(_M_AMD64) \
	 || defined(_M_IA64) \
	 || defined(ETHR_HAVE__INTERLOCKEDCOMPAREEXCHANGE)))
#define ETHR_WIN_MEMBAR_H__

#define ETHR_LoadLoad	(1 << 0)
#define ETHR_LoadStore	(1 << 1)
#define ETHR_StoreLoad	(1 << 2)
#define ETHR_StoreStore	(1 << 3)
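/*
 * Added commentary (not part of the original header): the flags above are
 * bit values and may be OR'ed together to request a combined barrier via
 * ETHR_MEMBAR(), which is defined per platform below. A hedged usage
 * sketch, where ethr_flag___ is a hypothetical variable:
 *
 *     ethr_flag___ = 1;
 *     ETHR_MEMBAR(ETHR_StoreStore);                // order the store above
 *     ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);   // order subsequent loads
 */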

#include <intrin.h>
#undef ETHR_COMPILER_BARRIER
#define ETHR_COMPILER_BARRIER _ReadWriteBarrier()
#pragma intrinsic(_ReadWriteBarrier)

#pragma intrinsic(_InterlockedCompareExchange)

#define ETHR_MB_USING_INTERLOCKED__			\
do {							\
    volatile long x___ = 0;				\
    (void) _InterlockedCompareExchange(&x___, 2, 1);	\
} while (0)
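/*
 * Added commentary (not part of the original header): the Windows
 * Interlocked* functions are documented to imply a full memory barrier, so
 * the dummy _InterlockedCompareExchange on a local volatile long above
 * serves as a portable full fence on targets where no cheaper fence
 * intrinsic is known to be available.
 */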

#if defined(_M_IA64)

#define ETHR_MEMBAR(B) __mf()

#elif defined(_M_AMD64) || defined(_M_IX86)

#include <emmintrin.h>
#include <mmintrin.h>

#if ETHR_SIZEOF_PTR == 4
#  define ETHR_NO_SSE2_MB__ ETHR_MB_USING_INTERLOCKED__
#endif
#pragma intrinsic(_mm_mfence)
#pragma intrinsic(_mm_sfence)
#pragma intrinsic(_mm_lfence)

static ETHR_FORCE_INLINE void
ethr_cfence__(void)
{
    _ReadWriteBarrier();
}

static ETHR_FORCE_INLINE void
ethr_mfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
    if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
	ETHR_NO_SSE2_MB__;
    else
#endif
	_mm_mfence();
}

static ETHR_FORCE_INLINE void
ethr_sfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
    if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
	ETHR_NO_SSE2_MB__;
    else
#endif
	_mm_sfence();
}

static ETHR_FORCE_INLINE void
ethr_lfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
    if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
	ETHR_NO_SSE2_MB__;
    else
#endif
	_mm_lfence();
}

#define ETHR_X86_OUT_OF_ORDER_MEMBAR(B)				\
  ETHR_CHOOSE_EXPR((B) == ETHR_StoreStore,			\
		   ethr_sfence__(),				\
		   ETHR_CHOOSE_EXPR((B) == ETHR_LoadLoad,	\
				    ethr_lfence__(),		\
				    ethr_mfence__()))
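/*
 * Added commentary (not part of the original header): a request for exactly
 * ETHR_StoreStore maps to sfence, exactly ETHR_LoadLoad maps to lfence, and
 * any other combination (including anything containing ETHR_StoreLoad)
 * falls back to the full mfence. For example,
 * ETHR_X86_OUT_OF_ORDER_MEMBAR(ETHR_LoadStore) expands to ethr_mfence__().
 */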

#ifdef ETHR_X86_OUT_OF_ORDER

#define ETHR_MEMBAR(B) \
  ETHR_X86_OUT_OF_ORDER_MEMBAR((B))

#else /* !ETHR_X86_OUT_OF_ORDER (the default) */

/*
 * We assume that the only hardware reordering is of stores past later
 * loads. That is, we assume that *no* instructions like these are used:
 * - CLFLUSH,
 * - streaming stores executed with non-temporal move instructions,
 * - string operations, or
 * - other instructions that are not themselves LoadLoad, LoadStore, and
 *   StoreStore ordered.
 * If such instructions are used, either insert memory barriers using
 * ETHR_X86_OUT_OF_ORDER_MEMBAR() at the appropriate places, or define
 * ETHR_X86_OUT_OF_ORDER. For more information, see the Intel 64 and IA-32
 * Architectures Software Developer's Manual, Vol. 3A, Section 8.2.2.
 */
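/*
 * A hedged sketch (hypothetical code, not part of the original header) of
 * the kind of explicit barrier referred to above, placed after a weakly
 * ordered non-temporal store; buf___, i___, val___, and flag___ are made-up
 * names:
 *
 *     _mm_stream_si32(&buf___[i___], val___);          // non-temporal store
 *     ETHR_X86_OUT_OF_ORDER_MEMBAR(ETHR_StoreStore);   // issues sfence
 *     flag___ = 1;                                      // publish
 *
 * Without the explicit StoreStore barrier, the default ETHR_MEMBAR()
 * definition below would only emit a compiler barrier for this case.
 */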

#define ETHR_MEMBAR(B) \
  ETHR_CHOOSE_EXPR((B) & ETHR_StoreLoad, ethr_mfence__(), ethr_cfence__())
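/*
 * Added commentary (not part of the original header): under the ordering
 * assumptions stated above, the only hardware reordering left is stores
 * passing later loads, so only barrier specifications that include
 * ETHR_StoreLoad need an actual mfence; all other specifications reduce to
 * a compiler barrier, which merely prevents compiler reordering.
 */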

#endif

#else /* No knowledge about platform; use interlocked fallback */

#define ETHR_MEMBAR(B) ETHR_MB_USING_INTERLOCKED__
#define ETHR_READ_DEPEND_MEMORY_BARRIER ETHR_MB_USING_INTERLOCKED__

#endif

#endif /* ETHR_WIN_MEMBAR_H__ */