/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 2011-2016. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

/*
 * Description: Memory barriers for x86/x86-64
 * Author: Rickard Green
 */

#ifndef ETHR_X86_MEMBAR_H__
#define ETHR_X86_MEMBAR_H__
#define ETHR_LoadLoad (1 << 0)
#define ETHR_LoadStore (1 << 1)
#define ETHR_StoreLoad (1 << 2)
#define ETHR_StoreStore (1 << 3)
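
/*
 * Fallback full memory barrier for 32-bit processors lacking the SSE2
 * mfence/sfence/lfence instructions: a LOCKed no-op read-modify-write
 * ("lock; orl $0, mem") orders all earlier loads and stores with
 * respect to all later ones.
 */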
#define ETHR_NO_SSE2_MEMORY_BARRIER__                          \
do {                                                           \
    volatile ethr_sint32_t x__ = 0;                            \
    __asm__ __volatile__ ("lock; orl $0x0, %0\n\t"             \
                          : "=m"(x__)                          \
                          : "m"(x__)                           \
                          : "memory");                         \
} while (0)
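
/* Compiler barrier only; emits no instruction, just prevents the
 * compiler from reordering memory accesses across it. */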
static __inline__ void
ethr_cfence__(void)
{
    __asm__ __volatile__ ("" : : : "memory");
}
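
/* Full hardware memory barrier (including StoreLoad ordering); falls
 * back to the locked no-op above on 32-bit systems without SSE2. */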
static __inline__ void
ethr_mfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
    if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
        ETHR_NO_SSE2_MEMORY_BARRIER__;
    else
#endif
        __asm__ __volatile__ ("mfence\n\t" : : : "memory");
}
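
/* Store barrier; orders stores with respect to later stores. */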
static __inline__ void
ethr_sfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
    if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
        ETHR_NO_SSE2_MEMORY_BARRIER__;
    else
#endif
        __asm__ __volatile__ ("sfence\n\t" : : : "memory");
}
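
/* Load barrier; orders loads with respect to later loads. */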
static __inline__ void
ethr_lfence__(void)
{
#if ETHR_SIZEOF_PTR == 4
    if (ETHR_X86_RUNTIME_CONF_HAVE_NO_SSE2__)
        ETHR_NO_SSE2_MEMORY_BARRIER__;
    else
#endif
        __asm__ __volatile__ ("lfence\n\t" : : : "memory");
}
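
/*
 * Map a barrier mask to the weakest sufficient fence: sfence for a pure
 * StoreStore barrier, lfence for a pure LoadLoad barrier, and mfence
 * for everything else. ETHR_CHOOSE_EXPR is ethread's compile-time
 * selector, defined elsewhere (typically __builtin_choose_expr, else a
 * plain conditional expression).
 */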
#define ETHR_X86_OUT_OF_ORDER_MEMBAR(B)                         \
    ETHR_CHOOSE_EXPR((B) == ETHR_StoreStore,                    \
                     ethr_sfence__(),                           \
                     ETHR_CHOOSE_EXPR((B) == ETHR_LoadLoad,     \
                                      ethr_lfence__(),          \
                                      ethr_mfence__()))

#ifdef ETHR_X86_OUT_OF_ORDER

#define ETHR_MEMBAR(B) \
    ETHR_X86_OUT_OF_ORDER_MEMBAR((B))

#else /* !ETHR_X86_OUT_OF_ORDER (the default) */

/*
 * We assume that only stores before loads may be reordered. That is,
 * we assume that *no* instructions like these are used:
 * - CLFLUSH,
 * - streaming stores executed with non-temporal move,
 * - string operations, or
 * - other instructions which aren't LoadLoad, LoadStore, and StoreStore
 *   ordered by themselves.
 * If such instructions are used, either insert memory barriers
 * using ETHR_X86_OUT_OF_ORDER_MEMBAR() at appropriate places, or
 * define ETHR_X86_OUT_OF_ORDER. For more info see the Intel 64 and IA-32
 * Architectures Software Developer's Manual, Vol. 3A, Section 8.2.2.
 */

#define ETHR_MEMBAR(B) \
    ETHR_CHOOSE_EXPR((B) & ETHR_StoreLoad, ethr_mfence__(), ethr_cfence__())
#endif /* !ETHR_X86_OUT_OF_ORDER */
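
/*
 * Illustrative usage sketch (not part of the original header; the
 * variables and functions below are hypothetical): a producer/consumer
 * hand-off where data is published behind a flag. The StoreStore
 * barrier orders the data store before the flag store on the writer
 * side; the LoadLoad barrier orders the flag load before the data load
 * on the reader side. Under the default configuration both expand to
 * plain compiler barriers, since x86 already orders ordinary stores
 * against stores and ordinary loads against loads.
 *
 *   static volatile ethr_sint32_t shared_data, shared_flag;
 *
 *   void writer(void)
 *   {
 *       shared_data = 4711;
 *       ETHR_MEMBAR(ETHR_StoreStore);
 *       shared_flag = 1;
 *   }
 *
 *   void reader(void)
 *   {
 *       while (!shared_flag)
 *           ;
 *       ETHR_MEMBAR(ETHR_LoadLoad);
 *       consume(shared_data);
 *   }
 */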
#endif /* ETHR_X86_MEMBAR_H__ */