From 498a1d56be241458c85231b5f9da43f4eac0b033 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 14:26:44 +0200
Subject: Add needed barriers for write_concurrency tables

Ets tables using the write_concurrency option could potentially get
into an internally inconsistent state.
---
 erts/emulator/beam/erl_db_hash.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index 9ef990cc4f..9092fa8785 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -105,7 +105,7 @@
 #define NSEG_2   256 /* Size of second segment table */
 #define NSEG_INC 128 /* Number of segments to grow after that */
 
-#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read(&(tb)->segtab))
+#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_acqb(&(tb)->segtab))
 #define NACTIVE(tb) ((int)erts_smp_atomic_read(&(tb)->nactive))
 #define NITEMS(tb) ((int)erts_smp_atomic_read(&(tb)->common.nitems))
 
@@ -122,8 +122,8 @@
  */
 static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval)
 {
-    Uint mask = erts_smp_atomic_read(&tb->szm);
-    Uint ix = hval & mask;
+    Uint mask = erts_smp_atomic_read_acqb(&tb->szm);
+    Uint ix = hval & mask;
     if (ix >= erts_smp_atomic_read(&tb->nactive)) {
         ix &= mask>>1;
         ASSERT(ix < erts_smp_atomic_read(&tb->nactive));
@@ -668,6 +668,7 @@ int db_create_hash(Process *p, DbTable *tbl)
     else { /* coarse locking */
         tb->locks = NULL;
     }
+    ERTS_THR_MEMORY_BARRIER;
 #endif /* ERST_SMP */
     return DB_ERROR_NONE;
 }
@@ -2342,7 +2343,7 @@ static int alloc_seg(DbTableHash *tb)
         struct ext_segment* eseg;
         eseg = (struct ext_segment*) SEGTAB(tb)[seg_ix-1];
         MY_ASSERT(eseg!=NULL && eseg->s.is_ext_segment);
-        erts_smp_atomic_set(&tb->segtab, (erts_aint_t) eseg->segtab);
+        erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t) eseg->segtab);
         tb->nsegs = eseg->nsegs;
     }
     ASSERT(seg_ix < tb->nsegs);
@@ -2414,7 +2415,7 @@ static int free_seg(DbTableHash *tb, int free_records)
         MY_ASSERT(newtop->s.is_ext_segment);
         if (newtop->prev_segtab != NULL) {
             /* Time to use a smaller segtab */
-            erts_smp_atomic_set(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
+            erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)newtop->prev_segtab);
             tb->nsegs = seg_ix;
             ASSERT(tb->nsegs == EXTSEG(SEGTAB(tb))->nsegs);
         }
@@ -2431,7 +2432,7 @@ static int free_seg(DbTableHash *tb, int free_records)
     if (seg_ix > 0) {
         if (seg_ix < tb->nsegs) SEGTAB(tb)[seg_ix] = NULL;
     } else {
-        erts_smp_atomic_set(&tb->segtab, (erts_aint_t)NULL);
+        erts_smp_atomic_set_relb(&tb->segtab, (erts_aint_t)NULL);
     }
 #endif
     tb->nslots -= SEGSZ;
@@ -2526,9 +2527,9 @@ static void grow(DbTableHash* tb, int nactive)
     }
     erts_smp_atomic_inc(&tb->nactive);
     if (from_ix == 0) {
-        erts_smp_atomic_set(&tb->szm, szm);
+        erts_smp_atomic_set_relb(&tb->szm, szm);
     }
-    erts_smp_atomic_set(&tb->is_resizing, 0);
+    erts_smp_atomic_set_relb(&tb->is_resizing, 0);
 
     /* Finally, let's split the bucket.
        We try to do it in a smart way to keep link order and avoid unnecessary updates of next-pointers */
@@ -2560,7 +2561,7 @@ static void grow(DbTableHash* tb, int nactive)
     return;
 
 abort:
-    erts_smp_atomic_set(&tb->is_resizing, 0);
+    erts_smp_atomic_set_relb(&tb->is_resizing, 0);
 }
 
@@ -2604,7 +2605,7 @@ static void shrink(DbTableHash* tb, int nactive)
             erts_smp_atomic_set(&tb->nactive, src_ix);
             if (dst_ix == 0) {
-                erts_smp_atomic_set(&tb->szm, low_szm);
+                erts_smp_atomic_set_relb(&tb->szm, low_szm);
             }
             WUNLOCK_HASH(lck);
@@ -2618,7 +2619,7 @@ static void shrink(DbTableHash* tb, int nactive)
     }
     /*else already done */
-    erts_smp_atomic_set(&tb->is_resizing, 0);
+    erts_smp_atomic_set_relb(&tb->is_resizing, 0);
 }
--
cgit v1.2.3
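The fix above follows the classic publication discipline for lock-free readers:
a writer must finish all stores to a new segment table before publishing its
pointer with a release store, and readers must load that pointer with acquire
semantics before dereferencing it. A minimal sketch of the same pairing in
portable C11 atomics, with hypothetical names standing in for tb->segtab and
the erts_smp_atomic_* wrappers:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct segment { void *buckets[256]; };

    /* Hypothetical stand-in for tb->segtab. */
    static _Atomic(struct segment **) segtab;

    /* Writer side: build the new table completely, then publish it with a
     * release store -- the role erts_smp_atomic_set_relb() plays in
     * alloc_seg()/free_seg() above. */
    int publish_segtab(size_t nsegs)
    {
        struct segment **tab = calloc(nsegs, sizeof *tab);
        if (!tab)
            return -1;
        atomic_store_explicit(&segtab, tab, memory_order_release);
        return 0;
    }

    /* Reader side: an acquire load pairs with the release store, so a reader
     * that observes the new pointer also observes the fully initialized
     * table -- the role erts_smp_atomic_read_acqb() plays in SEGTAB(). */
    struct segment *segment_at(size_t ix)
    {
        struct segment **tab = atomic_load_explicit(&segtab, memory_order_acquire);
        return tab ? tab[ix] : NULL;
    }

The same reasoning applies to tb->szm and tb->is_resizing above: the release
store guarantees that everything written during the resize is visible to any
thread that subsequently observes the updated value.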
From 434cab885e50dd72f3c4f87f30d9ee21085cc657 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 20:12:02 +0200
Subject: Ensure that stack updates are seen when stack is released

Ets tables using ordered_set could potentially get into an internally
inconsistent state.
---
 erts/emulator/beam/erl_db_tree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index a59c0c258d..d5b02584a5 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -111,7 +111,7 @@ static void release_stack(DbTableTree* tb, DbTreeStack* stack)
 {
     if (stack == &tb->static_stack) {
         ASSERT(erts_smp_atomic_read(&tb->is_stack_busy) == 1);
-        erts_smp_atomic_set(&tb->is_stack_busy, 0);
+        erts_smp_atomic_set_relb(&tb->is_stack_busy, 0);
     }
     else {
         erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
--
cgit v1.2.3


From 673f5d1bf684f15ac5526d8a21552eca9a0c9052 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 20:25:53 +0200
Subject: Ensure that all rehashing information is seen when done

This is not a bugfix. The change is done in order to avoid a future bug.
---
 erts/emulator/beam/safe_hash.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c
index 21d6ce9304..3e9243c77d 100644
--- a/erts/emulator/beam/safe_hash.c
+++ b/erts/emulator/beam/safe_hash.c
@@ -99,7 +99,7 @@ static void rehash(SafeHash* h, int grow_limit)
         erts_free(h->type, (void *) old_tab);
     }
     /*else already done */
-    erts_smp_atomic_set(&h->is_rehashing, 0);
+    erts_smp_atomic_set_relb(&h->is_rehashing, 0);
 }
--
cgit v1.2.3


From 425e282be62f8205b1ba262b112f38688b421c49 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 20:43:11 +0200
Subject: Ensure quick break

Make sure that we don't have to wait in poll before break handling
is done.
---
 erts/emulator/sys/common/erl_check_io.c | 5 +++++
 erts/emulator/sys/win32/erl_poll.c      | 2 ++
 2 files changed, 7 insertions(+)

(limited to 'erts/emulator')

diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 218bd79584..71b374527e 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1137,6 +1137,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
 
  restart:
 
+#ifdef ERTS_BREAK_REQUESTED
+    if (ERTS_BREAK_REQUESTED)
+        erts_do_break_handling();
+#endif
+
     /* Figure out timeout value */
     if (do_wait) {
         erts_time_remaining(&wait_time);
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 7662f190ef..074e2e247f 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -693,6 +693,7 @@ static void *break_waiter(void *param)
             ResetEvent(harr[0]);
             erts_mtx_lock(&break_waiter_lock);
             erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_BREAK);
+            ERTS_THR_MEMORY_BARRIER;
             SetEvent(break_happened_event);
             erts_mtx_unlock(&break_waiter_lock);
             break;
@@ -700,6 +701,7 @@ static void *break_waiter(void *param)
             ResetEvent(harr[1]);
             erts_mtx_lock(&break_waiter_lock);
             erts_atomic32_set(&break_waiter_state,BREAK_WAITER_GOT_HALT);
+            ERTS_THR_MEMORY_BARRIER;
             SetEvent(break_happened_event);
             erts_mtx_unlock(&break_waiter_lock);
             break;
--
cgit v1.2.3
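Two things make the break prompt here: erts_check_io() now re-checks the
break-request flag before (re)entering poll, so a pending request cannot sit
out a full poll timeout, and break_waiter() issues a memory barrier between
writing break_waiter_state and signalling the event, so the thread woken by
the event is guaranteed to see the state written before it. A rough sketch of
that flag-then-event ordering in C11 atomics (hypothetical names, not the
ERTS ones):

    #include <stdatomic.h>

    /* Hypothetical stand-ins for break_waiter_state / break_happened_event. */
    static atomic_int break_state;   /* what kind of break happened */
    static atomic_int break_event;   /* the "event" the poll thread checks */

    /* Signaling thread: write the state first, then raise the event with a
     * release store. Without this ordering (the patch inserts an explicit
     * ERTS_THR_MEMORY_BARRIER before SetEvent()), the woken thread could see
     * the event raised but still read a stale state word. */
    void signal_break(int state)
    {
        atomic_store_explicit(&break_state, state, memory_order_relaxed);
        atomic_store_explicit(&break_event, 1, memory_order_release);
    }

    /* Polling thread: re-check the event before blocking (mirroring the
     * ERTS_BREAK_REQUESTED test added to erts_check_io() above); the acquire
     * load pairs with the release store. Returns -1 if no break is pending
     * and it is safe to enter poll. */
    int check_break(void)
    {
        if (atomic_load_explicit(&break_event, memory_order_acquire)) {
            atomic_store_explicit(&break_event, 0, memory_order_relaxed);
            return atomic_load_explicit(&break_state, memory_order_relaxed);
        }
        return -1;
    }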
From 3da283a85f73132e73ab911154c2c0ff8797d61d Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 20:59:36 +0200
Subject: Remove pointless erts_ports_alive variable

---
 erts/emulator/beam/bif.c           | 22 ++++++++++------------
 erts/emulator/beam/erl_port_task.c |  3 ---
 erts/emulator/beam/global.h        |  1 -
 erts/emulator/beam/io.c            |  3 ---
 4 files changed, 10 insertions(+), 19 deletions(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index b3325d635b..fe8dcc4d8b 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3270,6 +3270,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
     Eterm* dead_ports;
     int alive, dead;
     Uint32 next_ss;
+    int i;
 
     /* To get a consistent snapshot...
      * We add alive ports from start of the buffer
@@ -3283,19 +3284,16 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
 
     next_ss = erts_smp_atomic_inctest(&erts_ports_snapshot);
 
-    if (erts_smp_atomic_read(&erts_ports_alive) > 0) {
-        erts_aint_t i;
-        for (i = erts_max_ports-1; i >= 0; i--) {
-            Port* prt = &erts_port[i];
-            erts_smp_port_state_lock(prt);
-            if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
-                && prt->snapshot != next_ss) {
-                ASSERT(prt->snapshot == next_ss - 1);
-                *pp++ = prt->id;
-                prt->snapshot = next_ss; /* Consumed by this snapshot */
-            }
-            erts_smp_port_state_unlock(prt);
+    for (i = erts_max_ports-1; i >= 0; i--) {
+        Port* prt = &erts_port[i];
+        erts_smp_port_state_lock(prt);
+        if (!(prt->status & ERTS_PORT_SFLGS_DEAD)
+            && prt->snapshot != next_ss) {
+            ASSERT(prt->snapshot == next_ss - 1);
+            *pp++ = prt->id;
+            prt->snapshot = next_ss; /* Consumed by this snapshot */
         }
+        erts_smp_port_state_unlock(prt);
     }
 
     dead_ports = (Eterm*)erts_smp_atomic_xchg(&erts_dead_ports_ptr,
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 1b07024ca1..326021643f 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -658,8 +658,6 @@ erts_port_task_free_port(Port *pp)
            when scheduled out...
         */
        ErtsPortTask *ptp = port_task_alloc();
        erts_smp_port_state_lock(pp);
-       ASSERT(erts_smp_atomic_read(&erts_ports_alive) > 0);
-       erts_smp_atomic_dec(&erts_ports_alive);
        pp->status &= ~ERTS_PORT_SFLG_CLOSING;
        pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
        erts_may_save_closed_port(pp);
@@ -681,7 +679,6 @@ erts_port_task_free_port(Port *pp)
            port_is_dequeued = 1;
        }
        erts_smp_port_state_lock(pp);
-       erts_smp_atomic_dec(&erts_ports_alive);
        pp->status &= ~ERTS_PORT_SFLG_CLOSING;
        pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
        erts_may_save_closed_port(pp);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 96da894d90..18dc9f75d5 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -527,7 +527,6 @@ union erl_off_heap_ptr {
 
 /* arrays that get malloced at startup */
 extern Port* erts_port;
-extern erts_smp_atomic_t erts_ports_alive;
 extern Uint erts_max_ports;
 extern Uint erts_port_tab_index_mask;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index f619c6f88b..86f550310c 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -56,7 +56,6 @@ static erts_smp_tsd_key_t driver_list_last_error_key;
                            /* Save last DDLL error on a per thread basis (for BC interfaces) */
 
 Port* erts_port;           /* The port table */
-erts_smp_atomic_t erts_ports_alive;
 erts_smp_atomic_t erts_bytes_out; /* No bytes sent out of the system */
 erts_smp_atomic_t erts_bytes_in;  /* No bytes gotten into the system */
@@ -421,7 +420,6 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
     new_name = (char*) erts_alloc(ERTS_ALC_T_PORT_NAME, sys_strlen(name)+1);
     sys_strcpy(new_name, name);
     erts_smp_runq_lock(runq);
-    erts_smp_atomic_inc(&erts_ports_alive);
     erts_smp_port_state_lock(prt);
     prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
     prt->snapshot = (Uint32) erts_smp_atomic_read(&erts_ports_snapshot);
@@ -1274,7 +1272,6 @@ void init_io(void)
 
     erts_smp_atomic_init(&erts_bytes_out, 0);
     erts_smp_atomic_init(&erts_bytes_in, 0);
-    erts_smp_atomic_init(&erts_ports_alive, 0);
 
     for (i = 0; i < erts_max_ports; i++) {
         erts_port_task_init_sched(&erts_port[i].sched);
--
cgit v1.2.3
From 78ebe8aa3754fc8837ab3a6b0bc11d1e78275eef Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 21:12:05 +0200
Subject: Use 32-bit atomic for port snapshot

---
 erts/emulator/beam/bif.c    | 2 +-
 erts/emulator/beam/global.h | 8 ++++----
 erts/emulator/beam/io.c     | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index fe8dcc4d8b..fe21e30d82 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3282,7 +3282,7 @@ BIF_RETTYPE ports_0(BIF_ALIST_0)
     erts_smp_atomic_set(&erts_dead_ports_ptr,
                         (erts_aint_t) (port_buf + erts_max_ports));
 
-    next_ss = erts_smp_atomic_inctest(&erts_ports_snapshot);
+    next_ss = erts_smp_atomic32_inctest(&erts_ports_snapshot);
 
     for (i = erts_max_ports-1; i >= 0; i--) {
         Port* prt = &erts_port[i];
         erts_smp_port_state_lock(prt);
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 18dc9f75d5..7dc5aa85e9 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -183,7 +183,7 @@ struct port {
                                   process to get (line oriented I/O)*/
     Uint32 status;               /* Status and type flags */
     int control_flags;           /* Flags for port_control() */
-    Uint32 snapshot;             /* Next snapshot that port should be part of */
+    erts_aint32_t snapshot;      /* Next snapshot that port should be part of */
     struct reg_proc *reg;
     ErlDrvPDL port_data_lock;
@@ -530,7 +530,7 @@ extern Port* erts_port;
 extern Uint erts_max_ports;
 extern Uint erts_port_tab_index_mask;
 
-extern erts_smp_atomic_t erts_ports_snapshot;
+extern erts_smp_atomic32_t erts_ports_snapshot;
 extern erts_smp_atomic_t erts_dead_ports_ptr;
 
 ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt);
@@ -540,12 +540,12 @@ ERTS_GLB_INLINE void erts_may_save_closed_port(Port *prt)
 {
     ERTS_SMP_LC_ASSERT(erts_smp_lc_spinlock_is_locked(&prt->state_lck));
-    if (prt->snapshot != erts_smp_atomic_read(&erts_ports_snapshot)) {
+    if (prt->snapshot != erts_smp_atomic32_read_acqb(&erts_ports_snapshot)) {
        /* Dead ports are added from the end of the snapshot buffer */
        Eterm* tombstone = (Eterm*) erts_smp_atomic_addtest(&erts_dead_ports_ptr,
                                                            -(erts_aint_t)sizeof(Eterm));
        ASSERT(tombstone+1 != NULL);
-       ASSERT(prt->snapshot == (Uint32) erts_smp_atomic_read(&erts_ports_snapshot) - 1);
+       ASSERT(prt->snapshot == erts_smp_atomic32_read(&erts_ports_snapshot) - 1);
        *tombstone = prt->id;
     }
     /*else no ongoing snapshot or port was already included or created after snapshot */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 86f550310c..8c1126aa6e 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -189,7 +189,7 @@ typedef struct line_buf_context {
 static erts_smp_spinlock_t get_free_port_lck;
 static Uint last_port_num;
 static Uint port_num_mask;
-erts_smp_atomic_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
+erts_smp_atomic32_t erts_ports_snapshot; /* Identifies the _next_ snapshot (not the ongoing) */
 
 static ERTS_INLINE void
@@ -422,7 +422,7 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver,
     erts_smp_runq_lock(runq);
     erts_smp_port_state_lock(prt);
     prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus;
-    prt->snapshot = (Uint32) erts_smp_atomic_read(&erts_ports_snapshot);
+    prt->snapshot = erts_smp_atomic32_read(&erts_ports_snapshot);
     old_name = prt->name;
     prt->name = new_name;
 #ifdef ERTS_SMP
@@ -1293,7 +1293,7 @@ void init_io(void)
         erts_port[i].port_data_lock = NULL;
     }
 
-    erts_smp_atomic_init(&erts_ports_snapshot, (erts_aint_t) 0);
+    erts_smp_atomic32_init(&erts_ports_snapshot, (erts_aint32_t) 0);
     last_port_num = 0;
     erts_smp_spinlock_init(&get_free_port_lck, "get_free_port");
--
cgit v1.2.3


From 150e88657f617d652d0ab5e4b45409edeb529c50 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 21:35:30 +0200
Subject: Reduce number of atomic ops

Counters for active and used schedulers have been coalesced in order
to reduce the number of atomic operations needed. Some barriers that
are currently not strictly necessary have also been added in order to
be future proof.
---
 erts/emulator/beam/erl_process.c | 135 +++++++++++++++++++++++++++++----------
 1 file changed, 100 insertions(+), 35 deletions(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 31f23d3978..559224be58 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -166,9 +166,8 @@ static struct {
 
 static struct {
     erts_smp_mtx_t update_mtx;
-    erts_smp_atomic32_t active_runqs;
+    erts_smp_atomic32_t no_runqs;
     int last_active_runqs;
-    erts_smp_atomic32_t used_runqs;
     int forced_check_balance;
     erts_smp_atomic32_t checking_balance;
     int halftime;
@@ -965,7 +964,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount)
     erts_aint32_t flgs;
 
     do {
-        flgs = erts_smp_atomic32_read(&ssi->flags);
+        flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
        if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING))
            != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) {
            break;
@@ -1114,7 +1113,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
     else {
        erts_aint_t dt;
 
-       erts_smp_atomic32_set(&function_calls, 0);
+       erts_smp_atomic32_set_relb(&function_calls, 0);
        *fcalls = 0;
 
        sched_waiting_sys(esdp->no, rq);
@@ -1147,7 +1146,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
            nonblockable_aux_work(esdp, ssi, aux_work);
 #endif
 
-           flgs = erts_smp_atomic32_read(&ssi->flags);
+           flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
            if (!(flgs & ERTS_SSI_FLG_WAITING)) {
                ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
                goto sys_woken;
@@ -1333,6 +1332,76 @@ wake_all_schedulers(void)
     }
 }
 
+#define ERTS_NO_USED_RUNQS_SHIFT 16
+#define ERTS_NO_RUNQS_MASK 0xffff
+
+#if ERTS_MAX_NO_OF_SCHEDULERS > ERTS_NO_RUNQS_MASK
+# error "Too large amount of schedulers allowed"
+#endif
+
+static ERTS_INLINE void
+init_no_runqs(int active, int used)
+{
+    erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK);
+    no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT);
+    erts_smp_atomic32_init(&balance_info.no_runqs, no_runqs);
+}
+
+static ERTS_INLINE void
+get_no_runqs(int *active, int *used)
+{
+    erts_aint32_t no_runqs = erts_smp_atomic32_read(&balance_info.no_runqs);
+    if (active)
+        *active = (int) (no_runqs & ERTS_NO_RUNQS_MASK);
+    if (used)
+        *used = (int) ((no_runqs >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK);
+}
+
+static ERTS_INLINE void
+set_no_used_runqs(int used)
+{
+    erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+    while (1) {
+        erts_aint32_t act, new;
+        new = (used << ERTS_NO_USED_RUNQS_SHIFT) | (exp & ERTS_NO_RUNQS_MASK);
+        act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+        if (act == exp)
+            break;
+        exp = act;
+    }
+}
+
+static ERTS_INLINE void
+set_no_active_runqs(int active)
+{
+    erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+    while (1) {
+        erts_aint32_t act, new;
+        new = (exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT)) | active;
+        act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+        if (act == exp)
+            break;
+        exp = act;
+    }
+}
+
+static ERTS_INLINE int
+try_inc_no_active_runqs(int active)
+{
+    erts_aint32_t exp = erts_smp_atomic32_read(&balance_info.no_runqs);
+    if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active)
+        return 0;
+    if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) {
+        erts_aint32_t new, act;
+        new = (exp & ~ERTS_NO_RUNQS_MASK) | active;
+        act = erts_smp_atomic32_cmpxchg(&balance_info.no_runqs, new, exp);
+        if (act == exp)
+            return 1;
+    }
+    return 0;
+}
+
+
 static ERTS_INLINE int
 chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
 {
@@ -1344,9 +1413,7 @@ chk_wake_sched(ErtsRunQueue *crq, int ix, int activate)
     iflgs = erts_smp_atomic32_read(&wrq->info_flags);
     if (!(iflgs & (ERTS_RUNQ_IFLG_SUSPENDED|ERTS_RUNQ_IFLG_NONEMPTY))) {
        if (activate) {
-           if (ix == erts_smp_atomic32_cmpxchg(&balance_info.active_runqs,
-                                               ix+1,
-                                               ix)) {
+           if (try_inc_no_active_runqs(ix+1)) {
                erts_smp_xrunq_lock(crq, wrq);
                wrq->flags &= ~ERTS_RUNQ_FLG_INACTIVE;
                erts_smp_xrunq_unlock(crq, wrq);
@@ -1363,8 +1430,9 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
 {
     int ix = crq->ix;
     int stop_ix = ix;
-    int active_ix = erts_smp_atomic32_read(&balance_info.active_runqs);
-    int balance_ix = erts_smp_atomic32_read(&balance_info.used_runqs);
+    int active_ix, balance_ix;
+
+    get_no_runqs(&active_ix, &balance_ix);
 
     if (active_ix > balance_ix)
        active_ix = balance_ix;
@@ -1416,7 +1484,7 @@ erts_sched_notify_check_cpu_bind(void)
     int ix;
     if (erts_common_run_queue) {
        for (ix = 0; ix < erts_no_schedulers; ix++)
-           erts_smp_atomic32_set(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
+           erts_smp_atomic32_set_relb(&ERTS_SCHEDULER_IX(ix)->chk_cpu_bind, 1);
        wake_all_schedulers();
     }
     else {
@@ -1871,8 +1939,7 @@ try_steal_task(ErtsRunQueue *rq)
 
     ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked);
 
-    active_rqs = erts_smp_atomic32_read(&balance_info.active_runqs);
-    blnc_rqs = erts_smp_atomic32_read(&balance_info.used_runqs);
+    get_no_runqs(&active_rqs, &blnc_rqs);
 
     if (active_rqs > blnc_rqs)
        active_rqs = blnc_rqs;
@@ -1883,7 +1950,7 @@ try_steal_task(ErtsRunQueue *rq)
     if (active_rqs < blnc_rqs) {
        int no = blnc_rqs - active_rqs;
        int stop_ix = vix = active_rqs + rq->ix % no;
-       while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
+       while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
            res = check_possible_steal_victim(rq, &rq_locked, vix);
            if (res)
                goto done;
@@ -1898,7 +1965,7 @@ try_steal_task(ErtsRunQueue *rq)
     vix = rq->ix;
 
     /* ... then try to steal a job from another active queue...
      */
-    while (erts_smp_atomic32_read(&no_empty_run_queues) < blnc_rqs) {
+    while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) {
        vix++;
        if (vix >= active_rqs)
            vix = 0;
@@ -1999,7 +2066,7 @@ check_balance(ErtsRunQueue *c_rq)
        return;
     }
 
-    blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
+    get_no_runqs(NULL, &blnc_no_rqs);
     if (blnc_no_rqs == 1) {
        c_rq->check_balance_reds = INT_MAX;
        erts_smp_atomic32_set(&balance_info.checking_balance, 0);
@@ -2038,7 +2105,8 @@ check_balance(ErtsRunQueue *c_rq)
     forced = balance_info.forced_check_balance;
     balance_info.forced_check_balance = 0;
 
-    blnc_no_rqs = (int) erts_smp_atomic32_read(&balance_info.used_runqs);
+    get_no_runqs(&current_active, &blnc_no_rqs);
+
     if (blnc_no_rqs == 1) {
        erts_smp_mtx_unlock(&balance_info.update_mtx);
        erts_smp_runq_lock(c_rq);
@@ -2052,8 +2120,6 @@ check_balance(ErtsRunQueue *c_rq)
     if (balance_info.full_reds_history_index >= ERTS_FULL_REDS_HISTORY_SIZE)
        balance_info.full_reds_history_index = 0;
 
-    current_active = erts_smp_atomic32_read(&balance_info.active_runqs);
-
     /* Read balance information for all run queues */
     for (qix = 0; qix < blnc_no_rqs; qix++) {
        ErtsRunQueue *rq = ERTS_RUNQ_IX(qix);
@@ -2387,7 +2453,7 @@ erts_fprintf(stderr, "--------------------------------\n");
     }
 
     balance_info.last_active_runqs = active;
-    erts_smp_atomic32_set(&balance_info.active_runqs, active);
+    set_no_active_runqs(active);
 
     balance_info.halftime = 1;
     erts_smp_atomic32_set(&balance_info.checking_balance, 0);
@@ -2695,9 +2761,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
     erts_smp_atomic32_init(&schdlr_sspnd.msb.ongoing, 0);
     erts_smp_atomic32_init(&schdlr_sspnd.active, no_schedulers);
     schdlr_sspnd.msb.procs = NULL;
-    erts_smp_atomic32_set(&balance_info.used_runqs,
-                          erts_common_run_queue ? 1 : no_schedulers_online);
-    erts_smp_atomic32_init(&balance_info.active_runqs, no_schedulers);
+    init_no_runqs(no_schedulers,
+                  erts_common_run_queue ? 1 : no_schedulers_online);
     balance_info.last_active_runqs = no_schedulers;
     erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update");
     balance_info.forced_check_balance = 0;
@@ -2939,7 +3004,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount)
     erts_aint32_t flgs;
 
     do {
-       flgs = erts_smp_atomic32_read(&ssi->flags);
+       flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
       if ((flgs & (ERTS_SSI_FLG_SLEEPING
                    | ERTS_SSI_FLG_WAITING
                    | ERTS_SSI_FLG_SUSPENDED))
@@ -3068,7 +3133,7 @@ suspend_scheduler(ErtsSchedulerData *esdp)
           wake = 0;
       }
 
-      flgs = erts_smp_atomic32_read(&ssi->flags);
+      flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
      if (!(flgs & ERTS_SSI_FLG_SUSPENDED))
          break;
      erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
@@ -3292,7 +3357,7 @@ erts_set_schedulers_online(Process *p,
                   ErtsRunQueue *to_rq = ERTS_RUNQ_IX(ix % no);
                   evacuate_run_queue(from_rq, to_rq);
               }
-              erts_smp_atomic32_set(&balance_info.used_runqs, no);
+              set_no_used_runqs(no);
               erts_smp_mtx_unlock(&balance_info.update_mtx);
               erts_smp_mtx_lock(&schdlr_sspnd.mtx);
           }
@@ -3346,7 +3411,7 @@ erts_set_schedulers_online(Process *p,
               for (ix = erts_no_run_queues-1; ix >= no; ix--)
                   evacuate_run_queue(ERTS_RUNQ_IX(ix),
                                      ERTS_RUNQ_IX(ix % no));
-              erts_smp_atomic32_set(&balance_info.used_runqs, no);
+              set_no_used_runqs(no);
               erts_smp_mtx_unlock(&balance_info.update_mtx);
               erts_smp_mtx_lock(&schdlr_sspnd.mtx);
               for (ix = no; ix < online; ix++) {
@@ -3443,7 +3508,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
       else {
           erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
           erts_smp_mtx_lock(&balance_info.update_mtx);
-          erts_smp_atomic32_set(&balance_info.used_runqs, 1);
+          set_no_used_runqs(1);
           for (ix = 0; ix < online; ix++) {
               ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
               erts_smp_runq_lock(rq);
@@ -3580,7 +3645,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int all)
           evacuate_run_queue(ERTS_RUNQ_IX(ix),
                              ERTS_RUNQ_IX(ix % online));
 
-      erts_smp_atomic32_set(&balance_info.used_runqs, online);
+      set_no_used_runqs(online);
       /* Make sure that we balance soon... */
       balance_info.forced_check_balance = 1;
       erts_smp_runq_lock(ERTS_RUNQ_IX(0));
@@ -5097,7 +5162,7 @@ Process *schedule(Process *p, int calls)
       esdp = erts_get_scheduler_data();
       rq = erts_get_runq_current(esdp);
       ASSERT(esdp);
-      fcalls = (int) erts_smp_atomic32_read(&function_calls);
+      fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
       actual_reds = reds = 0;
       erts_smp_runq_lock(rq);
     } else {
@@ -5248,14 +5313,14 @@ Process *schedule(Process *p, int calls)
                                | ERTS_RUNQ_FLG_CHK_CPU_BIND
                                | ERTS_RUNQ_FLG_SUSPENDED)) {
           if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
-              || (erts_smp_atomic32_read(&esdp->ssi->flags)
+              || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
                   & ERTS_SSI_FLG_SUSPENDED)) {
               ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
                      & ERTS_SSI_FLG_SUSPENDED);
               suspend_scheduler(esdp);
           }
           if ((rq->flags & ERTS_RUNQ_FLG_CHK_CPU_BIND)
-              || erts_smp_atomic32_read(&esdp->chk_cpu_bind)) {
+              || erts_smp_atomic32_read_acqb(&esdp->chk_cpu_bind)) {
               erts_sched_check_cpu_bind(esdp);
           }
       }
@@ -5306,7 +5371,7 @@ Process *schedule(Process *p, int calls)
           if (rq->flags & (ERTS_RUNQ_FLG_SHARED_RUNQ
                            | ERTS_RUNQ_FLG_SUSPENDED)) {
               if ((rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
-                  || (erts_smp_atomic32_read(&esdp->ssi->flags)
+                  || (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
                      & ERTS_SSI_FLG_SUSPENDED)) {
                  ASSERT(erts_smp_atomic32_read(&esdp->ssi->flags)
                         & ERTS_SSI_FLG_SUSPENDED);
@@ -5350,7 +5415,7 @@ Process *schedule(Process *p, int calls)
        * Schedule system-level activities.
        */
 
-      erts_smp_atomic32_set(&function_calls, 0);
+      erts_smp_atomic32_set_relb(&function_calls, 0);
      fcalls = 0;
 
      ASSERT(!erts_port_task_have_outstanding_io_tasks());
@@ -5392,7 +5457,7 @@ Process *schedule(Process *p, int calls)
          if (erts_common_run_queue->waiting)
              wake_scheduler(erts_common_run_queue, 0, 1);
      }
-     else if (erts_smp_atomic32_read(&no_empty_run_queues) != 0) {
+     else if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) {
          wake_scheduler_on_empty_runq(rq);
          rq->wakeup_other = 0;
      }
--
cgit v1.2.3
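The coalescing above works by packing the active and used counts into the two
halves of a single 32-bit atomic word (see ERTS_NO_USED_RUNQS_SHIFT and
ERTS_NO_RUNQS_MASK), so one atomic read yields both counts and either field
can still be updated independently with a compare-and-swap loop. A standalone
sketch of the same pattern in C11 atomics (illustrative names, not the ERTS
API):

    #include <stdatomic.h>
    #include <stdint.h>

    #define USED_SHIFT 16
    #define COUNT_MASK 0xffffu

    /* Low 16 bits: active count; high 16 bits: used count. */
    static _Atomic uint32_t no_runqs;

    /* One atomic load yields both counts, where two loads were needed
     * before (and the pair could be mutually inconsistent). */
    static void get_counts(int *active, int *used)
    {
        uint32_t v = atomic_load(&no_runqs);
        *active = (int) (v & COUNT_MASK);
        *used   = (int) ((v >> USED_SHIFT) & COUNT_MASK);
    }

    /* Update one field with a CAS loop, leaving the other untouched;
     * mirrors set_no_used_runqs() in the patch. */
    static void set_used(int used)
    {
        uint32_t exp = atomic_load(&no_runqs);
        for (;;) {
            uint32_t desired = ((uint32_t) used << USED_SHIFT) | (exp & COUNT_MASK);
            /* On failure, exp is refreshed with the current value. */
            if (atomic_compare_exchange_weak(&no_runqs, &exp, desired))
                break;
        }
    }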
From 6af0f286a3a46a7e2faf722306e7be57bf3de687 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 21:11:41 +0200
Subject: Fix build with hipe on amd64

---
 erts/emulator/beam/atom.names | 1 +
 1 file changed, 1 insertion(+)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 327620772f..de76cb9680 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -76,6 +76,7 @@ atom allocator_sizes
 atom alloc_util_allocators
 atom allow_passive_connect
 atom already_loaded
+atom amd64
 atom anchored
 atom and
 atom andalso
--
cgit v1.2.3
From 139fa05489a6ba3e4384e6f20ea3f943741449d5 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Tue, 10 May 2011 14:37:49 +0200
Subject: Silence warnings

---
 erts/emulator/beam/beam_emu.c       |  2 +-
 erts/emulator/beam/erl_drv_thread.c |  7 +++++++
 erts/emulator/sys/common/erl_poll.c |  7 ++++---
 3 files changed, 12 insertions(+), 4 deletions(-)

(limited to 'erts/emulator')

diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 32ea8588d2..99bf7e0439 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -5322,7 +5322,7 @@ void process_main(void)
            ep->code[3] = (BeamInstr) OpCode(apply_bif);
            ep->code[4] = (BeamInstr) bif_table[i].f;
            /* XXX: set func info for bifs */
-           ((BeamInstr*)ep->code + 3)[-5] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
+           ep->fake_op_func_info_for_hipe[0] = (BeamInstr) BeamOp(op_i_func_info_IaaI);
        }
 
     return;
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 39bbe9633b..dc578f6d2a 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -700,6 +700,13 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp)
 extern int erts_darwin_main_thread_pipe[2];
 extern int erts_darwin_main_thread_result_pipe[2];
 
+int erl_drv_stolen_main_thread_join(ErlDrvTid tid, void **respp);
+int erl_drv_steal_main_thread(char *name,
+                              ErlDrvTid *dtid,
+                              void* (*func)(void*),
+                              void* arg,
+                              ErlDrvThreadOpts *opts);
+
 
 int
 erl_drv_stolen_main_thread_join(ErlDrvTid tid, void **respp)
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 3ae5b8d747..f5c785d683 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -766,7 +766,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
            short filter;
            int fd = (int) ebuf[i].ident;
 
-           switch ((int) ebuf[i].udata) {
+           switch ((int) (long) ebuf[i].udata) {
 
            /*
             * Since we use a lazy update approach EV_DELETE will
@@ -805,7 +805,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
                    if (fd == (int) ebuf[j].ident) {
                        ebuf[j].udata = (void *) ERTS_POLL_KQ_OP_HANDLED;
                        if (!(ebuf[j].flags & EV_ERROR)) {
-                           switch ((int) ebuf[j].udata) {
+                           switch ((int) (long) ebuf[j].udata) {
                            case ERTS_POLL_KQ_OP_ADD2_W:
                                filter = EVFILT_WRITE;
                                goto rm_add_fb;
@@ -823,7 +823,8 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp)
                    }
                }
                /* The other add succeded... */
-               filter = (((int) ebuf[i].udata == ERTS_POLL_KQ_OP_ADD2_W)
+               filter = ((((int) (long) ebuf[i].udata)
+                          == ERTS_POLL_KQ_OP_ADD2_W)
                          ? EVFILT_READ
                          : EVFILT_WRITE);
            rm_add_fb:
--
cgit v1.2.3